/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_draid.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>

#ifdef _KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/callb.h>
#include <sys/zone.h>
#include <sys/vmsystm.h>
#endif /* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
        ZTI_MODE_FIXED,         /* value is # of threads (min 1) */
        ZTI_MODE_BATCH,         /* cpu-intensive; value is ignored */
        ZTI_MODE_SCALE,         /* Taskqs scale with CPUs. */
        ZTI_MODE_NULL,          /* don't create a taskq */
        ZTI_NMODES
} zti_modes_t;

#define ZTI_P(n, q)     { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_PCT(n)      { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define ZTI_BATCH       { ZTI_MODE_BATCH, 0, 1 }
#define ZTI_SCALE       { ZTI_MODE_SCALE, 0, 1 }
#define ZTI_NULL        { ZTI_MODE_NULL, 0, 0 }

#define ZTI_N(n)        ZTI_P(n, 1)
#define ZTI_ONE         ZTI_N(1)

typedef struct zio_taskq_info {
        zti_modes_t zti_mode;
        uint_t zti_value;
        uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
        "iss", "iss_h", "int", "int_h"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random. ZTI_SCALE is similar to ZTI_BATCH,
 * but with number of taskqs also scaling with number of CPUs.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
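
/*
 * Illustrative example (not an entry from the table below): a hypothetical
 * row value of ZTI_P(4, 8) would expand to { ZTI_MODE_FIXED, 4, 8 }, i.e.
 * eight discrete taskqs of four threads each, with every dispatch picking
 * one of the eight taskqs at random.  ZTI_N(8), as used for READ issue
 * below, is the degenerate case: a single taskq with eight threads.
 */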
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
        /* ISSUE        ISSUE_HIGH      INTR            INTR_HIGH */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* NULL */
        { ZTI_N(8),     ZTI_NULL,       ZTI_SCALE,      ZTI_NULL }, /* READ */
        { ZTI_BATCH,    ZTI_N(5),       ZTI_SCALE,      ZTI_N(5) }, /* WRITE */
        { ZTI_SCALE,    ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* FREE */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* CLAIM */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* IOCTL */
        { ZTI_N(4),     ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* TRIM */
};

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t          zio_taskq_batch_pct = 80;       /* 1 thread per cpu in pset */
uint_t          zio_taskq_batch_tpq;            /* threads per taskq */
boolean_t       zio_taskq_sysdc = B_TRUE;       /* use SDC scheduling class */
uint_t          zio_taskq_basedc = 80;          /* base duty cycle */

boolean_t       spa_create_process = B_TRUE;    /* no process ==> no sysdc */

/*
 * Report any spa_load_verify errors found, but do not fail spa_load.
 * This is used by zdb to analyze non-idle pools.
 */
boolean_t       spa_load_verify_dryrun = B_FALSE;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define TRYIMPORT_NAME  "$import"

/*
 * For debugging purposes: print out vdev tree during pool import.
 */
int spa_load_print_vdev_tree = B_FALSE;

/*
 * A non-zero value for zfs_max_missing_tvds means that we allow importing
 * pools with missing top-level vdevs. This is strictly intended for advanced
 * pool recovery cases since missing data is almost inevitable. Pools with
 * missing devices can only be imported read-only for safety reasons, and their
 * fail-mode will be automatically set to "continue".
 *
 * With 1 missing vdev we should be able to import the pool and mount all
 * datasets. User data that was not modified after the missing device has been
 * added should be recoverable. This means that snapshots created prior to the
 * addition of that device should be completely intact.
 *
 * With 2 missing vdevs, some datasets may fail to mount since there are
 * dataset statistics that are stored as regular metadata. Some data might be
 * recoverable if those vdevs were added recently.
 *
 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
 * may be missing entirely. Chances of data recovery are very low. Note that
 * there are also risks of performing an inadvertent rewind as we might be
 * missing all the vdevs with the latest uberblocks.
 */
unsigned long   zfs_max_missing_tvds = 0;

/*
 * The parameters below are similar to zfs_max_missing_tvds but are only
 * intended for a preliminary open of the pool with an untrusted config which
 * might be incomplete or out-dated.
 *
 * We are more tolerant for pools opened from a cachefile since we could have
 * an out-dated cachefile where a device removal was not registered.
 * We could have set the limit arbitrarily high but in the case where devices
 * are really missing we would want to return the proper error codes; we chose
 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
 * and we get a chance to retrieve the trusted config.
 */
uint64_t        zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;

/*
 * In the case where config was assembled by scanning device paths (/dev/dsks
 * by default) we are less tolerant since all the existing devices should have
 * been detected and we want spa_load to return the right error codes.
 */
uint64_t        zfs_max_missing_tvds_scan = 0;

/*
 * Debugging aid that pauses spa_sync() towards the end.
 */
boolean_t       zfs_pause_spa_sync = B_FALSE;

/*
 * Variables to indicate the livelist condense zthr func should wait at certain
 * points for the livelist to be removed - used to test condense/destroy races
 */
int zfs_livelist_condense_zthr_pause = 0;
int zfs_livelist_condense_sync_pause = 0;

/*
 * Variables to track whether or not condense cancellation has been
 * triggered in testing.
 */
int zfs_livelist_condense_sync_cancel = 0;
int zfs_livelist_condense_zthr_cancel = 0;

/*
 * Variable to track whether or not extra ALLOC blkptrs were added to a
 * livelist entry while it was being condensed (caused by the way we track
 * remapped blkptrs in dbuf_remap_impl)
 */
int zfs_livelist_condense_new_alloc = 0;

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
        const char *propname = zpool_prop_to_name(prop);
        nvlist_t *propval;

        VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
        VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

        if (strval != NULL)
                VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
        else
                VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

        VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
        nvlist_free(propval);
}
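
/*
 * Usage sketch (illustrative only): a call such as
 *
 *      spa_prop_add_list(nvl, ZPOOL_PROP_CAPACITY, NULL, 42, ZPROP_SRC_NONE);
 *
 * conceptually adds the nested nvlist
 *
 *      "capacity" -> { "source" = ZPROP_SRC_NONE, "value" = 42 }
 *
 * to nvl; when strval is non-NULL the value is stored as a string rather
 * than as the uint64 intval.
 */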

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
        vdev_t *rvd = spa->spa_root_vdev;
        dsl_pool_t *pool = spa->spa_dsl_pool;
        uint64_t size, alloc, cap, version;
        const zprop_source_t src = ZPROP_SRC_NONE;
        spa_config_dirent_t *dp;
        metaslab_class_t *mc = spa_normal_class(spa);

        ASSERT(MUTEX_HELD(&spa->spa_props_lock));

        if (rvd != NULL) {
                alloc = metaslab_class_get_alloc(mc);
                alloc += metaslab_class_get_alloc(spa_special_class(spa));
                alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
                alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));

                size = metaslab_class_get_space(mc);
                size += metaslab_class_get_space(spa_special_class(spa));
                size += metaslab_class_get_space(spa_dedup_class(spa));
                size += metaslab_class_get_space(spa_embedded_log_class(spa));

                spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
                    size - alloc, src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
                    spa->spa_checkpoint_info.sci_dspace, src);

                spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
                    metaslab_class_fragmentation(mc), src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
                    metaslab_class_expandable_space(mc), src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
                    (spa_mode(spa) == SPA_MODE_READ), src);

                cap = (size == 0) ? 0 : (alloc * 100 / size);
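                /*
                 * Worked example (illustrative numbers): with alloc = 25 GiB
                 * and size = 100 GiB, cap above is 25.  The integer division
                 * truncates, so 25.9% of the pool allocated still reports a
                 * capacity of 25.
                 */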
                spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

                spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
                    ddt_get_pool_dedup_ratio(spa), src);

                spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
                    rvd->vdev_state, src);

                version = spa_version(spa);
                if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
                            version, ZPROP_SRC_DEFAULT);
                } else {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
                            version, ZPROP_SRC_LOCAL);
                }
                spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
                    NULL, spa_load_guid(spa), src);
        }

        if (pool != NULL) {
                /*
                 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS,
                 * when opening pools before this version freedir will be NULL.
                 */
                if (pool->dp_free_dir != NULL) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
                            dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
                            src);
                } else {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
                            NULL, 0, src);
                }

                if (pool->dp_leak_dir != NULL) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
                            dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
                            src);
                } else {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
                            NULL, 0, src);
                }
        }

        spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

        if (spa->spa_comment != NULL) {
                spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
                    0, ZPROP_SRC_LOCAL);
        }

        if (spa->spa_compatibility != NULL) {
                spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
                    spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
        }

        if (spa->spa_root != NULL)
                spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
                    0, ZPROP_SRC_LOCAL);

        if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
                spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
                    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
        } else {
                spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
                    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
        }

        if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
                spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
                    DNODE_MAX_SIZE, ZPROP_SRC_NONE);
        } else {
                spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
                    DNODE_MIN_SIZE, ZPROP_SRC_NONE);
        }

        if ((dp = list_head(&spa->spa_config_list)) != NULL) {
                if (dp->scd_path == NULL) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
                            "none", 0, ZPROP_SRC_LOCAL);
                } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
                            dp->scd_path, 0, ZPROP_SRC_LOCAL);
                }
        }
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
        objset_t *mos = spa->spa_meta_objset;
        zap_cursor_t zc;
        zap_attribute_t za;
        dsl_pool_t *dp;
        int err;

        err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
        if (err)
                return (err);

        dp = spa_get_dsl(spa);
        dsl_pool_config_enter(dp, FTAG);
        mutex_enter(&spa->spa_props_lock);

        /*
         * Get properties from the spa config.
         */
        spa_prop_get_config(spa, nvp);

        /* If no pool property object, no more prop to get. */
        if (mos == NULL || spa->spa_pool_props_object == 0)
                goto out;

        /*
         * Get properties from the MOS pool property object.
         */
        for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
            (err = zap_cursor_retrieve(&zc, &za)) == 0;
            zap_cursor_advance(&zc)) {
                uint64_t intval = 0;
                char *strval = NULL;
                zprop_source_t src = ZPROP_SRC_DEFAULT;
                zpool_prop_t prop;

                if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
                        continue;

                switch (za.za_integer_length) {
                case 8:
                        /* integer property */
                        if (za.za_first_integer !=
                            zpool_prop_default_numeric(prop))
                                src = ZPROP_SRC_LOCAL;

                        if (prop == ZPOOL_PROP_BOOTFS) {
                                dsl_dataset_t *ds = NULL;

                                err = dsl_dataset_hold_obj(dp,
                                    za.za_first_integer, FTAG, &ds);
                                if (err != 0)
                                        break;

                                strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
                                    KM_SLEEP);
                                dsl_dataset_name(ds, strval);
                                dsl_dataset_rele(ds, FTAG);
                        } else {
                                strval = NULL;
                                intval = za.za_first_integer;
                        }

                        spa_prop_add_list(*nvp, prop, strval, intval, src);

                        if (strval != NULL)
                                kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);

                        break;

                case 1:
                        /* string property */
                        strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
                        err = zap_lookup(mos, spa->spa_pool_props_object,
                            za.za_name, 1, za.za_num_integers, strval);
                        if (err) {
                                kmem_free(strval, za.za_num_integers);
                                break;
                        }
                        spa_prop_add_list(*nvp, prop, strval, 0, src);
                        kmem_free(strval, za.za_num_integers);
                        break;

                default:
                        break;
                }
        }
        zap_cursor_fini(&zc);
out:
        mutex_exit(&spa->spa_props_lock);
        dsl_pool_config_exit(dp, FTAG);
        if (err && err != ENOENT) {
                nvlist_free(*nvp);
                *nvp = NULL;
                return (err);
        }

        return (0);
}
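
/*
 * Minimal caller sketch (illustrative; error handling elided):
 *
 *      nvlist_t *nvp = NULL;
 *      if (spa_prop_get(spa, &nvp) == 0) {
 *              ... consume the properties in nvp ...
 *              nvlist_free(nvp);
 *      }
 *
 * On success *nvp is allocated here and owned by the caller; on failure
 * (other than ENOENT from the ZAP walk) the list is freed and *nvp is set
 * back to NULL.
 */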

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
        nvpair_t *elem;
        int error = 0, reset_bootfs = 0;
        uint64_t objnum = 0;
        boolean_t has_feature = B_FALSE;

        elem = NULL;
        while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
                uint64_t intval;
                char *strval, *slash, *check, *fname;
                const char *propname = nvpair_name(elem);
                zpool_prop_t prop = zpool_name_to_prop(propname);

                switch (prop) {
                case ZPOOL_PROP_INVAL:
                        if (!zpool_prop_feature(propname)) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        /*
                         * Sanitize the input.
                         */
                        if (nvpair_type(elem) != DATA_TYPE_UINT64) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        if (nvpair_value_uint64(elem, &intval) != 0) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        if (intval != 0) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        fname = strchr(propname, '@') + 1;
                        if (zfeature_lookup_name(fname, NULL) != 0) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        has_feature = B_TRUE;
                        break;

                case ZPOOL_PROP_VERSION:
                        error = nvpair_value_uint64(elem, &intval);
                        if (!error &&
                            (intval < spa_version(spa) ||
                            intval > SPA_VERSION_BEFORE_FEATURES ||
                            has_feature))
                                error = SET_ERROR(EINVAL);
                        break;

                case ZPOOL_PROP_DELEGATION:
                case ZPOOL_PROP_AUTOREPLACE:
                case ZPOOL_PROP_LISTSNAPS:
                case ZPOOL_PROP_AUTOEXPAND:
                case ZPOOL_PROP_AUTOTRIM:
                        error = nvpair_value_uint64(elem, &intval);
                        if (!error && intval > 1)
                                error = SET_ERROR(EINVAL);
                        break;

                case ZPOOL_PROP_MULTIHOST:
                        error = nvpair_value_uint64(elem, &intval);
                        if (!error && intval > 1)
                                error = SET_ERROR(EINVAL);

                        if (!error) {
                                uint32_t hostid = zone_get_hostid(NULL);
                                if (hostid)
                                        spa->spa_hostid = hostid;
                                else
                                        error = SET_ERROR(ENOTSUP);
                        }

                        break;

                case ZPOOL_PROP_BOOTFS:
                        /*
                         * If the pool version is less than SPA_VERSION_BOOTFS,
                         * or the pool is still being created (version == 0),
                         * the bootfs property cannot be set.
                         */
                        if (spa_version(spa) < SPA_VERSION_BOOTFS) {
                                error = SET_ERROR(ENOTSUP);
                                break;
                        }

                        /*
                         * Make sure the vdev config is bootable
                         */
                        if (!vdev_is_bootable(spa->spa_root_vdev)) {
                                error = SET_ERROR(ENOTSUP);
                                break;
                        }

                        reset_bootfs = 1;

                        error = nvpair_value_string(elem, &strval);

                        if (!error) {
                                objset_t *os;

                                if (strval == NULL || strval[0] == '\0') {
                                        objnum = zpool_prop_default_numeric(
                                            ZPOOL_PROP_BOOTFS);
                                        break;
                                }

                                error = dmu_objset_hold(strval, FTAG, &os);
                                if (error != 0)
                                        break;

                                /* Must be ZPL. */
                                if (dmu_objset_type(os) != DMU_OST_ZFS) {
                                        error = SET_ERROR(ENOTSUP);
                                } else {
                                        objnum = dmu_objset_id(os);
                                }
                                dmu_objset_rele(os, FTAG);
                        }
                        break;

                case ZPOOL_PROP_FAILUREMODE:
                        error = nvpair_value_uint64(elem, &intval);
                        if (!error && intval > ZIO_FAILURE_MODE_PANIC)
                                error = SET_ERROR(EINVAL);

                        /*
                         * This is a special case which only occurs when
                         * the pool has completely failed. This allows
                         * the user to change the in-core failmode property
                         * without syncing it out to disk (I/Os might
                         * currently be blocked). We do this by returning
                         * EIO to the caller (spa_prop_set) to trick it
                         * into thinking we encountered a property validation
                         * error.
                         */
                        if (!error && spa_suspended(spa)) {
                                spa->spa_failmode = intval;
                                error = SET_ERROR(EIO);
                        }
                        break;

                case ZPOOL_PROP_CACHEFILE:
                        if ((error = nvpair_value_string(elem, &strval)) != 0)
                                break;

                        if (strval[0] == '\0')
                                break;

                        if (strcmp(strval, "none") == 0)
                                break;

                        if (strval[0] != '/') {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        slash = strrchr(strval, '/');
                        ASSERT(slash != NULL);

                        if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
                            strcmp(slash, "/..") == 0)
                                error = SET_ERROR(EINVAL);
                        break;

                case ZPOOL_PROP_COMMENT:
                        if ((error = nvpair_value_string(elem, &strval)) != 0)
                                break;
                        for (check = strval; *check != '\0'; check++) {
                                if (!isprint(*check)) {
                                        error = SET_ERROR(EINVAL);
                                        break;
                                }
                        }
                        if (strlen(strval) > ZPROP_MAX_COMMENT)
                                error = SET_ERROR(E2BIG);
                        break;

                default:
                        break;
                }

                if (error)
                        break;
        }

        (void) nvlist_remove_all(props,
            zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));

        if (!error && reset_bootfs) {
                error = nvlist_remove(props,
                    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

                if (!error) {
                        error = nvlist_add_uint64(props,
                            zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
                }
        }

        return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
        char *cachefile;
        spa_config_dirent_t *dp;

        if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
            &cachefile) != 0)
                return;

        dp = kmem_alloc(sizeof (spa_config_dirent_t),
            KM_SLEEP);

        if (cachefile[0] == '\0')
                dp->scd_path = spa_strdup(spa_config_path);
        else if (strcmp(cachefile, "none") == 0)
                dp->scd_path = NULL;
        else
                dp->scd_path = spa_strdup(cachefile);

        list_insert_head(&spa->spa_config_list, dp);
        if (need_sync)
                spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
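
/*
 * Summary of the cachefile values handled above (illustrative; assumes
 * spa_config_path is the usual default, e.g. /etc/zfs/zpool.cache):
 *
 *      ""      -> scd_path = spa_config_path (the default cache file)
 *      "none"  -> scd_path = NULL (no cache file is written)
 *      "/a/b"  -> scd_path = "/a/b" (an explicit alternate location)
 */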

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
        int error;
        nvpair_t *elem = NULL;
        boolean_t need_sync = B_FALSE;

        if ((error = spa_prop_validate(spa, nvp)) != 0)
                return (error);

        while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
                zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

                if (prop == ZPOOL_PROP_CACHEFILE ||
                    prop == ZPOOL_PROP_ALTROOT ||
                    prop == ZPOOL_PROP_READONLY)
                        continue;

                if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
                        uint64_t ver;

                        if (prop == ZPOOL_PROP_VERSION) {
                                VERIFY(nvpair_value_uint64(elem, &ver) == 0);
                        } else {
                                ASSERT(zpool_prop_feature(nvpair_name(elem)));
                                ver = SPA_VERSION_FEATURES;
                                need_sync = B_TRUE;
                        }

                        /* Save time if the version is already set. */
                        if (ver == spa_version(spa))
                                continue;

                        /*
                         * In addition to the pool directory object, we might
                         * create the pool properties object, the features for
                         * read object, the features for write object, or the
                         * feature descriptions object.
                         */
                        error = dsl_sync_task(spa->spa_name, NULL,
                            spa_sync_version, &ver,
                            6, ZFS_SPACE_CHECK_RESERVED);
                        if (error)
                                return (error);
                        continue;
                }

                need_sync = B_TRUE;
                break;
        }

        if (need_sync) {
                return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
                    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
        }

        return (0);
}
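
/*
 * Usage sketch for spa_prop_set() (illustrative; error handling elided):
 *
 *      nvlist_t *props = fnvlist_alloc();
 *      fnvlist_add_uint64(props,
 *          zpool_prop_to_name(ZPOOL_PROP_AUTOTRIM), 1);
 *      error = spa_prop_set(spa, props);
 *      fnvlist_free(props);
 *
 * Note that cachefile, altroot and readonly only affect in-core state, so
 * they are skipped above and never reach the spa_sync_props sync task.
 */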

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
        if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
                VERIFY(zap_remove(spa->spa_meta_objset,
                    spa->spa_pool_props_object,
                    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
                spa->spa_bootfs = 0;
        }
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
        uint64_t *newguid __maybe_unused = arg;
        spa_t *spa = dmu_tx_pool(tx)->dp_spa;
        vdev_t *rvd = spa->spa_root_vdev;
        uint64_t vdev_state;

        if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
                int error = (spa_has_checkpoint(spa)) ?
                    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
                return (SET_ERROR(error));
        }

        spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
        vdev_state = rvd->vdev_state;
        spa_config_exit(spa, SCL_STATE, FTAG);

        if (vdev_state != VDEV_STATE_HEALTHY)
                return (SET_ERROR(ENXIO));

        ASSERT3U(spa_guid(spa), !=, *newguid);

        return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
        uint64_t *newguid = arg;
        spa_t *spa = dmu_tx_pool(tx)->dp_spa;
        uint64_t oldguid;
        vdev_t *rvd = spa->spa_root_vdev;

        oldguid = spa_guid(spa);

        spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
        rvd->vdev_guid = *newguid;
        rvd->vdev_guid_sum += (*newguid - oldguid);
        vdev_config_dirty(rvd);
        spa_config_exit(spa, SCL_STATE, FTAG);

        spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
            (u_longlong_t)oldguid, (u_longlong_t)*newguid);
}
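
/*
 * Worked example for the guid-sum update above (illustrative numbers): if
 * the root vdev's old guid is 1000, the new guid is 1500, and vdev_guid_sum
 * was 4000, the sum becomes 4500.  Only the root's contribution changed, so
 * adding the delta keeps vdev_guid_sum consistent without re-walking the
 * vdev tree.
 */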

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
        int error;
        uint64_t guid;

        mutex_enter(&spa->spa_vdev_top_lock);
        mutex_enter(&spa_namespace_lock);
        guid = spa_generate_guid(NULL);

        error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
            spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

        if (error == 0) {
                spa_write_cachefile(spa, B_FALSE, B_TRUE);
                spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
        }

        mutex_exit(&spa_namespace_lock);
        mutex_exit(&spa->spa_vdev_top_lock);

        return (error);
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
        const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
        const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
        int ret;

        ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
            sizeof (zbookmark_phys_t));

        return (TREE_ISIGN(ret));
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
        ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

        bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
        bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

        avl_create(&spa->spa_errlist_scrub,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
        avl_create(&spa->spa_errlist_last,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
        const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
        enum zti_modes mode = ztip->zti_mode;
        uint_t value = ztip->zti_value;
        uint_t count = ztip->zti_count;
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
        uint_t cpus, flags = TASKQ_DYNAMIC;
        boolean_t batch = B_FALSE;

        switch (mode) {
        case ZTI_MODE_FIXED:
                ASSERT3U(value, >, 0);
                break;

        case ZTI_MODE_BATCH:
                batch = B_TRUE;
                flags |= TASKQ_THREADS_CPU_PCT;
                value = MIN(zio_taskq_batch_pct, 100);
                break;

        case ZTI_MODE_SCALE:
                flags |= TASKQ_THREADS_CPU_PCT;
                /*
                 * We want more taskqs to reduce lock contention, but we want
                 * less for better request ordering and CPU utilization.
                 */
                cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
                if (zio_taskq_batch_tpq > 0) {
                        count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
                            zio_taskq_batch_tpq);
                } else {
                        /*
                         * Prefer 6 threads per taskq, but no more taskqs
                         * than threads in them on large systems. For 80%:
                         *
                         *                 taskq   taskq   total
                         *   cpus    taskqs  percent threads threads
                         *   ------- ------- ------- ------- -------
                         *   1       1       80%     1       1
                         *   2       1       80%     1       1
                         *   4       1       80%     3       3
                         *   8       2       40%     3       6
                         *   16      3       27%     4       12
                         *   32      5       16%     5       25
                         *   64      7       11%     7       49
                         *   128     10      8%      10      100
                         *   256     14      6%      15      210
                         */
                        count = 1 + cpus / 6;
                        while (count * count > cpus)
                                count--;
                }
                /* Limit each taskq within 100% to not trigger assertion. */
                count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
                value = (zio_taskq_batch_pct + count / 2) / count;
                break;

        case ZTI_MODE_NULL:
                tqs->stqs_count = 0;
                tqs->stqs_taskq = NULL;
                return;

        default:
                panic("unrecognized mode for %s_%s taskq (%u:%u) in "
                    "spa_activate()",
                    zio_type_name[t], zio_taskq_types[q], mode, value);
                break;
        }

        ASSERT3U(count, >, 0);
        tqs->stqs_count = count;
        tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

        for (uint_t i = 0; i < count; i++) {
                taskq_t *tq;
                char name[32];

                if (count > 1)
                        (void) snprintf(name, sizeof (name), "%s_%s_%u",
                            zio_type_name[t], zio_taskq_types[q], i);
                else
                        (void) snprintf(name, sizeof (name), "%s_%s",
                            zio_type_name[t], zio_taskq_types[q]);

                if (zio_taskq_sysdc && spa->spa_proc != &p0) {
                        if (batch)
                                flags |= TASKQ_DC_BATCH;

                        tq = taskq_create_sysdc(name, value, 50, INT_MAX,
                            spa->spa_proc, zio_taskq_basedc, flags);
                } else {
                        pri_t pri = maxclsyspri;
                        /*
                         * The write issue taskq can be extremely CPU
                         * intensive.  Run it at slightly less important
                         * priority than the other taskqs.
                         *
                         * Under Linux and FreeBSD this means incrementing
                         * the priority value as opposed to platforms like
                         * illumos where it should be decremented.
                         *
                         * On FreeBSD, if priorities divided by four (RQ_PPQ)
                         * are equal then a difference between them is
                         * insignificant.
                         */
                        if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
#if defined(__linux__)
                                pri++;
#elif defined(__FreeBSD__)
                                pri += 4;
#else
#error "unknown OS"
#endif
                        }
                        tq = taskq_create_proc(name, value, pri, 50,
                            INT_MAX, spa->spa_proc, flags);
                }

                tqs->stqs_taskq[i] = tq;
        }
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

        if (tqs->stqs_taskq == NULL) {
                ASSERT3U(tqs->stqs_count, ==, 0);
                return;
        }

        for (uint_t i = 0; i < tqs->stqs_count; i++) {
                ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
                taskq_destroy(tqs->stqs_taskq[i]);
        }

        kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
        tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
        taskq_t *tq;

        ASSERT3P(tqs->stqs_taskq, !=, NULL);
        ASSERT3U(tqs->stqs_count, !=, 0);

        if (tqs->stqs_count == 1) {
                tq = tqs->stqs_taskq[0];
        } else {
                tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
        }

        taskq_dispatch_ent(tq, func, arg, flags, ent);
}
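
/*
 * Dispatch sketch (illustrative numbers): for a pool whose READ interrupt
 * row is ZTI_SCALE with, say, three taskqs, the modulo arithmetic above
 * maps a gethrtime() value of 1000003 to taskq 1000003 % 3 == 1, so
 * successive dispatches spread across the taskqs pseudo-randomly as the
 * clock's low bits change.
 */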

/*
 * Same as spa_taskq_dispatch_ent() but block on the task until completion.
 */
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags)
{
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
        taskq_t *tq;
        taskqid_t id;

        ASSERT3P(tqs->stqs_taskq, !=, NULL);
        ASSERT3U(tqs->stqs_count, !=, 0);

        if (tqs->stqs_count == 1) {
                tq = tqs->stqs_taskq[0];
        } else {
                tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
        }

        id = taskq_dispatch(tq, func, arg, flags);
        if (id)
                taskq_wait_id(tq, id);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
        for (int t = 0; t < ZIO_TYPES; t++) {
                for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
                        spa_taskqs_init(spa, t, q);
                }
        }
}

/*
 * Disabled until spa_thread() can be adapted for Linux.
 */
#undef HAVE_SPA_THREAD

#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
        psetid_t zio_taskq_psrset_bind = PS_NONE;
        callb_cpr_t cprinfo;

        spa_t *spa = arg;
        user_t *pu = PTOU(curproc);

        CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
            spa->spa_name);

        ASSERT(curproc != &p0);
        (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
            "zpool-%s", spa->spa_name);
        (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

        /* bind this thread to the requested psrset */
        if (zio_taskq_psrset_bind != PS_NONE) {
                pool_lock();
                mutex_enter(&cpu_lock);
                mutex_enter(&pidlock);
                mutex_enter(&curproc->p_lock);

                if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
                    0, NULL, NULL) == 0) {
                        curthread->t_bind_pset = zio_taskq_psrset_bind;
                } else {
                        cmn_err(CE_WARN,
                            "Couldn't bind process for zfs pool \"%s\" to "
                            "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
                }

                mutex_exit(&curproc->p_lock);
                mutex_exit(&pidlock);
                mutex_exit(&cpu_lock);
                pool_unlock();
        }

        if (zio_taskq_sysdc) {
Macy sysdc_thread_enter(curthread, 100, 0); 1206eda14cbcSMatt Macy } 1207eda14cbcSMatt Macy 1208eda14cbcSMatt Macy spa->spa_proc = curproc; 1209eda14cbcSMatt Macy spa->spa_did = curthread->t_did; 1210eda14cbcSMatt Macy 1211eda14cbcSMatt Macy spa_create_zio_taskqs(spa); 1212eda14cbcSMatt Macy 1213eda14cbcSMatt Macy mutex_enter(&spa->spa_proc_lock); 1214eda14cbcSMatt Macy ASSERT(spa->spa_proc_state == SPA_PROC_CREATED); 1215eda14cbcSMatt Macy 1216eda14cbcSMatt Macy spa->spa_proc_state = SPA_PROC_ACTIVE; 1217eda14cbcSMatt Macy cv_broadcast(&spa->spa_proc_cv); 1218eda14cbcSMatt Macy 1219eda14cbcSMatt Macy CALLB_CPR_SAFE_BEGIN(&cprinfo); 1220eda14cbcSMatt Macy while (spa->spa_proc_state == SPA_PROC_ACTIVE) 1221eda14cbcSMatt Macy cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1222eda14cbcSMatt Macy CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock); 1223eda14cbcSMatt Macy 1224eda14cbcSMatt Macy ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE); 1225eda14cbcSMatt Macy spa->spa_proc_state = SPA_PROC_GONE; 1226eda14cbcSMatt Macy spa->spa_proc = &p0; 1227eda14cbcSMatt Macy cv_broadcast(&spa->spa_proc_cv); 1228eda14cbcSMatt Macy CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */ 1229eda14cbcSMatt Macy 1230eda14cbcSMatt Macy mutex_enter(&curproc->p_lock); 1231eda14cbcSMatt Macy lwp_exit(); 1232eda14cbcSMatt Macy } 1233eda14cbcSMatt Macy #endif 1234eda14cbcSMatt Macy 1235eda14cbcSMatt Macy /* 1236eda14cbcSMatt Macy * Activate an uninitialized pool. 1237eda14cbcSMatt Macy */ 1238eda14cbcSMatt Macy static void 1239eda14cbcSMatt Macy spa_activate(spa_t *spa, spa_mode_t mode) 1240eda14cbcSMatt Macy { 1241eda14cbcSMatt Macy ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 1242eda14cbcSMatt Macy 1243eda14cbcSMatt Macy spa->spa_state = POOL_STATE_ACTIVE; 1244eda14cbcSMatt Macy spa->spa_mode = mode; 1245eda14cbcSMatt Macy 1246eda14cbcSMatt Macy spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops); 1247eda14cbcSMatt Macy spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops); 1248184c1b94SMartin Matuska spa->spa_embedded_log_class = 1249184c1b94SMartin Matuska metaslab_class_create(spa, zfs_metaslab_ops); 1250eda14cbcSMatt Macy spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops); 1251eda14cbcSMatt Macy spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops); 1252eda14cbcSMatt Macy 1253eda14cbcSMatt Macy /* Try to create a covering process */ 1254eda14cbcSMatt Macy mutex_enter(&spa->spa_proc_lock); 1255eda14cbcSMatt Macy ASSERT(spa->spa_proc_state == SPA_PROC_NONE); 1256eda14cbcSMatt Macy ASSERT(spa->spa_proc == &p0); 1257eda14cbcSMatt Macy spa->spa_did = 0; 1258eda14cbcSMatt Macy 1259eda14cbcSMatt Macy #ifdef HAVE_SPA_THREAD 1260eda14cbcSMatt Macy /* Only create a process if we're going to be around a while. 
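 * If the process cannot be created (or HAVE_SPA_THREAD is undefined,
 * as it currently is above), spa->spa_proc stays at &p0 and the zio
 * taskqs are created directly in the caller's context below.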
*/ 1261eda14cbcSMatt Macy if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) { 1262eda14cbcSMatt Macy if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri, 1263eda14cbcSMatt Macy NULL, 0) == 0) { 1264eda14cbcSMatt Macy spa->spa_proc_state = SPA_PROC_CREATED; 1265eda14cbcSMatt Macy while (spa->spa_proc_state == SPA_PROC_CREATED) { 1266eda14cbcSMatt Macy cv_wait(&spa->spa_proc_cv, 1267eda14cbcSMatt Macy &spa->spa_proc_lock); 1268eda14cbcSMatt Macy } 1269eda14cbcSMatt Macy ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1270eda14cbcSMatt Macy ASSERT(spa->spa_proc != &p0); 1271eda14cbcSMatt Macy ASSERT(spa->spa_did != 0); 1272eda14cbcSMatt Macy } else { 1273eda14cbcSMatt Macy #ifdef _KERNEL 1274eda14cbcSMatt Macy cmn_err(CE_WARN, 1275eda14cbcSMatt Macy "Couldn't create process for zfs pool \"%s\"\n", 1276eda14cbcSMatt Macy spa->spa_name); 1277eda14cbcSMatt Macy #endif 1278eda14cbcSMatt Macy } 1279eda14cbcSMatt Macy } 1280eda14cbcSMatt Macy #endif /* HAVE_SPA_THREAD */ 1281eda14cbcSMatt Macy mutex_exit(&spa->spa_proc_lock); 1282eda14cbcSMatt Macy 1283eda14cbcSMatt Macy /* If we didn't create a process, we need to create our taskqs. */ 1284eda14cbcSMatt Macy if (spa->spa_proc == &p0) { 1285eda14cbcSMatt Macy spa_create_zio_taskqs(spa); 1286eda14cbcSMatt Macy } 1287eda14cbcSMatt Macy 1288eda14cbcSMatt Macy for (size_t i = 0; i < TXG_SIZE; i++) { 1289eda14cbcSMatt Macy spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, 1290eda14cbcSMatt Macy ZIO_FLAG_CANFAIL); 1291eda14cbcSMatt Macy } 1292eda14cbcSMatt Macy 1293eda14cbcSMatt Macy list_create(&spa->spa_config_dirty_list, sizeof (vdev_t), 1294eda14cbcSMatt Macy offsetof(vdev_t, vdev_config_dirty_node)); 1295eda14cbcSMatt Macy list_create(&spa->spa_evicting_os_list, sizeof (objset_t), 1296eda14cbcSMatt Macy offsetof(objset_t, os_evicting_node)); 1297eda14cbcSMatt Macy list_create(&spa->spa_state_dirty_list, sizeof (vdev_t), 1298eda14cbcSMatt Macy offsetof(vdev_t, vdev_state_dirty_node)); 1299eda14cbcSMatt Macy 1300eda14cbcSMatt Macy txg_list_create(&spa->spa_vdev_txg_list, spa, 1301eda14cbcSMatt Macy offsetof(struct vdev, vdev_txg_node)); 1302eda14cbcSMatt Macy 1303eda14cbcSMatt Macy avl_create(&spa->spa_errlist_scrub, 1304eda14cbcSMatt Macy spa_error_entry_compare, sizeof (spa_error_entry_t), 1305eda14cbcSMatt Macy offsetof(spa_error_entry_t, se_avl)); 1306eda14cbcSMatt Macy avl_create(&spa->spa_errlist_last, 1307eda14cbcSMatt Macy spa_error_entry_compare, sizeof (spa_error_entry_t), 1308eda14cbcSMatt Macy offsetof(spa_error_entry_t, se_avl)); 1309eda14cbcSMatt Macy 1310eda14cbcSMatt Macy spa_keystore_init(&spa->spa_keystore); 1311eda14cbcSMatt Macy 1312eda14cbcSMatt Macy /* 1313eda14cbcSMatt Macy * This taskq is used to perform zvol-minor-related tasks 1314eda14cbcSMatt Macy * asynchronously. This has several advantages, including easy 1315180f8225SMatt Macy * resolution of various deadlocks. 1316eda14cbcSMatt Macy * 1317eda14cbcSMatt Macy * The taskq must be single threaded to ensure tasks are always 1318eda14cbcSMatt Macy * processed in the order in which they were dispatched. 1319eda14cbcSMatt Macy * 1320eda14cbcSMatt Macy * A taskq per pool allows one to keep the pools independent. 1321eda14cbcSMatt Macy * This way if one pool is suspended, it will not impact another. 1322eda14cbcSMatt Macy * 1323eda14cbcSMatt Macy * The preferred location to dispatch a zvol minor task is a sync 1324eda14cbcSMatt Macy * task. 
In this context, there is easy access to the spa_t and minimal 1325eda14cbcSMatt Macy * error handling is required because the sync task must succeed. 1326eda14cbcSMatt Macy */ 1327eda14cbcSMatt Macy spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri, 1328eda14cbcSMatt Macy 1, INT_MAX, 0); 1329eda14cbcSMatt Macy 1330eda14cbcSMatt Macy /* 1331eda14cbcSMatt Macy * Taskq dedicated to prefetcher threads: this is used to prevent the 1332eda14cbcSMatt Macy * pool traverse code from monopolizing the global (and limited) 1333eda14cbcSMatt Macy * system_taskq by inappropriately scheduling long running tasks on it. 1334eda14cbcSMatt Macy */ 13357877fdebSMatt Macy spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100, 13367877fdebSMatt Macy defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1337eda14cbcSMatt Macy 1338eda14cbcSMatt Macy /* 1339eda14cbcSMatt Macy * The taskq to upgrade datasets in this pool. Currently used by 1340eda14cbcSMatt Macy * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA. 1341eda14cbcSMatt Macy */ 13427877fdebSMatt Macy spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100, 13437877fdebSMatt Macy defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1344eda14cbcSMatt Macy } 1345eda14cbcSMatt Macy 1346eda14cbcSMatt Macy /* 1347eda14cbcSMatt Macy * Opposite of spa_activate(). 1348eda14cbcSMatt Macy */ 1349eda14cbcSMatt Macy static void 1350eda14cbcSMatt Macy spa_deactivate(spa_t *spa) 1351eda14cbcSMatt Macy { 1352eda14cbcSMatt Macy ASSERT(spa->spa_sync_on == B_FALSE); 1353eda14cbcSMatt Macy ASSERT(spa->spa_dsl_pool == NULL); 1354eda14cbcSMatt Macy ASSERT(spa->spa_root_vdev == NULL); 1355eda14cbcSMatt Macy ASSERT(spa->spa_async_zio_root == NULL); 1356eda14cbcSMatt Macy ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); 1357eda14cbcSMatt Macy 1358eda14cbcSMatt Macy spa_evicting_os_wait(spa); 1359eda14cbcSMatt Macy 1360eda14cbcSMatt Macy if (spa->spa_zvol_taskq) { 1361eda14cbcSMatt Macy taskq_destroy(spa->spa_zvol_taskq); 1362eda14cbcSMatt Macy spa->spa_zvol_taskq = NULL; 1363eda14cbcSMatt Macy } 1364eda14cbcSMatt Macy 1365eda14cbcSMatt Macy if (spa->spa_prefetch_taskq) { 1366eda14cbcSMatt Macy taskq_destroy(spa->spa_prefetch_taskq); 1367eda14cbcSMatt Macy spa->spa_prefetch_taskq = NULL; 1368eda14cbcSMatt Macy } 1369eda14cbcSMatt Macy 1370eda14cbcSMatt Macy if (spa->spa_upgrade_taskq) { 1371eda14cbcSMatt Macy taskq_destroy(spa->spa_upgrade_taskq); 1372eda14cbcSMatt Macy spa->spa_upgrade_taskq = NULL; 1373eda14cbcSMatt Macy } 1374eda14cbcSMatt Macy 1375eda14cbcSMatt Macy txg_list_destroy(&spa->spa_vdev_txg_list); 1376eda14cbcSMatt Macy 1377eda14cbcSMatt Macy list_destroy(&spa->spa_config_dirty_list); 1378eda14cbcSMatt Macy list_destroy(&spa->spa_evicting_os_list); 1379eda14cbcSMatt Macy list_destroy(&spa->spa_state_dirty_list); 1380eda14cbcSMatt Macy 1381eda14cbcSMatt Macy taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 1382eda14cbcSMatt Macy 1383eda14cbcSMatt Macy for (int t = 0; t < ZIO_TYPES; t++) { 1384eda14cbcSMatt Macy for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { 1385eda14cbcSMatt Macy spa_taskqs_fini(spa, t, q); 1386eda14cbcSMatt Macy } 1387eda14cbcSMatt Macy } 1388eda14cbcSMatt Macy 1389eda14cbcSMatt Macy for (size_t i = 0; i < TXG_SIZE; i++) { 1390eda14cbcSMatt Macy ASSERT3P(spa->spa_txg_zio[i], !=, NULL); 1391eda14cbcSMatt Macy VERIFY0(zio_wait(spa->spa_txg_zio[i])); 1392eda14cbcSMatt Macy spa->spa_txg_zio[i] = NULL; 1393eda14cbcSMatt Macy } 1394eda14cbcSMatt Macy 1395eda14cbcSMatt Macy 
metaslab_class_destroy(spa->spa_normal_class); 1396eda14cbcSMatt Macy spa->spa_normal_class = NULL; 1397eda14cbcSMatt Macy 1398eda14cbcSMatt Macy metaslab_class_destroy(spa->spa_log_class); 1399eda14cbcSMatt Macy spa->spa_log_class = NULL; 1400eda14cbcSMatt Macy 1401184c1b94SMartin Matuska metaslab_class_destroy(spa->spa_embedded_log_class); 1402184c1b94SMartin Matuska spa->spa_embedded_log_class = NULL; 1403184c1b94SMartin Matuska 1404eda14cbcSMatt Macy metaslab_class_destroy(spa->spa_special_class); 1405eda14cbcSMatt Macy spa->spa_special_class = NULL; 1406eda14cbcSMatt Macy 1407eda14cbcSMatt Macy metaslab_class_destroy(spa->spa_dedup_class); 1408eda14cbcSMatt Macy spa->spa_dedup_class = NULL; 1409eda14cbcSMatt Macy 1410eda14cbcSMatt Macy /* 1411eda14cbcSMatt Macy * If this was part of an import or the open otherwise failed, we may 1412eda14cbcSMatt Macy * still have errors left in the queues. Empty them just in case. 1413eda14cbcSMatt Macy */ 1414eda14cbcSMatt Macy spa_errlog_drain(spa); 1415eda14cbcSMatt Macy avl_destroy(&spa->spa_errlist_scrub); 1416eda14cbcSMatt Macy avl_destroy(&spa->spa_errlist_last); 1417eda14cbcSMatt Macy 1418eda14cbcSMatt Macy spa_keystore_fini(&spa->spa_keystore); 1419eda14cbcSMatt Macy 1420eda14cbcSMatt Macy spa->spa_state = POOL_STATE_UNINITIALIZED; 1421eda14cbcSMatt Macy 1422eda14cbcSMatt Macy mutex_enter(&spa->spa_proc_lock); 1423eda14cbcSMatt Macy if (spa->spa_proc_state != SPA_PROC_NONE) { 1424eda14cbcSMatt Macy ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1425eda14cbcSMatt Macy spa->spa_proc_state = SPA_PROC_DEACTIVATE; 1426eda14cbcSMatt Macy cv_broadcast(&spa->spa_proc_cv); 1427eda14cbcSMatt Macy while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) { 1428eda14cbcSMatt Macy ASSERT(spa->spa_proc != &p0); 1429eda14cbcSMatt Macy cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1430eda14cbcSMatt Macy } 1431eda14cbcSMatt Macy ASSERT(spa->spa_proc_state == SPA_PROC_GONE); 1432eda14cbcSMatt Macy spa->spa_proc_state = SPA_PROC_NONE; 1433eda14cbcSMatt Macy } 1434eda14cbcSMatt Macy ASSERT(spa->spa_proc == &p0); 1435eda14cbcSMatt Macy mutex_exit(&spa->spa_proc_lock); 1436eda14cbcSMatt Macy 1437eda14cbcSMatt Macy /* 1438eda14cbcSMatt Macy * We want to make sure spa_thread() has actually exited the ZFS 1439eda14cbcSMatt Macy * module, so that the module can't be unloaded out from underneath 1440eda14cbcSMatt Macy * it. 1441eda14cbcSMatt Macy */ 1442eda14cbcSMatt Macy if (spa->spa_did != 0) { 1443eda14cbcSMatt Macy thread_join(spa->spa_did); 1444eda14cbcSMatt Macy spa->spa_did = 0; 1445eda14cbcSMatt Macy } 1446eda14cbcSMatt Macy } 1447eda14cbcSMatt Macy 1448eda14cbcSMatt Macy /* 1449eda14cbcSMatt Macy * Verify a pool configuration, and construct the vdev tree appropriately. This 1450eda14cbcSMatt Macy * will create all the necessary vdevs in the appropriate layout, with each vdev 1451eda14cbcSMatt Macy * in the CLOSED state. This will prep the pool before open/creation/import. 1452eda14cbcSMatt Macy * All vdev validation is done by the vdev_alloc() routine. 
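 * The parse is recursive: vdev_alloc() creates the vdev for 'nv',
 * leaves return immediately, and interior vdevs descend into
 * ZPOOL_CONFIG_CHILDREN.  Any failure frees the partially constructed
 * subtree and returns the error to the caller.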
1453eda14cbcSMatt Macy */ 1454eda14cbcSMatt Macy int 1455eda14cbcSMatt Macy spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, 1456eda14cbcSMatt Macy uint_t id, int atype) 1457eda14cbcSMatt Macy { 1458eda14cbcSMatt Macy nvlist_t **child; 1459eda14cbcSMatt Macy uint_t children; 1460eda14cbcSMatt Macy int error; 1461eda14cbcSMatt Macy 1462eda14cbcSMatt Macy if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) 1463eda14cbcSMatt Macy return (error); 1464eda14cbcSMatt Macy 1465eda14cbcSMatt Macy if ((*vdp)->vdev_ops->vdev_op_leaf) 1466eda14cbcSMatt Macy return (0); 1467eda14cbcSMatt Macy 1468eda14cbcSMatt Macy error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1469eda14cbcSMatt Macy &child, &children); 1470eda14cbcSMatt Macy 1471eda14cbcSMatt Macy if (error == ENOENT) 1472eda14cbcSMatt Macy return (0); 1473eda14cbcSMatt Macy 1474eda14cbcSMatt Macy if (error) { 1475eda14cbcSMatt Macy vdev_free(*vdp); 1476eda14cbcSMatt Macy *vdp = NULL; 1477eda14cbcSMatt Macy return (SET_ERROR(EINVAL)); 1478eda14cbcSMatt Macy } 1479eda14cbcSMatt Macy 1480eda14cbcSMatt Macy for (int c = 0; c < children; c++) { 1481eda14cbcSMatt Macy vdev_t *vd; 1482eda14cbcSMatt Macy if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, 1483eda14cbcSMatt Macy atype)) != 0) { 1484eda14cbcSMatt Macy vdev_free(*vdp); 1485eda14cbcSMatt Macy *vdp = NULL; 1486eda14cbcSMatt Macy return (error); 1487eda14cbcSMatt Macy } 1488eda14cbcSMatt Macy } 1489eda14cbcSMatt Macy 1490eda14cbcSMatt Macy ASSERT(*vdp != NULL); 1491eda14cbcSMatt Macy 1492eda14cbcSMatt Macy return (0); 1493eda14cbcSMatt Macy } 1494eda14cbcSMatt Macy 1495eda14cbcSMatt Macy static boolean_t 1496eda14cbcSMatt Macy spa_should_flush_logs_on_unload(spa_t *spa) 1497eda14cbcSMatt Macy { 1498eda14cbcSMatt Macy if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) 1499eda14cbcSMatt Macy return (B_FALSE); 1500eda14cbcSMatt Macy 1501eda14cbcSMatt Macy if (!spa_writeable(spa)) 1502eda14cbcSMatt Macy return (B_FALSE); 1503eda14cbcSMatt Macy 1504eda14cbcSMatt Macy if (!spa->spa_sync_on) 1505eda14cbcSMatt Macy return (B_FALSE); 1506eda14cbcSMatt Macy 1507eda14cbcSMatt Macy if (spa_state(spa) != POOL_STATE_EXPORTED) 1508eda14cbcSMatt Macy return (B_FALSE); 1509eda14cbcSMatt Macy 1510eda14cbcSMatt Macy if (zfs_keep_log_spacemaps_at_export) 1511eda14cbcSMatt Macy return (B_FALSE); 1512eda14cbcSMatt Macy 1513eda14cbcSMatt Macy return (B_TRUE); 1514eda14cbcSMatt Macy } 1515eda14cbcSMatt Macy 1516eda14cbcSMatt Macy /* 1517eda14cbcSMatt Macy * Opens a transaction that will set the flag that will instruct 1518eda14cbcSMatt Macy * spa_sync to attempt to flush all the metaslabs for that txg. 
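 * The transaction is assigned against the MOS directory, its txg is
 * recorded in spa_log_flushall_txg, and we then wait for that txg to
 * sync so the flushing requested here completes before the unload
 * proceeds.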
1519eda14cbcSMatt Macy */ 1520eda14cbcSMatt Macy static void 1521eda14cbcSMatt Macy spa_unload_log_sm_flush_all(spa_t *spa) 1522eda14cbcSMatt Macy { 1523eda14cbcSMatt Macy dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 1524eda14cbcSMatt Macy VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 1525eda14cbcSMatt Macy 1526eda14cbcSMatt Macy ASSERT3U(spa->spa_log_flushall_txg, ==, 0); 1527eda14cbcSMatt Macy spa->spa_log_flushall_txg = dmu_tx_get_txg(tx); 1528eda14cbcSMatt Macy 1529eda14cbcSMatt Macy dmu_tx_commit(tx); 1530eda14cbcSMatt Macy txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg); 1531eda14cbcSMatt Macy } 1532eda14cbcSMatt Macy 1533eda14cbcSMatt Macy static void 1534eda14cbcSMatt Macy spa_unload_log_sm_metadata(spa_t *spa) 1535eda14cbcSMatt Macy { 1536eda14cbcSMatt Macy void *cookie = NULL; 1537eda14cbcSMatt Macy spa_log_sm_t *sls; 1538eda14cbcSMatt Macy while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg, 1539eda14cbcSMatt Macy &cookie)) != NULL) { 1540eda14cbcSMatt Macy VERIFY0(sls->sls_mscount); 1541eda14cbcSMatt Macy kmem_free(sls, sizeof (spa_log_sm_t)); 1542eda14cbcSMatt Macy } 1543eda14cbcSMatt Macy 1544eda14cbcSMatt Macy for (log_summary_entry_t *e = list_head(&spa->spa_log_summary); 1545eda14cbcSMatt Macy e != NULL; e = list_head(&spa->spa_log_summary)) { 1546eda14cbcSMatt Macy VERIFY0(e->lse_mscount); 1547eda14cbcSMatt Macy list_remove(&spa->spa_log_summary, e); 1548eda14cbcSMatt Macy kmem_free(e, sizeof (log_summary_entry_t)); 1549eda14cbcSMatt Macy } 1550eda14cbcSMatt Macy 1551eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_nblocks = 0; 1552eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused = 0; 1553eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_blocklimit = 0; 1554eda14cbcSMatt Macy } 1555eda14cbcSMatt Macy 1556eda14cbcSMatt Macy static void 1557eda14cbcSMatt Macy spa_destroy_aux_threads(spa_t *spa) 1558eda14cbcSMatt Macy { 1559eda14cbcSMatt Macy if (spa->spa_condense_zthr != NULL) { 1560eda14cbcSMatt Macy zthr_destroy(spa->spa_condense_zthr); 1561eda14cbcSMatt Macy spa->spa_condense_zthr = NULL; 1562eda14cbcSMatt Macy } 1563eda14cbcSMatt Macy if (spa->spa_checkpoint_discard_zthr != NULL) { 1564eda14cbcSMatt Macy zthr_destroy(spa->spa_checkpoint_discard_zthr); 1565eda14cbcSMatt Macy spa->spa_checkpoint_discard_zthr = NULL; 1566eda14cbcSMatt Macy } 1567eda14cbcSMatt Macy if (spa->spa_livelist_delete_zthr != NULL) { 1568eda14cbcSMatt Macy zthr_destroy(spa->spa_livelist_delete_zthr); 1569eda14cbcSMatt Macy spa->spa_livelist_delete_zthr = NULL; 1570eda14cbcSMatt Macy } 1571eda14cbcSMatt Macy if (spa->spa_livelist_condense_zthr != NULL) { 1572eda14cbcSMatt Macy zthr_destroy(spa->spa_livelist_condense_zthr); 1573eda14cbcSMatt Macy spa->spa_livelist_condense_zthr = NULL; 1574eda14cbcSMatt Macy } 1575eda14cbcSMatt Macy } 1576eda14cbcSMatt Macy 1577eda14cbcSMatt Macy /* 1578eda14cbcSMatt Macy * Opposite of spa_load(). 
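 * Teardown proceeds roughly in the reverse order of activation: flush
 * log space maps if appropriate, suspend async tasks, stop any
 * initialize/trim/autotrim/rebuild activity, stop the sync thread,
 * then wait for outstanding I/O before releasing the remaining state.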
1579eda14cbcSMatt Macy */ 1580eda14cbcSMatt Macy static void 1581eda14cbcSMatt Macy spa_unload(spa_t *spa) 1582eda14cbcSMatt Macy { 1583eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1584eda14cbcSMatt Macy ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED); 1585eda14cbcSMatt Macy 1586eda14cbcSMatt Macy spa_import_progress_remove(spa_guid(spa)); 1587eda14cbcSMatt Macy spa_load_note(spa, "UNLOADING"); 1588eda14cbcSMatt Macy 1589eda14cbcSMatt Macy spa_wake_waiters(spa); 1590eda14cbcSMatt Macy 1591eda14cbcSMatt Macy /* 1592eda14cbcSMatt Macy * If the log space map feature is enabled and the pool is getting 1593eda14cbcSMatt Macy * exported (but not destroyed), we want to spend some time flushing 1594eda14cbcSMatt Macy * as many metaslabs as we can in an attempt to destroy log space 1595eda14cbcSMatt Macy * maps and save import time. 1596eda14cbcSMatt Macy */ 1597eda14cbcSMatt Macy if (spa_should_flush_logs_on_unload(spa)) 1598eda14cbcSMatt Macy spa_unload_log_sm_flush_all(spa); 1599eda14cbcSMatt Macy 1600eda14cbcSMatt Macy /* 1601eda14cbcSMatt Macy * Stop async tasks. 1602eda14cbcSMatt Macy */ 1603eda14cbcSMatt Macy spa_async_suspend(spa); 1604eda14cbcSMatt Macy 1605eda14cbcSMatt Macy if (spa->spa_root_vdev) { 1606eda14cbcSMatt Macy vdev_t *root_vdev = spa->spa_root_vdev; 1607eda14cbcSMatt Macy vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE); 1608eda14cbcSMatt Macy vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE); 1609eda14cbcSMatt Macy vdev_autotrim_stop_all(spa); 1610eda14cbcSMatt Macy vdev_rebuild_stop_all(spa); 1611eda14cbcSMatt Macy } 1612eda14cbcSMatt Macy 1613eda14cbcSMatt Macy /* 1614eda14cbcSMatt Macy * Stop syncing. 1615eda14cbcSMatt Macy */ 1616eda14cbcSMatt Macy if (spa->spa_sync_on) { 1617eda14cbcSMatt Macy txg_sync_stop(spa->spa_dsl_pool); 1618eda14cbcSMatt Macy spa->spa_sync_on = B_FALSE; 1619eda14cbcSMatt Macy } 1620eda14cbcSMatt Macy 1621eda14cbcSMatt Macy /* 1622eda14cbcSMatt Macy * This ensures that there is no async metaslab prefetching 1623eda14cbcSMatt Macy * while we attempt to unload the spa. 1624eda14cbcSMatt Macy */ 1625eda14cbcSMatt Macy if (spa->spa_root_vdev != NULL) { 1626eda14cbcSMatt Macy for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) { 1627eda14cbcSMatt Macy vdev_t *vc = spa->spa_root_vdev->vdev_child[c]; 1628eda14cbcSMatt Macy if (vc->vdev_mg != NULL) 1629eda14cbcSMatt Macy taskq_wait(vc->vdev_mg->mg_taskq); 1630eda14cbcSMatt Macy } 1631eda14cbcSMatt Macy } 1632eda14cbcSMatt Macy 1633eda14cbcSMatt Macy if (spa->spa_mmp.mmp_thread) 1634eda14cbcSMatt Macy mmp_thread_stop(spa); 1635eda14cbcSMatt Macy 1636eda14cbcSMatt Macy /* 1637eda14cbcSMatt Macy * Wait for any outstanding async I/O to complete. 
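 * There is one async root zio per CPU; each is waited on individually
 * before the spa_async_zio_root array itself is freed.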
1638eda14cbcSMatt Macy */ 1639eda14cbcSMatt Macy if (spa->spa_async_zio_root != NULL) { 1640eda14cbcSMatt Macy for (int i = 0; i < max_ncpus; i++) 1641eda14cbcSMatt Macy (void) zio_wait(spa->spa_async_zio_root[i]); 1642eda14cbcSMatt Macy kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *)); 1643eda14cbcSMatt Macy spa->spa_async_zio_root = NULL; 1644eda14cbcSMatt Macy } 1645eda14cbcSMatt Macy 1646eda14cbcSMatt Macy if (spa->spa_vdev_removal != NULL) { 1647eda14cbcSMatt Macy spa_vdev_removal_destroy(spa->spa_vdev_removal); 1648eda14cbcSMatt Macy spa->spa_vdev_removal = NULL; 1649eda14cbcSMatt Macy } 1650eda14cbcSMatt Macy 1651eda14cbcSMatt Macy spa_destroy_aux_threads(spa); 1652eda14cbcSMatt Macy 1653eda14cbcSMatt Macy spa_condense_fini(spa); 1654eda14cbcSMatt Macy 1655eda14cbcSMatt Macy bpobj_close(&spa->spa_deferred_bpobj); 1656eda14cbcSMatt Macy 1657eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1658eda14cbcSMatt Macy 1659eda14cbcSMatt Macy /* 1660eda14cbcSMatt Macy * Close all vdevs. 1661eda14cbcSMatt Macy */ 1662eda14cbcSMatt Macy if (spa->spa_root_vdev) 1663eda14cbcSMatt Macy vdev_free(spa->spa_root_vdev); 1664eda14cbcSMatt Macy ASSERT(spa->spa_root_vdev == NULL); 1665eda14cbcSMatt Macy 1666eda14cbcSMatt Macy /* 1667eda14cbcSMatt Macy * Close the dsl pool. 1668eda14cbcSMatt Macy */ 1669eda14cbcSMatt Macy if (spa->spa_dsl_pool) { 1670eda14cbcSMatt Macy dsl_pool_close(spa->spa_dsl_pool); 1671eda14cbcSMatt Macy spa->spa_dsl_pool = NULL; 1672eda14cbcSMatt Macy spa->spa_meta_objset = NULL; 1673eda14cbcSMatt Macy } 1674eda14cbcSMatt Macy 1675eda14cbcSMatt Macy ddt_unload(spa); 1676eda14cbcSMatt Macy spa_unload_log_sm_metadata(spa); 1677eda14cbcSMatt Macy 1678eda14cbcSMatt Macy /* 1679eda14cbcSMatt Macy * Drop and purge level 2 cache 1680eda14cbcSMatt Macy */ 1681eda14cbcSMatt Macy spa_l2cache_drop(spa); 1682eda14cbcSMatt Macy 1683eda14cbcSMatt Macy for (int i = 0; i < spa->spa_spares.sav_count; i++) 1684eda14cbcSMatt Macy vdev_free(spa->spa_spares.sav_vdevs[i]); 1685eda14cbcSMatt Macy if (spa->spa_spares.sav_vdevs) { 1686eda14cbcSMatt Macy kmem_free(spa->spa_spares.sav_vdevs, 1687eda14cbcSMatt Macy spa->spa_spares.sav_count * sizeof (void *)); 1688eda14cbcSMatt Macy spa->spa_spares.sav_vdevs = NULL; 1689eda14cbcSMatt Macy } 1690eda14cbcSMatt Macy if (spa->spa_spares.sav_config) { 1691eda14cbcSMatt Macy nvlist_free(spa->spa_spares.sav_config); 1692eda14cbcSMatt Macy spa->spa_spares.sav_config = NULL; 1693eda14cbcSMatt Macy } 1694eda14cbcSMatt Macy spa->spa_spares.sav_count = 0; 1695eda14cbcSMatt Macy 1696eda14cbcSMatt Macy for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { 1697eda14cbcSMatt Macy vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]); 1698eda14cbcSMatt Macy vdev_free(spa->spa_l2cache.sav_vdevs[i]); 1699eda14cbcSMatt Macy } 1700eda14cbcSMatt Macy if (spa->spa_l2cache.sav_vdevs) { 1701eda14cbcSMatt Macy kmem_free(spa->spa_l2cache.sav_vdevs, 1702eda14cbcSMatt Macy spa->spa_l2cache.sav_count * sizeof (void *)); 1703eda14cbcSMatt Macy spa->spa_l2cache.sav_vdevs = NULL; 1704eda14cbcSMatt Macy } 1705eda14cbcSMatt Macy if (spa->spa_l2cache.sav_config) { 1706eda14cbcSMatt Macy nvlist_free(spa->spa_l2cache.sav_config); 1707eda14cbcSMatt Macy spa->spa_l2cache.sav_config = NULL; 1708eda14cbcSMatt Macy } 1709eda14cbcSMatt Macy spa->spa_l2cache.sav_count = 0; 1710eda14cbcSMatt Macy 1711eda14cbcSMatt Macy spa->spa_async_suspended = 0; 1712eda14cbcSMatt Macy 1713eda14cbcSMatt Macy spa->spa_indirect_vdevs_loaded = B_FALSE; 1714eda14cbcSMatt Macy 1715eda14cbcSMatt 
Macy if (spa->spa_comment != NULL) { 1716eda14cbcSMatt Macy spa_strfree(spa->spa_comment); 1717eda14cbcSMatt Macy spa->spa_comment = NULL; 1718eda14cbcSMatt Macy } 1719ee36e25aSMartin Matuska if (spa->spa_compatibility != NULL) { 1720ee36e25aSMartin Matuska spa_strfree(spa->spa_compatibility); 1721ee36e25aSMartin Matuska spa->spa_compatibility = NULL; 1722ee36e25aSMartin Matuska } 1723eda14cbcSMatt Macy 1724eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, spa); 1725eda14cbcSMatt Macy } 1726eda14cbcSMatt Macy 1727eda14cbcSMatt Macy /* 1728eda14cbcSMatt Macy * Load (or re-load) the current list of vdevs describing the active spares for 1729eda14cbcSMatt Macy * this pool. When this is called, we have some form of basic information in 1730eda14cbcSMatt Macy * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and 1731eda14cbcSMatt Macy * then re-generate a more complete list including status information. 1732eda14cbcSMatt Macy */ 1733eda14cbcSMatt Macy void 1734eda14cbcSMatt Macy spa_load_spares(spa_t *spa) 1735eda14cbcSMatt Macy { 1736eda14cbcSMatt Macy nvlist_t **spares; 1737eda14cbcSMatt Macy uint_t nspares; 1738eda14cbcSMatt Macy int i; 1739eda14cbcSMatt Macy vdev_t *vd, *tvd; 1740eda14cbcSMatt Macy 1741eda14cbcSMatt Macy #ifndef _KERNEL 1742eda14cbcSMatt Macy /* 1743eda14cbcSMatt Macy * zdb opens both the current state of the pool and the 1744eda14cbcSMatt Macy * checkpointed state (if present), with a different spa_t. 1745eda14cbcSMatt Macy * 1746eda14cbcSMatt Macy * As spare vdevs are shared among open pools, we skip loading 1747eda14cbcSMatt Macy * them when we load the checkpointed state of the pool. 1748eda14cbcSMatt Macy */ 1749eda14cbcSMatt Macy if (!spa_writeable(spa)) 1750eda14cbcSMatt Macy return; 1751eda14cbcSMatt Macy #endif 1752eda14cbcSMatt Macy 1753eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1754eda14cbcSMatt Macy 1755eda14cbcSMatt Macy /* 1756eda14cbcSMatt Macy * First, close and free any existing spare vdevs. 1757eda14cbcSMatt Macy */ 1758eda14cbcSMatt Macy for (i = 0; i < spa->spa_spares.sav_count; i++) { 1759eda14cbcSMatt Macy vd = spa->spa_spares.sav_vdevs[i]; 1760eda14cbcSMatt Macy 1761eda14cbcSMatt Macy /* Undo the call to spa_activate() below */ 1762eda14cbcSMatt Macy if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 1763eda14cbcSMatt Macy B_FALSE)) != NULL && tvd->vdev_isspare) 1764eda14cbcSMatt Macy spa_spare_remove(tvd); 1765eda14cbcSMatt Macy vdev_close(vd); 1766eda14cbcSMatt Macy vdev_free(vd); 1767eda14cbcSMatt Macy } 1768eda14cbcSMatt Macy 1769eda14cbcSMatt Macy if (spa->spa_spares.sav_vdevs) 1770eda14cbcSMatt Macy kmem_free(spa->spa_spares.sav_vdevs, 1771eda14cbcSMatt Macy spa->spa_spares.sav_count * sizeof (void *)); 1772eda14cbcSMatt Macy 1773eda14cbcSMatt Macy if (spa->spa_spares.sav_config == NULL) 1774eda14cbcSMatt Macy nspares = 0; 1775eda14cbcSMatt Macy else 1776eda14cbcSMatt Macy VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 1777eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 1778eda14cbcSMatt Macy 1779eda14cbcSMatt Macy spa->spa_spares.sav_count = (int)nspares; 1780eda14cbcSMatt Macy spa->spa_spares.sav_vdevs = NULL; 1781eda14cbcSMatt Macy 1782eda14cbcSMatt Macy if (nspares == 0) 1783eda14cbcSMatt Macy return; 1784eda14cbcSMatt Macy 1785eda14cbcSMatt Macy /* 1786eda14cbcSMatt Macy * Construct the array of vdevs, opening them to get status in the 1787eda14cbcSMatt Macy * process. 
For each spare, there is potentially two different vdev_t 1788eda14cbcSMatt Macy * structures associated with it: one in the list of spares (used only 1789eda14cbcSMatt Macy * for basic validation purposes) and one in the active vdev 1790eda14cbcSMatt Macy * configuration (if it's spared in). During this phase we open and 1791eda14cbcSMatt Macy * validate each vdev on the spare list. If the vdev also exists in the 1792eda14cbcSMatt Macy * active configuration, then we also mark this vdev as an active spare. 1793eda14cbcSMatt Macy */ 1794eda14cbcSMatt Macy spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *), 1795eda14cbcSMatt Macy KM_SLEEP); 1796eda14cbcSMatt Macy for (i = 0; i < spa->spa_spares.sav_count; i++) { 1797eda14cbcSMatt Macy VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0, 1798eda14cbcSMatt Macy VDEV_ALLOC_SPARE) == 0); 1799eda14cbcSMatt Macy ASSERT(vd != NULL); 1800eda14cbcSMatt Macy 1801eda14cbcSMatt Macy spa->spa_spares.sav_vdevs[i] = vd; 1802eda14cbcSMatt Macy 1803eda14cbcSMatt Macy if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 1804eda14cbcSMatt Macy B_FALSE)) != NULL) { 1805eda14cbcSMatt Macy if (!tvd->vdev_isspare) 1806eda14cbcSMatt Macy spa_spare_add(tvd); 1807eda14cbcSMatt Macy 1808eda14cbcSMatt Macy /* 1809eda14cbcSMatt Macy * We only mark the spare active if we were successfully 1810eda14cbcSMatt Macy * able to load the vdev. Otherwise, importing a pool 1811eda14cbcSMatt Macy * with a bad active spare would result in strange 1812eda14cbcSMatt Macy * behavior, because multiple pool would think the spare 1813eda14cbcSMatt Macy * is actively in use. 1814eda14cbcSMatt Macy * 1815eda14cbcSMatt Macy * There is a vulnerability here to an equally bizarre 1816eda14cbcSMatt Macy * circumstance, where a dead active spare is later 1817eda14cbcSMatt Macy * brought back to life (onlined or otherwise). Given 1818eda14cbcSMatt Macy * the rarity of this scenario, and the extra complexity 1819eda14cbcSMatt Macy * it adds, we ignore the possibility. 1820eda14cbcSMatt Macy */ 1821eda14cbcSMatt Macy if (!vdev_is_dead(tvd)) 1822eda14cbcSMatt Macy spa_spare_activate(tvd); 1823eda14cbcSMatt Macy } 1824eda14cbcSMatt Macy 1825eda14cbcSMatt Macy vd->vdev_top = vd; 1826eda14cbcSMatt Macy vd->vdev_aux = &spa->spa_spares; 1827eda14cbcSMatt Macy 1828eda14cbcSMatt Macy if (vdev_open(vd) != 0) 1829eda14cbcSMatt Macy continue; 1830eda14cbcSMatt Macy 1831eda14cbcSMatt Macy if (vdev_validate_aux(vd) == 0) 1832eda14cbcSMatt Macy spa_spare_add(vd); 1833eda14cbcSMatt Macy } 1834eda14cbcSMatt Macy 1835eda14cbcSMatt Macy /* 1836eda14cbcSMatt Macy * Recompute the stashed list of spares, with status information 1837eda14cbcSMatt Macy * this time. 
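 * ZPOOL_CONFIG_SPARES is removed from sav_config, a fresh nvlist is
 * generated for each spare with vdev_config_generate(..., B_TRUE,
 * VDEV_CONFIG_SPARE), and the resulting array is added back in its
 * place.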
1838eda14cbcSMatt Macy */ 1839eda14cbcSMatt Macy VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 1840eda14cbcSMatt Macy DATA_TYPE_NVLIST_ARRAY) == 0); 1841eda14cbcSMatt Macy 1842eda14cbcSMatt Macy spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), 1843eda14cbcSMatt Macy KM_SLEEP); 1844eda14cbcSMatt Macy for (i = 0; i < spa->spa_spares.sav_count; i++) 1845eda14cbcSMatt Macy spares[i] = vdev_config_generate(spa, 1846eda14cbcSMatt Macy spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE); 1847eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 1848eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0); 1849eda14cbcSMatt Macy for (i = 0; i < spa->spa_spares.sav_count; i++) 1850eda14cbcSMatt Macy nvlist_free(spares[i]); 1851eda14cbcSMatt Macy kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); 1852eda14cbcSMatt Macy } 1853eda14cbcSMatt Macy 1854eda14cbcSMatt Macy /* 1855eda14cbcSMatt Macy * Load (or re-load) the current list of vdevs describing the active l2cache for 1856eda14cbcSMatt Macy * this pool. When this is called, we have some form of basic information in 1857eda14cbcSMatt Macy * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and 1858eda14cbcSMatt Macy * then re-generate a more complete list including status information. 1859eda14cbcSMatt Macy * Devices which are already active have their details maintained, and are 1860eda14cbcSMatt Macy * not re-opened. 1861eda14cbcSMatt Macy */ 1862eda14cbcSMatt Macy void 1863eda14cbcSMatt Macy spa_load_l2cache(spa_t *spa) 1864eda14cbcSMatt Macy { 1865eda14cbcSMatt Macy nvlist_t **l2cache = NULL; 1866eda14cbcSMatt Macy uint_t nl2cache; 1867eda14cbcSMatt Macy int i, j, oldnvdevs; 1868eda14cbcSMatt Macy uint64_t guid; 1869eda14cbcSMatt Macy vdev_t *vd, **oldvdevs, **newvdevs; 1870eda14cbcSMatt Macy spa_aux_vdev_t *sav = &spa->spa_l2cache; 1871eda14cbcSMatt Macy 1872eda14cbcSMatt Macy #ifndef _KERNEL 1873eda14cbcSMatt Macy /* 1874eda14cbcSMatt Macy * zdb opens both the current state of the pool and the 1875eda14cbcSMatt Macy * checkpointed state (if present), with a different spa_t. 1876eda14cbcSMatt Macy * 1877eda14cbcSMatt Macy * As L2 caches are part of the ARC which is shared among open 1878eda14cbcSMatt Macy * pools, we skip loading them when we load the checkpointed 1879eda14cbcSMatt Macy * state of the pool. 1880eda14cbcSMatt Macy */ 1881eda14cbcSMatt Macy if (!spa_writeable(spa)) 1882eda14cbcSMatt Macy return; 1883eda14cbcSMatt Macy #endif 1884eda14cbcSMatt Macy 1885eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1886eda14cbcSMatt Macy 1887eda14cbcSMatt Macy oldvdevs = sav->sav_vdevs; 1888eda14cbcSMatt Macy oldnvdevs = sav->sav_count; 1889eda14cbcSMatt Macy sav->sav_vdevs = NULL; 1890eda14cbcSMatt Macy sav->sav_count = 0; 1891eda14cbcSMatt Macy 1892eda14cbcSMatt Macy if (sav->sav_config == NULL) { 1893eda14cbcSMatt Macy nl2cache = 0; 1894eda14cbcSMatt Macy newvdevs = NULL; 1895eda14cbcSMatt Macy goto out; 1896eda14cbcSMatt Macy } 1897eda14cbcSMatt Macy 1898eda14cbcSMatt Macy VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 1899eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 1900eda14cbcSMatt Macy newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP); 1901eda14cbcSMatt Macy 1902eda14cbcSMatt Macy /* 1903eda14cbcSMatt Macy * Process new nvlist of vdevs. 
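 * Each entry is matched by GUID against the previously loaded l2cache
 * vdevs; matches are retained as-is.  Entries with no match are parsed
 * into new vdevs, committed as l2cache devices even if they fail to
 * open, added to the ARC when healthy, and an async TRIM of the whole
 * device is requested.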
1904eda14cbcSMatt Macy */ 1905eda14cbcSMatt Macy for (i = 0; i < nl2cache; i++) { 1906eda14cbcSMatt Macy VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID, 1907eda14cbcSMatt Macy &guid) == 0); 1908eda14cbcSMatt Macy 1909eda14cbcSMatt Macy newvdevs[i] = NULL; 1910eda14cbcSMatt Macy for (j = 0; j < oldnvdevs; j++) { 1911eda14cbcSMatt Macy vd = oldvdevs[j]; 1912eda14cbcSMatt Macy if (vd != NULL && guid == vd->vdev_guid) { 1913eda14cbcSMatt Macy /* 1914eda14cbcSMatt Macy * Retain previous vdev for add/remove ops. 1915eda14cbcSMatt Macy */ 1916eda14cbcSMatt Macy newvdevs[i] = vd; 1917eda14cbcSMatt Macy oldvdevs[j] = NULL; 1918eda14cbcSMatt Macy break; 1919eda14cbcSMatt Macy } 1920eda14cbcSMatt Macy } 1921eda14cbcSMatt Macy 1922eda14cbcSMatt Macy if (newvdevs[i] == NULL) { 1923eda14cbcSMatt Macy /* 1924eda14cbcSMatt Macy * Create new vdev 1925eda14cbcSMatt Macy */ 1926eda14cbcSMatt Macy VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 1927eda14cbcSMatt Macy VDEV_ALLOC_L2CACHE) == 0); 1928eda14cbcSMatt Macy ASSERT(vd != NULL); 1929eda14cbcSMatt Macy newvdevs[i] = vd; 1930eda14cbcSMatt Macy 1931eda14cbcSMatt Macy /* 1932eda14cbcSMatt Macy * Commit this vdev as an l2cache device, 1933eda14cbcSMatt Macy * even if it fails to open. 1934eda14cbcSMatt Macy */ 1935eda14cbcSMatt Macy spa_l2cache_add(vd); 1936eda14cbcSMatt Macy 1937eda14cbcSMatt Macy vd->vdev_top = vd; 1938eda14cbcSMatt Macy vd->vdev_aux = sav; 1939eda14cbcSMatt Macy 1940eda14cbcSMatt Macy spa_l2cache_activate(vd); 1941eda14cbcSMatt Macy 1942eda14cbcSMatt Macy if (vdev_open(vd) != 0) 1943eda14cbcSMatt Macy continue; 1944eda14cbcSMatt Macy 1945eda14cbcSMatt Macy (void) vdev_validate_aux(vd); 1946eda14cbcSMatt Macy 1947eda14cbcSMatt Macy if (!vdev_is_dead(vd)) 1948eda14cbcSMatt Macy l2arc_add_vdev(spa, vd); 1949eda14cbcSMatt Macy 1950eda14cbcSMatt Macy /* 1951eda14cbcSMatt Macy * Upon cache device addition to a pool or pool 1952eda14cbcSMatt Macy * creation with a cache device or if the header 1953eda14cbcSMatt Macy * of the device is invalid we issue an async 1954eda14cbcSMatt Macy * TRIM command for the whole device which will 1955eda14cbcSMatt Macy * execute if l2arc_trim_ahead > 0. 1956eda14cbcSMatt Macy */ 1957eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM); 1958eda14cbcSMatt Macy } 1959eda14cbcSMatt Macy } 1960eda14cbcSMatt Macy 1961eda14cbcSMatt Macy sav->sav_vdevs = newvdevs; 1962eda14cbcSMatt Macy sav->sav_count = (int)nl2cache; 1963eda14cbcSMatt Macy 1964eda14cbcSMatt Macy /* 1965eda14cbcSMatt Macy * Recompute the stashed list of l2cache devices, with status 1966eda14cbcSMatt Macy * information this time. 
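 * As with the spares above, ZPOOL_CONFIG_L2CACHE is removed from
 * sav_config and regenerated from the current vdevs via
 * vdev_config_generate(..., B_TRUE, VDEV_CONFIG_L2CACHE).  Old vdevs
 * that were not retained are purged afterwards under the 'out' label.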
1967eda14cbcSMatt Macy */ 1968eda14cbcSMatt Macy VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 1969eda14cbcSMatt Macy DATA_TYPE_NVLIST_ARRAY) == 0); 1970eda14cbcSMatt Macy 1971eda14cbcSMatt Macy if (sav->sav_count > 0) 1972eda14cbcSMatt Macy l2cache = kmem_alloc(sav->sav_count * sizeof (void *), 1973eda14cbcSMatt Macy KM_SLEEP); 1974eda14cbcSMatt Macy for (i = 0; i < sav->sav_count; i++) 1975eda14cbcSMatt Macy l2cache[i] = vdev_config_generate(spa, 1976eda14cbcSMatt Macy sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE); 1977eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist_array(sav->sav_config, 1978eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0); 1979eda14cbcSMatt Macy 1980eda14cbcSMatt Macy out: 1981eda14cbcSMatt Macy /* 1982eda14cbcSMatt Macy * Purge vdevs that were dropped 1983eda14cbcSMatt Macy */ 1984eda14cbcSMatt Macy for (i = 0; i < oldnvdevs; i++) { 1985eda14cbcSMatt Macy uint64_t pool; 1986eda14cbcSMatt Macy 1987eda14cbcSMatt Macy vd = oldvdevs[i]; 1988eda14cbcSMatt Macy if (vd != NULL) { 1989eda14cbcSMatt Macy ASSERT(vd->vdev_isl2cache); 1990eda14cbcSMatt Macy 1991eda14cbcSMatt Macy if (spa_l2cache_exists(vd->vdev_guid, &pool) && 1992eda14cbcSMatt Macy pool != 0ULL && l2arc_vdev_present(vd)) 1993eda14cbcSMatt Macy l2arc_remove_vdev(vd); 1994eda14cbcSMatt Macy vdev_clear_stats(vd); 1995eda14cbcSMatt Macy vdev_free(vd); 1996eda14cbcSMatt Macy } 1997eda14cbcSMatt Macy } 1998eda14cbcSMatt Macy 1999eda14cbcSMatt Macy if (oldvdevs) 2000eda14cbcSMatt Macy kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 2001eda14cbcSMatt Macy 2002eda14cbcSMatt Macy for (i = 0; i < sav->sav_count; i++) 2003eda14cbcSMatt Macy nvlist_free(l2cache[i]); 2004eda14cbcSMatt Macy if (sav->sav_count) 2005eda14cbcSMatt Macy kmem_free(l2cache, sav->sav_count * sizeof (void *)); 2006eda14cbcSMatt Macy } 2007eda14cbcSMatt Macy 2008eda14cbcSMatt Macy static int 2009eda14cbcSMatt Macy load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 2010eda14cbcSMatt Macy { 2011eda14cbcSMatt Macy dmu_buf_t *db; 2012eda14cbcSMatt Macy char *packed = NULL; 2013eda14cbcSMatt Macy size_t nvsize = 0; 2014eda14cbcSMatt Macy int error; 2015eda14cbcSMatt Macy *value = NULL; 2016eda14cbcSMatt Macy 2017eda14cbcSMatt Macy error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); 2018eda14cbcSMatt Macy if (error) 2019eda14cbcSMatt Macy return (error); 2020eda14cbcSMatt Macy 2021eda14cbcSMatt Macy nvsize = *(uint64_t *)db->db_data; 2022eda14cbcSMatt Macy dmu_buf_rele(db, FTAG); 2023eda14cbcSMatt Macy 2024eda14cbcSMatt Macy packed = vmem_alloc(nvsize, KM_SLEEP); 2025eda14cbcSMatt Macy error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, 2026eda14cbcSMatt Macy DMU_READ_PREFETCH); 2027eda14cbcSMatt Macy if (error == 0) 2028eda14cbcSMatt Macy error = nvlist_unpack(packed, nvsize, value, 0); 2029eda14cbcSMatt Macy vmem_free(packed, nvsize); 2030eda14cbcSMatt Macy 2031eda14cbcSMatt Macy return (error); 2032eda14cbcSMatt Macy } 2033eda14cbcSMatt Macy 2034eda14cbcSMatt Macy /* 2035eda14cbcSMatt Macy * Concrete top-level vdevs that are not missing and are not logs. At every 2036eda14cbcSMatt Macy * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds. 
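 * spa_healthy_core_tvds() therefore counts root-vdev children that are
 * not log devices, are concrete, and are not dead.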
2037eda14cbcSMatt Macy */ 2038eda14cbcSMatt Macy static uint64_t 2039eda14cbcSMatt Macy spa_healthy_core_tvds(spa_t *spa) 2040eda14cbcSMatt Macy { 2041eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 2042eda14cbcSMatt Macy uint64_t tvds = 0; 2043eda14cbcSMatt Macy 2044eda14cbcSMatt Macy for (uint64_t i = 0; i < rvd->vdev_children; i++) { 2045eda14cbcSMatt Macy vdev_t *vd = rvd->vdev_child[i]; 2046eda14cbcSMatt Macy if (vd->vdev_islog) 2047eda14cbcSMatt Macy continue; 2048eda14cbcSMatt Macy if (vdev_is_concrete(vd) && !vdev_is_dead(vd)) 2049eda14cbcSMatt Macy tvds++; 2050eda14cbcSMatt Macy } 2051eda14cbcSMatt Macy 2052eda14cbcSMatt Macy return (tvds); 2053eda14cbcSMatt Macy } 2054eda14cbcSMatt Macy 2055eda14cbcSMatt Macy /* 2056eda14cbcSMatt Macy * Checks to see if the given vdev could not be opened, in which case we post a 2057eda14cbcSMatt Macy * sysevent to notify the autoreplace code that the device has been removed. 2058eda14cbcSMatt Macy */ 2059eda14cbcSMatt Macy static void 2060eda14cbcSMatt Macy spa_check_removed(vdev_t *vd) 2061eda14cbcSMatt Macy { 2062eda14cbcSMatt Macy for (uint64_t c = 0; c < vd->vdev_children; c++) 2063eda14cbcSMatt Macy spa_check_removed(vd->vdev_child[c]); 2064eda14cbcSMatt Macy 2065eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) && 2066eda14cbcSMatt Macy vdev_is_concrete(vd)) { 2067eda14cbcSMatt Macy zfs_post_autoreplace(vd->vdev_spa, vd); 2068eda14cbcSMatt Macy spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK); 2069eda14cbcSMatt Macy } 2070eda14cbcSMatt Macy } 2071eda14cbcSMatt Macy 2072eda14cbcSMatt Macy static int 2073eda14cbcSMatt Macy spa_check_for_missing_logs(spa_t *spa) 2074eda14cbcSMatt Macy { 2075eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 2076eda14cbcSMatt Macy 2077eda14cbcSMatt Macy /* 2078eda14cbcSMatt Macy * If we're doing a normal import, then build up any additional 2079eda14cbcSMatt Macy * diagnostic information about missing log devices. 2080eda14cbcSMatt Macy * We'll pass this up to the user for further processing. 2081eda14cbcSMatt Macy */ 2082eda14cbcSMatt Macy if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) { 2083eda14cbcSMatt Macy nvlist_t **child, *nv; 2084eda14cbcSMatt Macy uint64_t idx = 0; 2085eda14cbcSMatt Macy 2086eda14cbcSMatt Macy child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *), 2087eda14cbcSMatt Macy KM_SLEEP); 2088eda14cbcSMatt Macy VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2089eda14cbcSMatt Macy 2090eda14cbcSMatt Macy for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2091eda14cbcSMatt Macy vdev_t *tvd = rvd->vdev_child[c]; 2092eda14cbcSMatt Macy 2093eda14cbcSMatt Macy /* 2094eda14cbcSMatt Macy * We consider a device as missing only if it failed 2095eda14cbcSMatt Macy * to open (i.e. offline or faulted is not considered 2096eda14cbcSMatt Macy * as missing). 
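 * Missing log tvds are reported to the caller through
 * ZPOOL_CONFIG_MISSING_DEVICES in spa_load_info and the load fails
 * with ENXIO.  With ZFS_IMPORT_MISSING_LOG set, the log state is
 * instead moved to SPA_LOG_CLEAR and the ZIL contents are dropped.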
2097eda14cbcSMatt Macy */ 2098eda14cbcSMatt Macy if (tvd->vdev_islog && 2099eda14cbcSMatt Macy tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 2100eda14cbcSMatt Macy child[idx++] = vdev_config_generate(spa, tvd, 2101eda14cbcSMatt Macy B_FALSE, VDEV_CONFIG_MISSING); 2102eda14cbcSMatt Macy } 2103eda14cbcSMatt Macy } 2104eda14cbcSMatt Macy 2105eda14cbcSMatt Macy if (idx > 0) { 2106eda14cbcSMatt Macy fnvlist_add_nvlist_array(nv, 2107eda14cbcSMatt Macy ZPOOL_CONFIG_CHILDREN, child, idx); 2108eda14cbcSMatt Macy fnvlist_add_nvlist(spa->spa_load_info, 2109eda14cbcSMatt Macy ZPOOL_CONFIG_MISSING_DEVICES, nv); 2110eda14cbcSMatt Macy 2111eda14cbcSMatt Macy for (uint64_t i = 0; i < idx; i++) 2112eda14cbcSMatt Macy nvlist_free(child[i]); 2113eda14cbcSMatt Macy } 2114eda14cbcSMatt Macy nvlist_free(nv); 2115eda14cbcSMatt Macy kmem_free(child, rvd->vdev_children * sizeof (char **)); 2116eda14cbcSMatt Macy 2117eda14cbcSMatt Macy if (idx > 0) { 2118eda14cbcSMatt Macy spa_load_failed(spa, "some log devices are missing"); 2119eda14cbcSMatt Macy vdev_dbgmsg_print_tree(rvd, 2); 2120eda14cbcSMatt Macy return (SET_ERROR(ENXIO)); 2121eda14cbcSMatt Macy } 2122eda14cbcSMatt Macy } else { 2123eda14cbcSMatt Macy for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2124eda14cbcSMatt Macy vdev_t *tvd = rvd->vdev_child[c]; 2125eda14cbcSMatt Macy 2126eda14cbcSMatt Macy if (tvd->vdev_islog && 2127eda14cbcSMatt Macy tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 2128eda14cbcSMatt Macy spa_set_log_state(spa, SPA_LOG_CLEAR); 2129eda14cbcSMatt Macy spa_load_note(spa, "some log devices are " 2130eda14cbcSMatt Macy "missing, ZIL is dropped."); 2131eda14cbcSMatt Macy vdev_dbgmsg_print_tree(rvd, 2); 2132eda14cbcSMatt Macy break; 2133eda14cbcSMatt Macy } 2134eda14cbcSMatt Macy } 2135eda14cbcSMatt Macy } 2136eda14cbcSMatt Macy 2137eda14cbcSMatt Macy return (0); 2138eda14cbcSMatt Macy } 2139eda14cbcSMatt Macy 2140eda14cbcSMatt Macy /* 2141eda14cbcSMatt Macy * Check for missing log devices 2142eda14cbcSMatt Macy */ 2143eda14cbcSMatt Macy static boolean_t 2144eda14cbcSMatt Macy spa_check_logs(spa_t *spa) 2145eda14cbcSMatt Macy { 2146eda14cbcSMatt Macy boolean_t rv = B_FALSE; 2147eda14cbcSMatt Macy dsl_pool_t *dp = spa_get_dsl(spa); 2148eda14cbcSMatt Macy 2149eda14cbcSMatt Macy switch (spa->spa_log_state) { 2150eda14cbcSMatt Macy default: 2151eda14cbcSMatt Macy break; 2152eda14cbcSMatt Macy case SPA_LOG_MISSING: 2153eda14cbcSMatt Macy /* need to recheck in case slog has been restored */ 2154eda14cbcSMatt Macy case SPA_LOG_UNKNOWN: 2155eda14cbcSMatt Macy rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2156eda14cbcSMatt Macy zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0); 2157eda14cbcSMatt Macy if (rv) 2158eda14cbcSMatt Macy spa_set_log_state(spa, SPA_LOG_MISSING); 2159eda14cbcSMatt Macy break; 2160eda14cbcSMatt Macy } 2161eda14cbcSMatt Macy return (rv); 2162eda14cbcSMatt Macy } 2163eda14cbcSMatt Macy 2164184c1b94SMartin Matuska /* 2165184c1b94SMartin Matuska * Passivate any log vdevs (note, does not apply to embedded log metaslabs). 
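 * For every top-level log vdev the normal metaslab group (vdev_mg) is
 * passivated; such vdevs have no vdev_log_mg.  Returns B_TRUE if at
 * least one log vdev was found.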
2166184c1b94SMartin Matuska */ 2167eda14cbcSMatt Macy static boolean_t 2168eda14cbcSMatt Macy spa_passivate_log(spa_t *spa) 2169eda14cbcSMatt Macy { 2170eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 2171eda14cbcSMatt Macy boolean_t slog_found = B_FALSE; 2172eda14cbcSMatt Macy 2173eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 2174eda14cbcSMatt Macy 2175eda14cbcSMatt Macy for (int c = 0; c < rvd->vdev_children; c++) { 2176eda14cbcSMatt Macy vdev_t *tvd = rvd->vdev_child[c]; 2177eda14cbcSMatt Macy 2178eda14cbcSMatt Macy if (tvd->vdev_islog) { 2179184c1b94SMartin Matuska ASSERT3P(tvd->vdev_log_mg, ==, NULL); 2180184c1b94SMartin Matuska metaslab_group_passivate(tvd->vdev_mg); 2181eda14cbcSMatt Macy slog_found = B_TRUE; 2182eda14cbcSMatt Macy } 2183eda14cbcSMatt Macy } 2184eda14cbcSMatt Macy 2185eda14cbcSMatt Macy return (slog_found); 2186eda14cbcSMatt Macy } 2187eda14cbcSMatt Macy 2188184c1b94SMartin Matuska /* 2189184c1b94SMartin Matuska * Activate any log vdevs (note, does not apply to embedded log metaslabs). 2190184c1b94SMartin Matuska */ 2191eda14cbcSMatt Macy static void 2192eda14cbcSMatt Macy spa_activate_log(spa_t *spa) 2193eda14cbcSMatt Macy { 2194eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 2195eda14cbcSMatt Macy 2196eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 2197eda14cbcSMatt Macy 2198eda14cbcSMatt Macy for (int c = 0; c < rvd->vdev_children; c++) { 2199eda14cbcSMatt Macy vdev_t *tvd = rvd->vdev_child[c]; 2200eda14cbcSMatt Macy 2201184c1b94SMartin Matuska if (tvd->vdev_islog) { 2202184c1b94SMartin Matuska ASSERT3P(tvd->vdev_log_mg, ==, NULL); 2203184c1b94SMartin Matuska metaslab_group_activate(tvd->vdev_mg); 2204184c1b94SMartin Matuska } 2205eda14cbcSMatt Macy } 2206eda14cbcSMatt Macy } 2207eda14cbcSMatt Macy 2208eda14cbcSMatt Macy int 2209eda14cbcSMatt Macy spa_reset_logs(spa_t *spa) 2210eda14cbcSMatt Macy { 2211eda14cbcSMatt Macy int error; 2212eda14cbcSMatt Macy 2213eda14cbcSMatt Macy error = dmu_objset_find(spa_name(spa), zil_reset, 2214eda14cbcSMatt Macy NULL, DS_FIND_CHILDREN); 2215eda14cbcSMatt Macy if (error == 0) { 2216eda14cbcSMatt Macy /* 2217eda14cbcSMatt Macy * We successfully offlined the log device, sync out the 2218eda14cbcSMatt Macy * current txg so that the "stubby" block can be removed 2219eda14cbcSMatt Macy * by zil_sync(). 
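 * zil_reset is applied to every dataset in the pool by
 * dmu_objset_find(); the txg_wait_synced() call below then waits for
 * the currently open txg to sync before returning.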
2220eda14cbcSMatt Macy */ 2221eda14cbcSMatt Macy txg_wait_synced(spa->spa_dsl_pool, 0); 2222eda14cbcSMatt Macy } 2223eda14cbcSMatt Macy return (error); 2224eda14cbcSMatt Macy } 2225eda14cbcSMatt Macy 2226eda14cbcSMatt Macy static void 2227eda14cbcSMatt Macy spa_aux_check_removed(spa_aux_vdev_t *sav) 2228eda14cbcSMatt Macy { 2229eda14cbcSMatt Macy for (int i = 0; i < sav->sav_count; i++) 2230eda14cbcSMatt Macy spa_check_removed(sav->sav_vdevs[i]); 2231eda14cbcSMatt Macy } 2232eda14cbcSMatt Macy 2233eda14cbcSMatt Macy void 2234eda14cbcSMatt Macy spa_claim_notify(zio_t *zio) 2235eda14cbcSMatt Macy { 2236eda14cbcSMatt Macy spa_t *spa = zio->io_spa; 2237eda14cbcSMatt Macy 2238eda14cbcSMatt Macy if (zio->io_error) 2239eda14cbcSMatt Macy return; 2240eda14cbcSMatt Macy 2241eda14cbcSMatt Macy mutex_enter(&spa->spa_props_lock); /* any mutex will do */ 2242eda14cbcSMatt Macy if (spa->spa_claim_max_txg < zio->io_bp->blk_birth) 2243eda14cbcSMatt Macy spa->spa_claim_max_txg = zio->io_bp->blk_birth; 2244eda14cbcSMatt Macy mutex_exit(&spa->spa_props_lock); 2245eda14cbcSMatt Macy } 2246eda14cbcSMatt Macy 2247eda14cbcSMatt Macy typedef struct spa_load_error { 2248eda14cbcSMatt Macy uint64_t sle_meta_count; 2249eda14cbcSMatt Macy uint64_t sle_data_count; 2250eda14cbcSMatt Macy } spa_load_error_t; 2251eda14cbcSMatt Macy 2252eda14cbcSMatt Macy static void 2253eda14cbcSMatt Macy spa_load_verify_done(zio_t *zio) 2254eda14cbcSMatt Macy { 2255eda14cbcSMatt Macy blkptr_t *bp = zio->io_bp; 2256eda14cbcSMatt Macy spa_load_error_t *sle = zio->io_private; 2257eda14cbcSMatt Macy dmu_object_type_t type = BP_GET_TYPE(bp); 2258eda14cbcSMatt Macy int error = zio->io_error; 2259eda14cbcSMatt Macy spa_t *spa = zio->io_spa; 2260eda14cbcSMatt Macy 2261eda14cbcSMatt Macy abd_free(zio->io_abd); 2262eda14cbcSMatt Macy if (error) { 2263eda14cbcSMatt Macy if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && 2264eda14cbcSMatt Macy type != DMU_OT_INTENT_LOG) 2265eda14cbcSMatt Macy atomic_inc_64(&sle->sle_meta_count); 2266eda14cbcSMatt Macy else 2267eda14cbcSMatt Macy atomic_inc_64(&sle->sle_data_count); 2268eda14cbcSMatt Macy } 2269eda14cbcSMatt Macy 2270eda14cbcSMatt Macy mutex_enter(&spa->spa_scrub_lock); 2271eda14cbcSMatt Macy spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp); 2272eda14cbcSMatt Macy cv_broadcast(&spa->spa_scrub_io_cv); 2273eda14cbcSMatt Macy mutex_exit(&spa->spa_scrub_lock); 2274eda14cbcSMatt Macy } 2275eda14cbcSMatt Macy 2276eda14cbcSMatt Macy /* 2277eda14cbcSMatt Macy * Maximum number of inflight bytes is the log2 fraction of the arc size. 2278eda14cbcSMatt Macy * By default, we set it to 1/16th of the arc. 2279eda14cbcSMatt Macy */ 2280eda14cbcSMatt Macy int spa_load_verify_shift = 4; 2281eda14cbcSMatt Macy int spa_load_verify_metadata = B_TRUE; 2282eda14cbcSMatt Macy int spa_load_verify_data = B_TRUE; 2283eda14cbcSMatt Macy 2284eda14cbcSMatt Macy /*ARGSUSED*/ 2285eda14cbcSMatt Macy static int 2286eda14cbcSMatt Macy spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2287eda14cbcSMatt Macy const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2288eda14cbcSMatt Macy { 2289eda14cbcSMatt Macy if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) || 2290eda14cbcSMatt Macy BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) 2291eda14cbcSMatt Macy return (0); 2292eda14cbcSMatt Macy /* 2293eda14cbcSMatt Macy * Note: normally this routine will not be called if 2294eda14cbcSMatt Macy * spa_load_verify_metadata is not set. 
However, it may be useful 2295eda14cbcSMatt Macy * to manually set the flag after the traversal has begun. 2296eda14cbcSMatt Macy */ 2297eda14cbcSMatt Macy if (!spa_load_verify_metadata) 2298eda14cbcSMatt Macy return (0); 2299eda14cbcSMatt Macy if (!BP_IS_METADATA(bp) && !spa_load_verify_data) 2300eda14cbcSMatt Macy return (0); 2301eda14cbcSMatt Macy 2302eda14cbcSMatt Macy uint64_t maxinflight_bytes = 2303eda14cbcSMatt Macy arc_target_bytes() >> spa_load_verify_shift; 2304eda14cbcSMatt Macy zio_t *rio = arg; 2305eda14cbcSMatt Macy size_t size = BP_GET_PSIZE(bp); 2306eda14cbcSMatt Macy 2307eda14cbcSMatt Macy mutex_enter(&spa->spa_scrub_lock); 2308eda14cbcSMatt Macy while (spa->spa_load_verify_bytes >= maxinflight_bytes) 2309eda14cbcSMatt Macy cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2310eda14cbcSMatt Macy spa->spa_load_verify_bytes += size; 2311eda14cbcSMatt Macy mutex_exit(&spa->spa_scrub_lock); 2312eda14cbcSMatt Macy 2313eda14cbcSMatt Macy zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size, 2314eda14cbcSMatt Macy spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB, 2315eda14cbcSMatt Macy ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL | 2316eda14cbcSMatt Macy ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb)); 2317eda14cbcSMatt Macy return (0); 2318eda14cbcSMatt Macy } 2319eda14cbcSMatt Macy 2320eda14cbcSMatt Macy /* ARGSUSED */ 2321eda14cbcSMatt Macy static int 2322eda14cbcSMatt Macy verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) 2323eda14cbcSMatt Macy { 2324eda14cbcSMatt Macy if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN) 2325eda14cbcSMatt Macy return (SET_ERROR(ENAMETOOLONG)); 2326eda14cbcSMatt Macy 2327eda14cbcSMatt Macy return (0); 2328eda14cbcSMatt Macy } 2329eda14cbcSMatt Macy 2330eda14cbcSMatt Macy static int 2331eda14cbcSMatt Macy spa_load_verify(spa_t *spa) 2332eda14cbcSMatt Macy { 2333eda14cbcSMatt Macy zio_t *rio; 2334eda14cbcSMatt Macy spa_load_error_t sle = { 0 }; 2335eda14cbcSMatt Macy zpool_load_policy_t policy; 2336eda14cbcSMatt Macy boolean_t verify_ok = B_FALSE; 2337eda14cbcSMatt Macy int error = 0; 2338eda14cbcSMatt Macy 2339eda14cbcSMatt Macy zpool_get_load_policy(spa->spa_config, &policy); 2340eda14cbcSMatt Macy 2341eda14cbcSMatt Macy if (policy.zlp_rewind & ZPOOL_NEVER_REWIND) 2342eda14cbcSMatt Macy return (0); 2343eda14cbcSMatt Macy 2344eda14cbcSMatt Macy dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 2345eda14cbcSMatt Macy error = dmu_objset_find_dp(spa->spa_dsl_pool, 2346eda14cbcSMatt Macy spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL, 2347eda14cbcSMatt Macy DS_FIND_CHILDREN); 2348eda14cbcSMatt Macy dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 2349eda14cbcSMatt Macy if (error != 0) 2350eda14cbcSMatt Macy return (error); 2351eda14cbcSMatt Macy 2352eda14cbcSMatt Macy rio = zio_root(spa, NULL, &sle, 2353eda14cbcSMatt Macy ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 2354eda14cbcSMatt Macy 2355eda14cbcSMatt Macy if (spa_load_verify_metadata) { 2356eda14cbcSMatt Macy if (spa->spa_extreme_rewind) { 2357eda14cbcSMatt Macy spa_load_note(spa, "performing a complete scan of the " 2358eda14cbcSMatt Macy "pool since extreme rewind is on. 
This may take " 2359eda14cbcSMatt Macy "a very long time.\n (spa_load_verify_data=%u, " 2360eda14cbcSMatt Macy "spa_load_verify_metadata=%u)", 2361eda14cbcSMatt Macy spa_load_verify_data, spa_load_verify_metadata); 2362eda14cbcSMatt Macy } 2363eda14cbcSMatt Macy 2364eda14cbcSMatt Macy error = traverse_pool(spa, spa->spa_verify_min_txg, 2365eda14cbcSMatt Macy TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | 2366eda14cbcSMatt Macy TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio); 2367eda14cbcSMatt Macy } 2368eda14cbcSMatt Macy 2369eda14cbcSMatt Macy (void) zio_wait(rio); 2370eda14cbcSMatt Macy ASSERT0(spa->spa_load_verify_bytes); 2371eda14cbcSMatt Macy 2372eda14cbcSMatt Macy spa->spa_load_meta_errors = sle.sle_meta_count; 2373eda14cbcSMatt Macy spa->spa_load_data_errors = sle.sle_data_count; 2374eda14cbcSMatt Macy 2375eda14cbcSMatt Macy if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) { 2376eda14cbcSMatt Macy spa_load_note(spa, "spa_load_verify found %llu metadata errors " 2377eda14cbcSMatt Macy "and %llu data errors", (u_longlong_t)sle.sle_meta_count, 2378eda14cbcSMatt Macy (u_longlong_t)sle.sle_data_count); 2379eda14cbcSMatt Macy } 2380eda14cbcSMatt Macy 2381eda14cbcSMatt Macy if (spa_load_verify_dryrun || 2382eda14cbcSMatt Macy (!error && sle.sle_meta_count <= policy.zlp_maxmeta && 2383eda14cbcSMatt Macy sle.sle_data_count <= policy.zlp_maxdata)) { 2384eda14cbcSMatt Macy int64_t loss = 0; 2385eda14cbcSMatt Macy 2386eda14cbcSMatt Macy verify_ok = B_TRUE; 2387eda14cbcSMatt Macy spa->spa_load_txg = spa->spa_uberblock.ub_txg; 2388eda14cbcSMatt Macy spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; 2389eda14cbcSMatt Macy 2390eda14cbcSMatt Macy loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; 2391eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(spa->spa_load_info, 2392eda14cbcSMatt Macy ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0); 2393eda14cbcSMatt Macy VERIFY(nvlist_add_int64(spa->spa_load_info, 2394eda14cbcSMatt Macy ZPOOL_CONFIG_REWIND_TIME, loss) == 0); 2395eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(spa->spa_load_info, 2396eda14cbcSMatt Macy ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0); 2397eda14cbcSMatt Macy } else { 2398eda14cbcSMatt Macy spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; 2399eda14cbcSMatt Macy } 2400eda14cbcSMatt Macy 2401eda14cbcSMatt Macy if (spa_load_verify_dryrun) 2402eda14cbcSMatt Macy return (0); 2403eda14cbcSMatt Macy 2404eda14cbcSMatt Macy if (error) { 2405eda14cbcSMatt Macy if (error != ENXIO && error != EIO) 2406eda14cbcSMatt Macy error = SET_ERROR(EIO); 2407eda14cbcSMatt Macy return (error); 2408eda14cbcSMatt Macy } 2409eda14cbcSMatt Macy 2410eda14cbcSMatt Macy return (verify_ok ? 0 : EIO); 2411eda14cbcSMatt Macy } 2412eda14cbcSMatt Macy 2413eda14cbcSMatt Macy /* 2414eda14cbcSMatt Macy * Find a value in the pool props object. 2415eda14cbcSMatt Macy */ 2416eda14cbcSMatt Macy static void 2417eda14cbcSMatt Macy spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 2418eda14cbcSMatt Macy { 2419eda14cbcSMatt Macy (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 2420eda14cbcSMatt Macy zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 2421eda14cbcSMatt Macy } 2422eda14cbcSMatt Macy 2423eda14cbcSMatt Macy /* 2424eda14cbcSMatt Macy * Find a value in the pool directory object. 
2425eda14cbcSMatt Macy */ 2426eda14cbcSMatt Macy static int 2427eda14cbcSMatt Macy spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent) 2428eda14cbcSMatt Macy { 2429eda14cbcSMatt Macy int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2430eda14cbcSMatt Macy name, sizeof (uint64_t), 1, val); 2431eda14cbcSMatt Macy 2432eda14cbcSMatt Macy if (error != 0 && (error != ENOENT || log_enoent)) { 2433eda14cbcSMatt Macy spa_load_failed(spa, "couldn't get '%s' value in MOS directory " 2434eda14cbcSMatt Macy "[error=%d]", name, error); 2435eda14cbcSMatt Macy } 2436eda14cbcSMatt Macy 2437eda14cbcSMatt Macy return (error); 2438eda14cbcSMatt Macy } 2439eda14cbcSMatt Macy 2440eda14cbcSMatt Macy static int 2441eda14cbcSMatt Macy spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 2442eda14cbcSMatt Macy { 2443eda14cbcSMatt Macy vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 2444eda14cbcSMatt Macy return (SET_ERROR(err)); 2445eda14cbcSMatt Macy } 2446eda14cbcSMatt Macy 2447eda14cbcSMatt Macy boolean_t 2448eda14cbcSMatt Macy spa_livelist_delete_check(spa_t *spa) 2449eda14cbcSMatt Macy { 2450eda14cbcSMatt Macy return (spa->spa_livelists_to_delete != 0); 2451eda14cbcSMatt Macy } 2452eda14cbcSMatt Macy 2453eda14cbcSMatt Macy /* ARGSUSED */ 2454eda14cbcSMatt Macy static boolean_t 2455eda14cbcSMatt Macy spa_livelist_delete_cb_check(void *arg, zthr_t *z) 2456eda14cbcSMatt Macy { 2457eda14cbcSMatt Macy spa_t *spa = arg; 2458eda14cbcSMatt Macy return (spa_livelist_delete_check(spa)); 2459eda14cbcSMatt Macy } 2460eda14cbcSMatt Macy 2461eda14cbcSMatt Macy static int 2462eda14cbcSMatt Macy delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2463eda14cbcSMatt Macy { 2464eda14cbcSMatt Macy spa_t *spa = arg; 2465eda14cbcSMatt Macy zio_free(spa, tx->tx_txg, bp); 2466eda14cbcSMatt Macy dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, 2467eda14cbcSMatt Macy -bp_get_dsize_sync(spa, bp), 2468eda14cbcSMatt Macy -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); 2469eda14cbcSMatt Macy return (0); 2470eda14cbcSMatt Macy } 2471eda14cbcSMatt Macy 2472eda14cbcSMatt Macy static int 2473eda14cbcSMatt Macy dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp) 2474eda14cbcSMatt Macy { 2475eda14cbcSMatt Macy int err; 2476eda14cbcSMatt Macy zap_cursor_t zc; 2477eda14cbcSMatt Macy zap_attribute_t za; 2478eda14cbcSMatt Macy zap_cursor_init(&zc, os, zap_obj); 2479eda14cbcSMatt Macy err = zap_cursor_retrieve(&zc, &za); 2480eda14cbcSMatt Macy zap_cursor_fini(&zc); 2481eda14cbcSMatt Macy if (err == 0) 2482eda14cbcSMatt Macy *llp = za.za_first_integer; 2483eda14cbcSMatt Macy return (err); 2484eda14cbcSMatt Macy } 2485eda14cbcSMatt Macy 2486eda14cbcSMatt Macy /* 2487eda14cbcSMatt Macy * Components of livelist deletion that must be performed in syncing 2488eda14cbcSMatt Macy * context: freeing block pointers and updating the pool-wide data 2489eda14cbcSMatt Macy * structures to indicate how much work is left to do 2490eda14cbcSMatt Macy */ 2491eda14cbcSMatt Macy typedef struct sublist_delete_arg { 2492eda14cbcSMatt Macy spa_t *spa; 2493eda14cbcSMatt Macy dsl_deadlist_t *ll; 2494eda14cbcSMatt Macy uint64_t key; 2495eda14cbcSMatt Macy bplist_t *to_free; 2496eda14cbcSMatt Macy } sublist_delete_arg_t; 2497eda14cbcSMatt Macy 2498eda14cbcSMatt Macy static void 2499eda14cbcSMatt Macy sublist_delete_sync(void *arg, dmu_tx_t *tx) 2500eda14cbcSMatt Macy { 2501eda14cbcSMatt Macy sublist_delete_arg_t *sda = arg; 2502eda14cbcSMatt Macy spa_t *spa = sda->spa; 
2503eda14cbcSMatt Macy dsl_deadlist_t *ll = sda->ll; 2504eda14cbcSMatt Macy uint64_t key = sda->key; 2505eda14cbcSMatt Macy bplist_t *to_free = sda->to_free; 2506eda14cbcSMatt Macy 2507eda14cbcSMatt Macy bplist_iterate(to_free, delete_blkptr_cb, spa, tx); 2508eda14cbcSMatt Macy dsl_deadlist_remove_entry(ll, key, tx); 2509eda14cbcSMatt Macy } 2510eda14cbcSMatt Macy 2511eda14cbcSMatt Macy typedef struct livelist_delete_arg { 2512eda14cbcSMatt Macy spa_t *spa; 2513eda14cbcSMatt Macy uint64_t ll_obj; 2514eda14cbcSMatt Macy uint64_t zap_obj; 2515eda14cbcSMatt Macy } livelist_delete_arg_t; 2516eda14cbcSMatt Macy 2517eda14cbcSMatt Macy static void 2518eda14cbcSMatt Macy livelist_delete_sync(void *arg, dmu_tx_t *tx) 2519eda14cbcSMatt Macy { 2520eda14cbcSMatt Macy livelist_delete_arg_t *lda = arg; 2521eda14cbcSMatt Macy spa_t *spa = lda->spa; 2522eda14cbcSMatt Macy uint64_t ll_obj = lda->ll_obj; 2523eda14cbcSMatt Macy uint64_t zap_obj = lda->zap_obj; 2524eda14cbcSMatt Macy objset_t *mos = spa->spa_meta_objset; 2525eda14cbcSMatt Macy uint64_t count; 2526eda14cbcSMatt Macy 2527eda14cbcSMatt Macy /* free the livelist and decrement the feature count */ 2528eda14cbcSMatt Macy VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx)); 2529eda14cbcSMatt Macy dsl_deadlist_free(mos, ll_obj, tx); 2530eda14cbcSMatt Macy spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx); 2531eda14cbcSMatt Macy VERIFY0(zap_count(mos, zap_obj, &count)); 2532eda14cbcSMatt Macy if (count == 0) { 2533eda14cbcSMatt Macy /* no more livelists to delete */ 2534eda14cbcSMatt Macy VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT, 2535eda14cbcSMatt Macy DMU_POOL_DELETED_CLONES, tx)); 2536eda14cbcSMatt Macy VERIFY0(zap_destroy(mos, zap_obj, tx)); 2537eda14cbcSMatt Macy spa->spa_livelists_to_delete = 0; 2538eda14cbcSMatt Macy spa_notify_waiters(spa); 2539eda14cbcSMatt Macy } 2540eda14cbcSMatt Macy } 2541eda14cbcSMatt Macy 2542eda14cbcSMatt Macy /* 2543eda14cbcSMatt Macy * Load in the value for the livelist to be removed and open it. Then, 2544eda14cbcSMatt Macy * load its first sublist and determine which block pointers should actually 2545eda14cbcSMatt Macy * be freed. Then, call a synctask which performs the actual frees and updates 2546eda14cbcSMatt Macy * the pool-wide livelist data. 2547eda14cbcSMatt Macy */ 2548eda14cbcSMatt Macy /* ARGSUSED */ 2549eda14cbcSMatt Macy static void 2550eda14cbcSMatt Macy spa_livelist_delete_cb(void *arg, zthr_t *z) 2551eda14cbcSMatt Macy { 2552eda14cbcSMatt Macy spa_t *spa = arg; 2553eda14cbcSMatt Macy uint64_t ll_obj = 0, count; 2554eda14cbcSMatt Macy objset_t *mos = spa->spa_meta_objset; 2555eda14cbcSMatt Macy uint64_t zap_obj = spa->spa_livelists_to_delete; 2556eda14cbcSMatt Macy /* 2557eda14cbcSMatt Macy * Determine the next livelist to delete. This function should only 2558eda14cbcSMatt Macy * be called if there is at least one deleted clone. 
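 * (spa->spa_livelists_to_delete names the ZAP of deleted clones; the
 * cursor in dsl_get_next_livelist_obj() above just returns its first
 * entry, so each pass of this callback picks off one livelist at a time.)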
2559eda14cbcSMatt Macy */ 2560eda14cbcSMatt Macy VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj)); 2561eda14cbcSMatt Macy VERIFY0(zap_count(mos, ll_obj, &count)); 2562eda14cbcSMatt Macy if (count > 0) { 25632c48331dSMatt Macy dsl_deadlist_t *ll; 2564eda14cbcSMatt Macy dsl_deadlist_entry_t *dle; 2565eda14cbcSMatt Macy bplist_t to_free; 25662c48331dSMatt Macy ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP); 25672c48331dSMatt Macy dsl_deadlist_open(ll, mos, ll_obj); 25682c48331dSMatt Macy dle = dsl_deadlist_first(ll); 2569eda14cbcSMatt Macy ASSERT3P(dle, !=, NULL); 2570eda14cbcSMatt Macy bplist_create(&to_free); 2571eda14cbcSMatt Macy int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free, 2572eda14cbcSMatt Macy z, NULL); 2573eda14cbcSMatt Macy if (err == 0) { 2574eda14cbcSMatt Macy sublist_delete_arg_t sync_arg = { 2575eda14cbcSMatt Macy .spa = spa, 25762c48331dSMatt Macy .ll = ll, 2577eda14cbcSMatt Macy .key = dle->dle_mintxg, 2578eda14cbcSMatt Macy .to_free = &to_free 2579eda14cbcSMatt Macy }; 2580eda14cbcSMatt Macy zfs_dbgmsg("deleting sublist (id %llu) from" 258133b8c039SMartin Matuska " livelist %llu, %lld remaining", 258233b8c039SMartin Matuska (u_longlong_t)dle->dle_bpobj.bpo_object, 258333b8c039SMartin Matuska (u_longlong_t)ll_obj, (longlong_t)count - 1); 2584eda14cbcSMatt Macy VERIFY0(dsl_sync_task(spa_name(spa), NULL, 2585eda14cbcSMatt Macy sublist_delete_sync, &sync_arg, 0, 2586eda14cbcSMatt Macy ZFS_SPACE_CHECK_DESTROY)); 2587eda14cbcSMatt Macy } else { 2588eda14cbcSMatt Macy VERIFY3U(err, ==, EINTR); 2589eda14cbcSMatt Macy } 2590eda14cbcSMatt Macy bplist_clear(&to_free); 2591eda14cbcSMatt Macy bplist_destroy(&to_free); 25922c48331dSMatt Macy dsl_deadlist_close(ll); 25932c48331dSMatt Macy kmem_free(ll, sizeof (dsl_deadlist_t)); 2594eda14cbcSMatt Macy } else { 2595eda14cbcSMatt Macy livelist_delete_arg_t sync_arg = { 2596eda14cbcSMatt Macy .spa = spa, 2597eda14cbcSMatt Macy .ll_obj = ll_obj, 2598eda14cbcSMatt Macy .zap_obj = zap_obj 2599eda14cbcSMatt Macy }; 260033b8c039SMartin Matuska zfs_dbgmsg("deletion of livelist %llu completed", 260133b8c039SMartin Matuska (u_longlong_t)ll_obj); 2602eda14cbcSMatt Macy VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync, 2603eda14cbcSMatt Macy &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY)); 2604eda14cbcSMatt Macy } 2605eda14cbcSMatt Macy } 2606eda14cbcSMatt Macy 2607eda14cbcSMatt Macy static void 2608eda14cbcSMatt Macy spa_start_livelist_destroy_thread(spa_t *spa) 2609eda14cbcSMatt Macy { 2610eda14cbcSMatt Macy ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL); 2611eda14cbcSMatt Macy spa->spa_livelist_delete_zthr = 2612eda14cbcSMatt Macy zthr_create("z_livelist_destroy", 2613*2faf504dSMartin Matuska spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa, 2614*2faf504dSMartin Matuska minclsyspri); 2615eda14cbcSMatt Macy } 2616eda14cbcSMatt Macy 2617eda14cbcSMatt Macy typedef struct livelist_new_arg { 2618eda14cbcSMatt Macy bplist_t *allocs; 2619eda14cbcSMatt Macy bplist_t *frees; 2620eda14cbcSMatt Macy } livelist_new_arg_t; 2621eda14cbcSMatt Macy 2622eda14cbcSMatt Macy static int 2623eda14cbcSMatt Macy livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 2624eda14cbcSMatt Macy dmu_tx_t *tx) 2625eda14cbcSMatt Macy { 2626eda14cbcSMatt Macy ASSERT(tx == NULL); 2627eda14cbcSMatt Macy livelist_new_arg_t *lna = arg; 2628eda14cbcSMatt Macy if (bp_freed) { 2629eda14cbcSMatt Macy bplist_append(lna->frees, bp); 2630eda14cbcSMatt Macy } else { 2631eda14cbcSMatt Macy bplist_append(lna->allocs, bp); 
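		/*
		 * Diagnostic counter: an ALLOC blkptr that showed up while
		 * the condense zthr was already running (see the comment in
		 * spa_livelist_condense_sync()).
		 */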
2632eda14cbcSMatt Macy zfs_livelist_condense_new_alloc++; 2633eda14cbcSMatt Macy } 2634eda14cbcSMatt Macy return (0); 2635eda14cbcSMatt Macy } 2636eda14cbcSMatt Macy 2637eda14cbcSMatt Macy typedef struct livelist_condense_arg { 2638eda14cbcSMatt Macy spa_t *spa; 2639eda14cbcSMatt Macy bplist_t to_keep; 2640eda14cbcSMatt Macy uint64_t first_size; 2641eda14cbcSMatt Macy uint64_t next_size; 2642eda14cbcSMatt Macy } livelist_condense_arg_t; 2643eda14cbcSMatt Macy 2644eda14cbcSMatt Macy static void 2645eda14cbcSMatt Macy spa_livelist_condense_sync(void *arg, dmu_tx_t *tx) 2646eda14cbcSMatt Macy { 2647eda14cbcSMatt Macy livelist_condense_arg_t *lca = arg; 2648eda14cbcSMatt Macy spa_t *spa = lca->spa; 2649eda14cbcSMatt Macy bplist_t new_frees; 2650eda14cbcSMatt Macy dsl_dataset_t *ds = spa->spa_to_condense.ds; 2651eda14cbcSMatt Macy 2652eda14cbcSMatt Macy /* Have we been cancelled? */ 2653eda14cbcSMatt Macy if (spa->spa_to_condense.cancelled) { 2654eda14cbcSMatt Macy zfs_livelist_condense_sync_cancel++; 2655eda14cbcSMatt Macy goto out; 2656eda14cbcSMatt Macy } 2657eda14cbcSMatt Macy 2658eda14cbcSMatt Macy dsl_deadlist_entry_t *first = spa->spa_to_condense.first; 2659eda14cbcSMatt Macy dsl_deadlist_entry_t *next = spa->spa_to_condense.next; 2660eda14cbcSMatt Macy dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist; 2661eda14cbcSMatt Macy 2662eda14cbcSMatt Macy /* 2663eda14cbcSMatt Macy * It's possible that the livelist was changed while the zthr was 2664eda14cbcSMatt Macy * running. Therefore, we need to check for new blkptrs in the two 2665eda14cbcSMatt Macy * entries being condensed and continue to track them in the livelist. 2666eda14cbcSMatt Macy * Because of the way we handle remapped blkptrs (see dbuf_remap_impl), 2667eda14cbcSMatt Macy * it's possible that the newly added blkptrs are FREEs or ALLOCs so 2668eda14cbcSMatt Macy * we need to sort them into two different bplists. 
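 * New ALLOCs are appended to lca->to_keep so they survive the condense
 * alongside the entries already processed in open context; new FREEs are
 * collected in new_frees and re-inserted as frees further down.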
2669eda14cbcSMatt Macy */ 2670eda14cbcSMatt Macy uint64_t first_obj = first->dle_bpobj.bpo_object; 2671eda14cbcSMatt Macy uint64_t next_obj = next->dle_bpobj.bpo_object; 2672eda14cbcSMatt Macy uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs; 2673eda14cbcSMatt Macy uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs; 2674eda14cbcSMatt Macy 2675eda14cbcSMatt Macy bplist_create(&new_frees); 2676eda14cbcSMatt Macy livelist_new_arg_t new_bps = { 2677eda14cbcSMatt Macy .allocs = &lca->to_keep, 2678eda14cbcSMatt Macy .frees = &new_frees, 2679eda14cbcSMatt Macy }; 2680eda14cbcSMatt Macy 2681eda14cbcSMatt Macy if (cur_first_size > lca->first_size) { 2682eda14cbcSMatt Macy VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj, 2683eda14cbcSMatt Macy livelist_track_new_cb, &new_bps, lca->first_size)); 2684eda14cbcSMatt Macy } 2685eda14cbcSMatt Macy if (cur_next_size > lca->next_size) { 2686eda14cbcSMatt Macy VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj, 2687eda14cbcSMatt Macy livelist_track_new_cb, &new_bps, lca->next_size)); 2688eda14cbcSMatt Macy } 2689eda14cbcSMatt Macy 2690eda14cbcSMatt Macy dsl_deadlist_clear_entry(first, ll, tx); 2691eda14cbcSMatt Macy ASSERT(bpobj_is_empty(&first->dle_bpobj)); 2692eda14cbcSMatt Macy dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx); 2693eda14cbcSMatt Macy 2694eda14cbcSMatt Macy bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx); 2695eda14cbcSMatt Macy bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx); 2696eda14cbcSMatt Macy bplist_destroy(&new_frees); 2697eda14cbcSMatt Macy 2698eda14cbcSMatt Macy char dsname[ZFS_MAX_DATASET_NAME_LEN]; 2699eda14cbcSMatt Macy dsl_dataset_name(ds, dsname); 2700eda14cbcSMatt Macy zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu " 2701eda14cbcSMatt Macy "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu " 270233b8c039SMartin Matuska "(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname, 270333b8c039SMartin Matuska (u_longlong_t)ds->ds_object, (u_longlong_t)first_obj, 270433b8c039SMartin Matuska (u_longlong_t)cur_first_size, (u_longlong_t)next_obj, 270533b8c039SMartin Matuska (u_longlong_t)cur_next_size, 270633b8c039SMartin Matuska (u_longlong_t)first->dle_bpobj.bpo_object, 270733b8c039SMartin Matuska (u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs); 2708eda14cbcSMatt Macy out: 2709eda14cbcSMatt Macy dmu_buf_rele(ds->ds_dbuf, spa); 2710eda14cbcSMatt Macy spa->spa_to_condense.ds = NULL; 2711eda14cbcSMatt Macy bplist_clear(&lca->to_keep); 2712eda14cbcSMatt Macy bplist_destroy(&lca->to_keep); 2713eda14cbcSMatt Macy kmem_free(lca, sizeof (livelist_condense_arg_t)); 2714eda14cbcSMatt Macy spa->spa_to_condense.syncing = B_FALSE; 2715eda14cbcSMatt Macy } 2716eda14cbcSMatt Macy 2717eda14cbcSMatt Macy static void 2718eda14cbcSMatt Macy spa_livelist_condense_cb(void *arg, zthr_t *t) 2719eda14cbcSMatt Macy { 2720eda14cbcSMatt Macy while (zfs_livelist_condense_zthr_pause && 2721eda14cbcSMatt Macy !(zthr_has_waiters(t) || zthr_iscancelled(t))) 2722eda14cbcSMatt Macy delay(1); 2723eda14cbcSMatt Macy 2724eda14cbcSMatt Macy spa_t *spa = arg; 2725eda14cbcSMatt Macy dsl_deadlist_entry_t *first = spa->spa_to_condense.first; 2726eda14cbcSMatt Macy dsl_deadlist_entry_t *next = spa->spa_to_condense.next; 2727eda14cbcSMatt Macy uint64_t first_size, next_size; 2728eda14cbcSMatt Macy 2729eda14cbcSMatt Macy livelist_condense_arg_t *lca = 2730eda14cbcSMatt Macy kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP); 2731eda14cbcSMatt 
Macy bplist_create(&lca->to_keep); 2732eda14cbcSMatt Macy 2733eda14cbcSMatt Macy /* 2734eda14cbcSMatt Macy * Process the livelists (matching FREEs and ALLOCs) in open context 2735eda14cbcSMatt Macy * so we have minimal work in syncing context to condense. 2736eda14cbcSMatt Macy * 2737eda14cbcSMatt Macy * We save bpobj sizes (first_size and next_size) to use later in 2738eda14cbcSMatt Macy * syncing context to determine if entries were added to these sublists 2739eda14cbcSMatt Macy * while in open context. This is possible because the clone is still 2740eda14cbcSMatt Macy * active and open for normal writes and we want to make sure the new, 2741eda14cbcSMatt Macy * unprocessed blockpointers are inserted into the livelist normally. 2742eda14cbcSMatt Macy * 2743eda14cbcSMatt Macy * Note that dsl_process_sub_livelist() both stores the size (number of 2744eda14cbcSMatt Macy * blockpointers) and iterates over them while the bpobj's lock is held, so 2745eda14cbcSMatt Macy * the sizes returned to us are consistent with what was actually 2746eda14cbcSMatt Macy * processed. 2747eda14cbcSMatt Macy */ 2748eda14cbcSMatt Macy int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t, 2749eda14cbcSMatt Macy &first_size); 2750eda14cbcSMatt Macy if (err == 0) 2751eda14cbcSMatt Macy err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep, 2752eda14cbcSMatt Macy t, &next_size); 2753eda14cbcSMatt Macy 2754eda14cbcSMatt Macy if (err == 0) { 2755eda14cbcSMatt Macy while (zfs_livelist_condense_sync_pause && 2756eda14cbcSMatt Macy !(zthr_has_waiters(t) || zthr_iscancelled(t))) 2757eda14cbcSMatt Macy delay(1); 2758eda14cbcSMatt Macy 2759eda14cbcSMatt Macy dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 2760eda14cbcSMatt Macy dmu_tx_mark_netfree(tx); 2761eda14cbcSMatt Macy dmu_tx_hold_space(tx, 1); 2762eda14cbcSMatt Macy err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE); 2763eda14cbcSMatt Macy if (err == 0) { 2764eda14cbcSMatt Macy /* 2765eda14cbcSMatt Macy * Prevent the condense zthr from restarting before 2766eda14cbcSMatt Macy * the synctask completes. 2767eda14cbcSMatt Macy */ 2768eda14cbcSMatt Macy spa->spa_to_condense.syncing = B_TRUE; 2769eda14cbcSMatt Macy lca->spa = spa; 2770eda14cbcSMatt Macy lca->first_size = first_size; 2771eda14cbcSMatt Macy lca->next_size = next_size; 2772eda14cbcSMatt Macy dsl_sync_task_nowait(spa_get_dsl(spa), 27732c48331dSMatt Macy spa_livelist_condense_sync, lca, tx); 2774eda14cbcSMatt Macy dmu_tx_commit(tx); 2775eda14cbcSMatt Macy return; 2776eda14cbcSMatt Macy } 2777eda14cbcSMatt Macy } 2778eda14cbcSMatt Macy /* 2779eda14cbcSMatt Macy * Condensing cannot continue: either it was externally stopped or 2780eda14cbcSMatt Macy * we were unable to assign to a tx because the pool has run out of 2781eda14cbcSMatt Macy * space. In the second case, we'll just end up trying to condense 2782eda14cbcSMatt Macy * again in a later txg.
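 * (An EINTR from dsl_process_sub_livelist() means the zthr was cancelled
 * or acquired waiters mid-scan; that case is tallied below via
 * zfs_livelist_condense_zthr_cancel.)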
2783eda14cbcSMatt Macy */ 2784eda14cbcSMatt Macy ASSERT(err != 0); 2785eda14cbcSMatt Macy bplist_clear(&lca->to_keep); 2786eda14cbcSMatt Macy bplist_destroy(&lca->to_keep); 2787eda14cbcSMatt Macy kmem_free(lca, sizeof (livelist_condense_arg_t)); 2788eda14cbcSMatt Macy dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa); 2789eda14cbcSMatt Macy spa->spa_to_condense.ds = NULL; 2790eda14cbcSMatt Macy if (err == EINTR) 2791eda14cbcSMatt Macy zfs_livelist_condense_zthr_cancel++; 2792eda14cbcSMatt Macy } 2793eda14cbcSMatt Macy 2794eda14cbcSMatt Macy /* ARGSUSED */ 2795eda14cbcSMatt Macy /* 2796eda14cbcSMatt Macy * Check that there is something to condense but that a condense is not 2797eda14cbcSMatt Macy * already in progress and that condensing has not been cancelled. 2798eda14cbcSMatt Macy */ 2799eda14cbcSMatt Macy static boolean_t 2800eda14cbcSMatt Macy spa_livelist_condense_cb_check(void *arg, zthr_t *z) 2801eda14cbcSMatt Macy { 2802eda14cbcSMatt Macy spa_t *spa = arg; 2803eda14cbcSMatt Macy if ((spa->spa_to_condense.ds != NULL) && 2804eda14cbcSMatt Macy (spa->spa_to_condense.syncing == B_FALSE) && 2805eda14cbcSMatt Macy (spa->spa_to_condense.cancelled == B_FALSE)) { 2806eda14cbcSMatt Macy return (B_TRUE); 2807eda14cbcSMatt Macy } 2808eda14cbcSMatt Macy return (B_FALSE); 2809eda14cbcSMatt Macy } 2810eda14cbcSMatt Macy 2811eda14cbcSMatt Macy static void 2812eda14cbcSMatt Macy spa_start_livelist_condensing_thread(spa_t *spa) 2813eda14cbcSMatt Macy { 2814eda14cbcSMatt Macy spa->spa_to_condense.ds = NULL; 2815eda14cbcSMatt Macy spa->spa_to_condense.first = NULL; 2816eda14cbcSMatt Macy spa->spa_to_condense.next = NULL; 2817eda14cbcSMatt Macy spa->spa_to_condense.syncing = B_FALSE; 2818eda14cbcSMatt Macy spa->spa_to_condense.cancelled = B_FALSE; 2819eda14cbcSMatt Macy 2820eda14cbcSMatt Macy ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL); 2821eda14cbcSMatt Macy spa->spa_livelist_condense_zthr = 2822eda14cbcSMatt Macy zthr_create("z_livelist_condense", 2823eda14cbcSMatt Macy spa_livelist_condense_cb_check, 2824*2faf504dSMartin Matuska spa_livelist_condense_cb, spa, minclsyspri); 2825eda14cbcSMatt Macy } 2826eda14cbcSMatt Macy 2827eda14cbcSMatt Macy static void 2828eda14cbcSMatt Macy spa_spawn_aux_threads(spa_t *spa) 2829eda14cbcSMatt Macy { 2830eda14cbcSMatt Macy ASSERT(spa_writeable(spa)); 2831eda14cbcSMatt Macy 2832eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2833eda14cbcSMatt Macy 2834eda14cbcSMatt Macy spa_start_indirect_condensing_thread(spa); 2835eda14cbcSMatt Macy spa_start_livelist_destroy_thread(spa); 2836eda14cbcSMatt Macy spa_start_livelist_condensing_thread(spa); 2837eda14cbcSMatt Macy 2838eda14cbcSMatt Macy ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL); 2839eda14cbcSMatt Macy spa->spa_checkpoint_discard_zthr = 2840eda14cbcSMatt Macy zthr_create("z_checkpoint_discard", 2841eda14cbcSMatt Macy spa_checkpoint_discard_thread_check, 2842*2faf504dSMartin Matuska spa_checkpoint_discard_thread, spa, minclsyspri); 2843eda14cbcSMatt Macy } 2844eda14cbcSMatt Macy 2845eda14cbcSMatt Macy /* 2846eda14cbcSMatt Macy * Fix up config after a partly-completed split. This is done with the 2847eda14cbcSMatt Macy * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 2848eda14cbcSMatt Macy * pool have that entry in their config, but only the splitting one contains 2849eda14cbcSMatt Macy * a list of all the guids of the vdevs that are being split off. 
2850eda14cbcSMatt Macy * 2851eda14cbcSMatt Macy * This function determines what to do with that list: either rejoin 2852eda14cbcSMatt Macy * all the disks to the pool, or complete the splitting process. To attempt 2853eda14cbcSMatt Macy * the rejoin, each disk that is offlined is marked online again, and 2854eda14cbcSMatt Macy * we do a reopen() call. If the vdev label for every disk that was 2855eda14cbcSMatt Macy * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 2856eda14cbcSMatt Macy * then we call vdev_split() on each disk, and complete the split. 2857eda14cbcSMatt Macy * 2858eda14cbcSMatt Macy * Otherwise we leave the config alone, with all the vdevs in place in 2859eda14cbcSMatt Macy * the original pool. 2860eda14cbcSMatt Macy */ 2861eda14cbcSMatt Macy static void 2862eda14cbcSMatt Macy spa_try_repair(spa_t *spa, nvlist_t *config) 2863eda14cbcSMatt Macy { 2864eda14cbcSMatt Macy uint_t extracted; 2865eda14cbcSMatt Macy uint64_t *glist; 2866eda14cbcSMatt Macy uint_t i, gcount; 2867eda14cbcSMatt Macy nvlist_t *nvl; 2868eda14cbcSMatt Macy vdev_t **vd; 2869eda14cbcSMatt Macy boolean_t attempt_reopen; 2870eda14cbcSMatt Macy 2871eda14cbcSMatt Macy if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 2872eda14cbcSMatt Macy return; 2873eda14cbcSMatt Macy 2874eda14cbcSMatt Macy /* check that the config is complete */ 2875eda14cbcSMatt Macy if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 2876eda14cbcSMatt Macy &glist, &gcount) != 0) 2877eda14cbcSMatt Macy return; 2878eda14cbcSMatt Macy 2879eda14cbcSMatt Macy vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 2880eda14cbcSMatt Macy 2881eda14cbcSMatt Macy /* attempt to online all the vdevs & validate */ 2882eda14cbcSMatt Macy attempt_reopen = B_TRUE; 2883eda14cbcSMatt Macy for (i = 0; i < gcount; i++) { 2884eda14cbcSMatt Macy if (glist[i] == 0) /* vdev is hole */ 2885eda14cbcSMatt Macy continue; 2886eda14cbcSMatt Macy 2887eda14cbcSMatt Macy vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 2888eda14cbcSMatt Macy if (vd[i] == NULL) { 2889eda14cbcSMatt Macy /* 2890eda14cbcSMatt Macy * Don't bother attempting to reopen the disks; 2891eda14cbcSMatt Macy * just do the split. 2892eda14cbcSMatt Macy */ 2893eda14cbcSMatt Macy attempt_reopen = B_FALSE; 2894eda14cbcSMatt Macy } else { 2895eda14cbcSMatt Macy /* attempt to re-online it */ 2896eda14cbcSMatt Macy vd[i]->vdev_offline = B_FALSE; 2897eda14cbcSMatt Macy } 2898eda14cbcSMatt Macy } 2899eda14cbcSMatt Macy 2900eda14cbcSMatt Macy if (attempt_reopen) { 2901eda14cbcSMatt Macy vdev_reopen(spa->spa_root_vdev); 2902eda14cbcSMatt Macy 2903eda14cbcSMatt Macy /* check each device to see what state it's in */ 2904eda14cbcSMatt Macy for (extracted = 0, i = 0; i < gcount; i++) { 2905eda14cbcSMatt Macy if (vd[i] != NULL && 2906eda14cbcSMatt Macy vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 2907eda14cbcSMatt Macy break; 2908eda14cbcSMatt Macy ++extracted; 2909eda14cbcSMatt Macy } 2910eda14cbcSMatt Macy } 2911eda14cbcSMatt Macy 2912eda14cbcSMatt Macy /* 2913eda14cbcSMatt Macy * If every disk has been moved to the new pool, or if we never 2914eda14cbcSMatt Macy * even attempted to look at them, then we split them off for 2915eda14cbcSMatt Macy * good. 
2916eda14cbcSMatt Macy */ 2917eda14cbcSMatt Macy if (!attempt_reopen || gcount == extracted) { 2918eda14cbcSMatt Macy for (i = 0; i < gcount; i++) 2919eda14cbcSMatt Macy if (vd[i] != NULL) 2920eda14cbcSMatt Macy vdev_split(vd[i]); 2921eda14cbcSMatt Macy vdev_reopen(spa->spa_root_vdev); 2922eda14cbcSMatt Macy } 2923eda14cbcSMatt Macy 2924eda14cbcSMatt Macy kmem_free(vd, gcount * sizeof (vdev_t *)); 2925eda14cbcSMatt Macy } 2926eda14cbcSMatt Macy 2927eda14cbcSMatt Macy static int 2928eda14cbcSMatt Macy spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type) 2929eda14cbcSMatt Macy { 2930eda14cbcSMatt Macy char *ereport = FM_EREPORT_ZFS_POOL; 2931eda14cbcSMatt Macy int error; 2932eda14cbcSMatt Macy 2933eda14cbcSMatt Macy spa->spa_load_state = state; 2934eda14cbcSMatt Macy (void) spa_import_progress_set_state(spa_guid(spa), 2935eda14cbcSMatt Macy spa_load_state(spa)); 2936eda14cbcSMatt Macy 2937eda14cbcSMatt Macy gethrestime(&spa->spa_loaded_ts); 2938eda14cbcSMatt Macy error = spa_load_impl(spa, type, &ereport); 2939eda14cbcSMatt Macy 2940eda14cbcSMatt Macy /* 2941eda14cbcSMatt Macy * Don't count references from objsets that are already closed 2942eda14cbcSMatt Macy * and are making their way through the eviction process. 2943eda14cbcSMatt Macy */ 2944eda14cbcSMatt Macy spa_evicting_os_wait(spa); 2945eda14cbcSMatt Macy spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 2946eda14cbcSMatt Macy if (error) { 2947eda14cbcSMatt Macy if (error != EEXIST) { 2948eda14cbcSMatt Macy spa->spa_loaded_ts.tv_sec = 0; 2949eda14cbcSMatt Macy spa->spa_loaded_ts.tv_nsec = 0; 2950eda14cbcSMatt Macy } 2951eda14cbcSMatt Macy if (error != EBADF) { 2952eac7052fSMatt Macy (void) zfs_ereport_post(ereport, spa, 29532c48331dSMatt Macy NULL, NULL, NULL, 0); 2954eda14cbcSMatt Macy } 2955eda14cbcSMatt Macy } 2956eda14cbcSMatt Macy spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2957eda14cbcSMatt Macy spa->spa_ena = 0; 2958eda14cbcSMatt Macy 2959eda14cbcSMatt Macy (void) spa_import_progress_set_state(spa_guid(spa), 2960eda14cbcSMatt Macy spa_load_state(spa)); 2961eda14cbcSMatt Macy 2962eda14cbcSMatt Macy return (error); 2963eda14cbcSMatt Macy } 2964eda14cbcSMatt Macy 2965eda14cbcSMatt Macy #ifdef ZFS_DEBUG 2966eda14cbcSMatt Macy /* 2967eda14cbcSMatt Macy * Count the number of per-vdev ZAPs associated with all of the vdevs in the 2968eda14cbcSMatt Macy * vdev tree rooted in the given vd, and ensure that each ZAP is present in the 2969eda14cbcSMatt Macy * spa's per-vdev ZAP list. 
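 * (Debug-only helper: it exists so callers can ASSERT that every ZAP
 * reachable in the vdev tree also appears in spa_all_vdev_zaps, and that
 * the counts agree.)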
2970eda14cbcSMatt Macy */ 2971eda14cbcSMatt Macy static uint64_t 2972eda14cbcSMatt Macy vdev_count_verify_zaps(vdev_t *vd) 2973eda14cbcSMatt Macy { 2974eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2975eda14cbcSMatt Macy uint64_t total = 0; 2976eda14cbcSMatt Macy 2977eda14cbcSMatt Macy if (vd->vdev_top_zap != 0) { 2978eda14cbcSMatt Macy total++; 2979eda14cbcSMatt Macy ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2980eda14cbcSMatt Macy spa->spa_all_vdev_zaps, vd->vdev_top_zap)); 2981eda14cbcSMatt Macy } 2982eda14cbcSMatt Macy if (vd->vdev_leaf_zap != 0) { 2983eda14cbcSMatt Macy total++; 2984eda14cbcSMatt Macy ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2985eda14cbcSMatt Macy spa->spa_all_vdev_zaps, vd->vdev_leaf_zap)); 2986eda14cbcSMatt Macy } 2987eda14cbcSMatt Macy 2988eda14cbcSMatt Macy for (uint64_t i = 0; i < vd->vdev_children; i++) { 2989eda14cbcSMatt Macy total += vdev_count_verify_zaps(vd->vdev_child[i]); 2990eda14cbcSMatt Macy } 2991eda14cbcSMatt Macy 2992eda14cbcSMatt Macy return (total); 2993eda14cbcSMatt Macy } 2994eda14cbcSMatt Macy #endif 2995eda14cbcSMatt Macy 2996eda14cbcSMatt Macy /* 2997eda14cbcSMatt Macy * Determine whether the activity check is required. 2998eda14cbcSMatt Macy */ 2999eda14cbcSMatt Macy static boolean_t 3000eda14cbcSMatt Macy spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, 3001eda14cbcSMatt Macy nvlist_t *config) 3002eda14cbcSMatt Macy { 3003eda14cbcSMatt Macy uint64_t state = 0; 3004eda14cbcSMatt Macy uint64_t hostid = 0; 3005eda14cbcSMatt Macy uint64_t tryconfig_txg = 0; 3006eda14cbcSMatt Macy uint64_t tryconfig_timestamp = 0; 3007eda14cbcSMatt Macy uint16_t tryconfig_mmp_seq = 0; 3008eda14cbcSMatt Macy nvlist_t *nvinfo; 3009eda14cbcSMatt Macy 3010eda14cbcSMatt Macy if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 3011eda14cbcSMatt Macy nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3012eda14cbcSMatt Macy (void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG, 3013eda14cbcSMatt Macy &tryconfig_txg); 3014eda14cbcSMatt Macy (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 3015eda14cbcSMatt Macy &tryconfig_timestamp); 3016eda14cbcSMatt Macy (void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ, 3017eda14cbcSMatt Macy &tryconfig_mmp_seq); 3018eda14cbcSMatt Macy } 3019eda14cbcSMatt Macy 3020eda14cbcSMatt Macy (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state); 3021eda14cbcSMatt Macy 3022eda14cbcSMatt Macy /* 3023eda14cbcSMatt Macy * Disable the MMP activity check. This is used by zdb, which 3024eda14cbcSMatt Macy * is intended to be used on potentially active pools. 3025eda14cbcSMatt Macy */ 3026eda14cbcSMatt Macy if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) 3027eda14cbcSMatt Macy return (B_FALSE); 3028eda14cbcSMatt Macy 3029eda14cbcSMatt Macy /* 3030eda14cbcSMatt Macy * Skip the activity check when the MMP feature is disabled. 3031eda14cbcSMatt Macy */ 3032eda14cbcSMatt Macy if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0) 3033eda14cbcSMatt Macy return (B_FALSE); 3034eda14cbcSMatt Macy 3035eda14cbcSMatt Macy /* 3036eda14cbcSMatt Macy * If the tryconfig_ values are nonzero, they are the results of an 3037eda14cbcSMatt Macy * earlier tryimport. If they all match the uberblock we just found, 3038eda14cbcSMatt Macy * then the pool has not changed and we return false so we do not test 3039eda14cbcSMatt Macy * a second time.
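 * (The tryconfig_ values are pulled from the ZPOOL_CONFIG_LOAD_INFO
 * nvlist at the top of this function, where the earlier read-only
 * tryimport recorded them.)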
3040eda14cbcSMatt Macy */ 3041eda14cbcSMatt Macy if (tryconfig_txg && tryconfig_txg == ub->ub_txg && 3042eda14cbcSMatt Macy tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp && 3043eda14cbcSMatt Macy tryconfig_mmp_seq && tryconfig_mmp_seq == 3044eda14cbcSMatt Macy (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) 3045eda14cbcSMatt Macy return (B_FALSE); 3046eda14cbcSMatt Macy 3047eda14cbcSMatt Macy /* 3048eda14cbcSMatt Macy * Allow the activity check to be skipped when importing the pool 3049eda14cbcSMatt Macy * on the same host that last imported it. Since the hostid from 3050eda14cbcSMatt Macy * configuration may be stale, use the one read from the label. 3051eda14cbcSMatt Macy */ 3052eda14cbcSMatt Macy if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID)) 3053eda14cbcSMatt Macy hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID); 3054eda14cbcSMatt Macy 3055eda14cbcSMatt Macy if (hostid == spa_get_hostid(spa)) 3056eda14cbcSMatt Macy return (B_FALSE); 3057eda14cbcSMatt Macy 3058eda14cbcSMatt Macy /* 3059eda14cbcSMatt Macy * Skip the activity test when the pool was cleanly exported. 3060eda14cbcSMatt Macy */ 3061eda14cbcSMatt Macy if (state != POOL_STATE_ACTIVE) 3062eda14cbcSMatt Macy return (B_FALSE); 3063eda14cbcSMatt Macy 3064eda14cbcSMatt Macy return (B_TRUE); 3065eda14cbcSMatt Macy } 3066eda14cbcSMatt Macy 3067eda14cbcSMatt Macy /* 3068eda14cbcSMatt Macy * Nanoseconds the activity check must watch for changes on-disk. 3069eda14cbcSMatt Macy */ 3070eda14cbcSMatt Macy static uint64_t 3071eda14cbcSMatt Macy spa_activity_check_duration(spa_t *spa, uberblock_t *ub) 3072eda14cbcSMatt Macy { 3073eda14cbcSMatt Macy uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1); 3074eda14cbcSMatt Macy uint64_t multihost_interval = MSEC2NSEC( 3075eda14cbcSMatt Macy MMP_INTERVAL_OK(zfs_multihost_interval)); 3076eda14cbcSMatt Macy uint64_t import_delay = MAX(NANOSEC, import_intervals * 3077eda14cbcSMatt Macy multihost_interval); 3078eda14cbcSMatt Macy 3079eda14cbcSMatt Macy /* 3080eda14cbcSMatt Macy * Local tunables determine a minimum duration except for the case 3081eda14cbcSMatt Macy * where we know when the remote host will suspend the pool if MMP 3082eda14cbcSMatt Macy * writes do not land. 3083eda14cbcSMatt Macy * 3084eda14cbcSMatt Macy * See the Big Theory comment at the top of mmp.c for the reasoning behind 3085eda14cbcSMatt Macy * these cases and times.
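 * As a worked example, with the common defaults of
 * zfs_multihost_interval = 1000ms and zfs_multihost_import_intervals = 20
 * (assuming those defaults in this build), the floor computed above is
 * MAX(NANOSEC, 20 * 1s) = 20 seconds of watching for uberblock changes.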
3086eda14cbcSMatt Macy */ 3087eda14cbcSMatt Macy 3088eda14cbcSMatt Macy ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100); 3089eda14cbcSMatt Macy 3090eda14cbcSMatt Macy if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && 3091eda14cbcSMatt Macy MMP_FAIL_INT(ub) > 0) { 3092eda14cbcSMatt Macy 3093eda14cbcSMatt Macy /* MMP on remote host will suspend pool after failed writes */ 3094eda14cbcSMatt Macy import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) * 3095eda14cbcSMatt Macy MMP_IMPORT_SAFETY_FACTOR / 100; 3096eda14cbcSMatt Macy 3097eda14cbcSMatt Macy zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp " 3098eda14cbcSMatt Macy "mmp_fails=%llu ub_mmp mmp_interval=%llu " 309933b8c039SMartin Matuska "import_intervals=%llu", (u_longlong_t)import_delay, 310033b8c039SMartin Matuska (u_longlong_t)MMP_FAIL_INT(ub), 310133b8c039SMartin Matuska (u_longlong_t)MMP_INTERVAL(ub), 310233b8c039SMartin Matuska (u_longlong_t)import_intervals); 3103eda14cbcSMatt Macy 3104eda14cbcSMatt Macy } else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && 3105eda14cbcSMatt Macy MMP_FAIL_INT(ub) == 0) { 3106eda14cbcSMatt Macy 3107eda14cbcSMatt Macy /* MMP on remote host will never suspend pool */ 3108eda14cbcSMatt Macy import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) + 3109eda14cbcSMatt Macy ub->ub_mmp_delay) * import_intervals); 3110eda14cbcSMatt Macy 3111eda14cbcSMatt Macy zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp " 3112eda14cbcSMatt Macy "mmp_interval=%llu ub_mmp_delay=%llu " 311333b8c039SMartin Matuska "import_intervals=%llu", (u_longlong_t)import_delay, 311433b8c039SMartin Matuska (u_longlong_t)MMP_INTERVAL(ub), 311533b8c039SMartin Matuska (u_longlong_t)ub->ub_mmp_delay, 311633b8c039SMartin Matuska (u_longlong_t)import_intervals); 3117eda14cbcSMatt Macy 3118eda14cbcSMatt Macy } else if (MMP_VALID(ub)) { 3119eda14cbcSMatt Macy /* 3120eda14cbcSMatt Macy * zfs-0.7 compatibility case 3121eda14cbcSMatt Macy */ 3122eda14cbcSMatt Macy 3123eda14cbcSMatt Macy import_delay = MAX(import_delay, (multihost_interval + 3124eda14cbcSMatt Macy ub->ub_mmp_delay) * import_intervals); 3125eda14cbcSMatt Macy 3126eda14cbcSMatt Macy zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu " 312733b8c039SMartin Matuska "import_intervals=%llu leaves=%u", 312833b8c039SMartin Matuska (u_longlong_t)import_delay, 312933b8c039SMartin Matuska (u_longlong_t)ub->ub_mmp_delay, 313033b8c039SMartin Matuska (u_longlong_t)import_intervals, 3131eda14cbcSMatt Macy vdev_count_leaves(spa)); 3132eda14cbcSMatt Macy } else { 3133eda14cbcSMatt Macy /* Using local tunings is the only reasonable option */ 3134eda14cbcSMatt Macy zfs_dbgmsg("pool last imported on non-MMP aware " 3135eda14cbcSMatt Macy "host using import_delay=%llu multihost_interval=%llu " 313633b8c039SMartin Matuska "import_intervals=%llu", (u_longlong_t)import_delay, 313733b8c039SMartin Matuska (u_longlong_t)multihost_interval, 313833b8c039SMartin Matuska (u_longlong_t)import_intervals); 3139eda14cbcSMatt Macy } 3140eda14cbcSMatt Macy 3141eda14cbcSMatt Macy return (import_delay); 3142eda14cbcSMatt Macy } 3143eda14cbcSMatt Macy 3144eda14cbcSMatt Macy /* 3145eda14cbcSMatt Macy * Perform the import activity check. If the user canceled the import or 3146eda14cbcSMatt Macy * we detected activity then fail. 
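 * (Concretely: this returns 0 when the on-disk uberblock stays unchanged
 * for the whole window, EREMOTEIO when another host appears active, and
 * EINTR when the wait below is interrupted by a signal.)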
3147eda14cbcSMatt Macy */ 3148eda14cbcSMatt Macy static int 3149eda14cbcSMatt Macy spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) 3150eda14cbcSMatt Macy { 3151eda14cbcSMatt Macy uint64_t txg = ub->ub_txg; 3152eda14cbcSMatt Macy uint64_t timestamp = ub->ub_timestamp; 3153eda14cbcSMatt Macy uint64_t mmp_config = ub->ub_mmp_config; 3154eda14cbcSMatt Macy uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0; 3155eda14cbcSMatt Macy uint64_t import_delay; 3156eda14cbcSMatt Macy hrtime_t import_expire; 3157eda14cbcSMatt Macy nvlist_t *mmp_label = NULL; 3158eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 3159eda14cbcSMatt Macy kcondvar_t cv; 3160eda14cbcSMatt Macy kmutex_t mtx; 3161eda14cbcSMatt Macy int error = 0; 3162eda14cbcSMatt Macy 3163eda14cbcSMatt Macy cv_init(&cv, NULL, CV_DEFAULT, NULL); 3164eda14cbcSMatt Macy mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL); 3165eda14cbcSMatt Macy mutex_enter(&mtx); 3166eda14cbcSMatt Macy 3167eda14cbcSMatt Macy /* 3168eda14cbcSMatt Macy * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed 3169eda14cbcSMatt Macy * during the earlier tryimport. If the txg recorded there is 0 then 3170eda14cbcSMatt Macy * the pool is known to be active on another host. 3171eda14cbcSMatt Macy * 3172eda14cbcSMatt Macy * Otherwise, the pool might be in use on another host. Check for 3173eda14cbcSMatt Macy * changes in the uberblocks on disk if necessary. 3174eda14cbcSMatt Macy */ 3175eda14cbcSMatt Macy if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 3176eda14cbcSMatt Macy nvlist_t *nvinfo = fnvlist_lookup_nvlist(config, 3177eda14cbcSMatt Macy ZPOOL_CONFIG_LOAD_INFO); 3178eda14cbcSMatt Macy 3179eda14cbcSMatt Macy if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) && 3180eda14cbcSMatt Macy fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) { 3181eda14cbcSMatt Macy vdev_uberblock_load(rvd, ub, &mmp_label); 3182eda14cbcSMatt Macy error = SET_ERROR(EREMOTEIO); 3183eda14cbcSMatt Macy goto out; 3184eda14cbcSMatt Macy } 3185eda14cbcSMatt Macy } 3186eda14cbcSMatt Macy 3187eda14cbcSMatt Macy import_delay = spa_activity_check_duration(spa, ub); 3188eda14cbcSMatt Macy 3189eda14cbcSMatt Macy /* Add a small random factor in case of simultaneous imports (0-25%) */ 319033b8c039SMartin Matuska import_delay += import_delay * random_in_range(250) / 1000; 3191eda14cbcSMatt Macy 3192eda14cbcSMatt Macy import_expire = gethrtime() + import_delay; 3193eda14cbcSMatt Macy 3194eda14cbcSMatt Macy while (gethrtime() < import_expire) { 3195eda14cbcSMatt Macy (void) spa_import_progress_set_mmp_check(spa_guid(spa), 3196eda14cbcSMatt Macy NSEC2SEC(import_expire - gethrtime())); 3197eda14cbcSMatt Macy 3198eda14cbcSMatt Macy vdev_uberblock_load(rvd, ub, &mmp_label); 3199eda14cbcSMatt Macy 3200eda14cbcSMatt Macy if (txg != ub->ub_txg || timestamp != ub->ub_timestamp || 3201eda14cbcSMatt Macy mmp_seq != (MMP_SEQ_VALID(ub) ? 
MMP_SEQ(ub) : 0)) { 3202eda14cbcSMatt Macy zfs_dbgmsg("multihost activity detected " 3203eda14cbcSMatt Macy "txg %llu ub_txg %llu " 3204eda14cbcSMatt Macy "timestamp %llu ub_timestamp %llu " 3205eda14cbcSMatt Macy "mmp_config %#llx ub_mmp_config %#llx", 320633b8c039SMartin Matuska (u_longlong_t)txg, (u_longlong_t)ub->ub_txg, 320733b8c039SMartin Matuska (u_longlong_t)timestamp, 320833b8c039SMartin Matuska (u_longlong_t)ub->ub_timestamp, 320933b8c039SMartin Matuska (u_longlong_t)mmp_config, 321033b8c039SMartin Matuska (u_longlong_t)ub->ub_mmp_config); 3211eda14cbcSMatt Macy 3212eda14cbcSMatt Macy error = SET_ERROR(EREMOTEIO); 3213eda14cbcSMatt Macy break; 3214eda14cbcSMatt Macy } 3215eda14cbcSMatt Macy 3216eda14cbcSMatt Macy if (mmp_label) { 3217eda14cbcSMatt Macy nvlist_free(mmp_label); 3218eda14cbcSMatt Macy mmp_label = NULL; 3219eda14cbcSMatt Macy } 3220eda14cbcSMatt Macy 3221eda14cbcSMatt Macy error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz); 3222eda14cbcSMatt Macy if (error != -1) { 3223eda14cbcSMatt Macy error = SET_ERROR(EINTR); 3224eda14cbcSMatt Macy break; 3225eda14cbcSMatt Macy } 3226eda14cbcSMatt Macy error = 0; 3227eda14cbcSMatt Macy } 3228eda14cbcSMatt Macy 3229eda14cbcSMatt Macy out: 3230eda14cbcSMatt Macy mutex_exit(&mtx); 3231eda14cbcSMatt Macy mutex_destroy(&mtx); 3232eda14cbcSMatt Macy cv_destroy(&cv); 3233eda14cbcSMatt Macy 3234eda14cbcSMatt Macy /* 3235eda14cbcSMatt Macy * If the pool is determined to be active store the status in the 3236eda14cbcSMatt Macy * spa->spa_load_info nvlist. If the remote hostname or hostid are 3237eda14cbcSMatt Macy * available from configuration read from disk store them as well. 3238eda14cbcSMatt Macy * This allows 'zpool import' to generate a more useful message. 3239eda14cbcSMatt Macy * 3240eda14cbcSMatt Macy * ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory) 3241eda14cbcSMatt Macy * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool 3242eda14cbcSMatt Macy * ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool 3243eda14cbcSMatt Macy */ 3244eda14cbcSMatt Macy if (error == EREMOTEIO) { 3245eda14cbcSMatt Macy char *hostname = "<unknown>"; 3246eda14cbcSMatt Macy uint64_t hostid = 0; 3247eda14cbcSMatt Macy 3248eda14cbcSMatt Macy if (mmp_label) { 3249eda14cbcSMatt Macy if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) { 3250eda14cbcSMatt Macy hostname = fnvlist_lookup_string(mmp_label, 3251eda14cbcSMatt Macy ZPOOL_CONFIG_HOSTNAME); 3252eda14cbcSMatt Macy fnvlist_add_string(spa->spa_load_info, 3253eda14cbcSMatt Macy ZPOOL_CONFIG_MMP_HOSTNAME, hostname); 3254eda14cbcSMatt Macy } 3255eda14cbcSMatt Macy 3256eda14cbcSMatt Macy if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) { 3257eda14cbcSMatt Macy hostid = fnvlist_lookup_uint64(mmp_label, 3258eda14cbcSMatt Macy ZPOOL_CONFIG_HOSTID); 3259eda14cbcSMatt Macy fnvlist_add_uint64(spa->spa_load_info, 3260eda14cbcSMatt Macy ZPOOL_CONFIG_MMP_HOSTID, hostid); 3261eda14cbcSMatt Macy } 3262eda14cbcSMatt Macy } 3263eda14cbcSMatt Macy 3264eda14cbcSMatt Macy fnvlist_add_uint64(spa->spa_load_info, 3265eda14cbcSMatt Macy ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE); 3266eda14cbcSMatt Macy fnvlist_add_uint64(spa->spa_load_info, 3267eda14cbcSMatt Macy ZPOOL_CONFIG_MMP_TXG, 0); 3268eda14cbcSMatt Macy 3269eda14cbcSMatt Macy error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO); 3270eda14cbcSMatt Macy } 3271eda14cbcSMatt Macy 3272eda14cbcSMatt Macy if (mmp_label) 3273eda14cbcSMatt Macy nvlist_free(mmp_label); 3274eda14cbcSMatt Macy 3275eda14cbcSMatt Macy return (error); 3276eda14cbcSMatt Macy 
} 3277eda14cbcSMatt Macy 3278eda14cbcSMatt Macy static int 3279eda14cbcSMatt Macy spa_verify_host(spa_t *spa, nvlist_t *mos_config) 3280eda14cbcSMatt Macy { 3281eda14cbcSMatt Macy uint64_t hostid; 3282eda14cbcSMatt Macy char *hostname; 3283eda14cbcSMatt Macy uint64_t myhostid = 0; 3284eda14cbcSMatt Macy 3285eda14cbcSMatt Macy if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config, 3286eda14cbcSMatt Macy ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 3287eda14cbcSMatt Macy hostname = fnvlist_lookup_string(mos_config, 3288eda14cbcSMatt Macy ZPOOL_CONFIG_HOSTNAME); 3289eda14cbcSMatt Macy 3290eda14cbcSMatt Macy myhostid = zone_get_hostid(NULL); 3291eda14cbcSMatt Macy 3292eda14cbcSMatt Macy if (hostid != 0 && myhostid != 0 && hostid != myhostid) { 3293eda14cbcSMatt Macy cmn_err(CE_WARN, "pool '%s' could not be " 3294eda14cbcSMatt Macy "loaded as it was last accessed by " 3295eda14cbcSMatt Macy "another system (host: %s hostid: 0x%llx). " 3296ac0bf12eSMatt Macy "See: https://openzfs.github.io/openzfs-docs/msg/" 3297ac0bf12eSMatt Macy "ZFS-8000-EY", 3298eda14cbcSMatt Macy spa_name(spa), hostname, (u_longlong_t)hostid); 3299eda14cbcSMatt Macy spa_load_failed(spa, "hostid verification failed: pool " 3300eda14cbcSMatt Macy "last accessed by host: %s (hostid: 0x%llx)", 3301eda14cbcSMatt Macy hostname, (u_longlong_t)hostid); 3302eda14cbcSMatt Macy return (SET_ERROR(EBADF)); 3303eda14cbcSMatt Macy } 3304eda14cbcSMatt Macy } 3305eda14cbcSMatt Macy 3306eda14cbcSMatt Macy return (0); 3307eda14cbcSMatt Macy } 3308eda14cbcSMatt Macy 3309eda14cbcSMatt Macy static int 3310eda14cbcSMatt Macy spa_ld_parse_config(spa_t *spa, spa_import_type_t type) 3311eda14cbcSMatt Macy { 3312eda14cbcSMatt Macy int error = 0; 3313eda14cbcSMatt Macy nvlist_t *nvtree, *nvl, *config = spa->spa_config; 3314eda14cbcSMatt Macy int parse; 3315eda14cbcSMatt Macy vdev_t *rvd; 3316eda14cbcSMatt Macy uint64_t pool_guid; 3317eda14cbcSMatt Macy char *comment; 3318ee36e25aSMartin Matuska char *compatibility; 3319eda14cbcSMatt Macy 3320eda14cbcSMatt Macy /* 3321eda14cbcSMatt Macy * Versioning wasn't explicitly added to the label until later, so if 3322eda14cbcSMatt Macy * it's not present, treat it as the initial version. 3323eda14cbcSMatt Macy */ 3324eda14cbcSMatt Macy if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 3325eda14cbcSMatt Macy &spa->spa_ubsync.ub_version) != 0) 3326eda14cbcSMatt Macy spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 3327eda14cbcSMatt Macy 3328eda14cbcSMatt Macy if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { 3329eda14cbcSMatt Macy spa_load_failed(spa, "invalid config provided: '%s' missing", 3330eda14cbcSMatt Macy ZPOOL_CONFIG_POOL_GUID); 3331eda14cbcSMatt Macy return (SET_ERROR(EINVAL)); 3332eda14cbcSMatt Macy } 3333eda14cbcSMatt Macy 3334eda14cbcSMatt Macy /* 3335eda14cbcSMatt Macy * If we are doing an import, ensure that the pool is not already 3336eda14cbcSMatt Macy * imported by checking if its pool guid already exists in the 3337eda14cbcSMatt Macy * spa namespace. 3338eda14cbcSMatt Macy * 3339eda14cbcSMatt Macy * The only case in which we allow an already imported pool to be 3340eda14cbcSMatt Macy * imported again is when the pool is checkpointed and we want to 3341eda14cbcSMatt Macy * look at its checkpointed state from userland tools like zdb.
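 * (That exception is why the userland (!_KERNEL) variant of the check
 * below also requires !spa_importing_readonly_checkpoint() before
 * treating the guid as a duplicate.)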
3342eda14cbcSMatt Macy */ 3343eda14cbcSMatt Macy #ifdef _KERNEL 3344eda14cbcSMatt Macy if ((spa->spa_load_state == SPA_LOAD_IMPORT || 3345eda14cbcSMatt Macy spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 3346eda14cbcSMatt Macy spa_guid_exists(pool_guid, 0)) { 3347eda14cbcSMatt Macy #else 3348eda14cbcSMatt Macy if ((spa->spa_load_state == SPA_LOAD_IMPORT || 3349eda14cbcSMatt Macy spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 3350eda14cbcSMatt Macy spa_guid_exists(pool_guid, 0) && 3351eda14cbcSMatt Macy !spa_importing_readonly_checkpoint(spa)) { 3352eda14cbcSMatt Macy #endif 3353eda14cbcSMatt Macy spa_load_failed(spa, "a pool with guid %llu is already open", 3354eda14cbcSMatt Macy (u_longlong_t)pool_guid); 3355eda14cbcSMatt Macy return (SET_ERROR(EEXIST)); 3356eda14cbcSMatt Macy } 3357eda14cbcSMatt Macy 3358eda14cbcSMatt Macy spa->spa_config_guid = pool_guid; 3359eda14cbcSMatt Macy 3360eda14cbcSMatt Macy nvlist_free(spa->spa_load_info); 3361eda14cbcSMatt Macy spa->spa_load_info = fnvlist_alloc(); 3362eda14cbcSMatt Macy 3363eda14cbcSMatt Macy ASSERT(spa->spa_comment == NULL); 3364eda14cbcSMatt Macy if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 3365eda14cbcSMatt Macy spa->spa_comment = spa_strdup(comment); 3366eda14cbcSMatt Macy 3367ee36e25aSMartin Matuska ASSERT(spa->spa_compatibility == NULL); 3368ee36e25aSMartin Matuska if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY, 3369ee36e25aSMartin Matuska &compatibility) == 0) 3370ee36e25aSMartin Matuska spa->spa_compatibility = spa_strdup(compatibility); 3371ee36e25aSMartin Matuska 3372eda14cbcSMatt Macy (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 3373eda14cbcSMatt Macy &spa->spa_config_txg); 3374eda14cbcSMatt Macy 3375eda14cbcSMatt Macy if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0) 3376eda14cbcSMatt Macy spa->spa_config_splitting = fnvlist_dup(nvl); 3377eda14cbcSMatt Macy 3378eda14cbcSMatt Macy if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) { 3379eda14cbcSMatt Macy spa_load_failed(spa, "invalid config provided: '%s' missing", 3380eda14cbcSMatt Macy ZPOOL_CONFIG_VDEV_TREE); 3381eda14cbcSMatt Macy return (SET_ERROR(EINVAL)); 3382eda14cbcSMatt Macy } 3383eda14cbcSMatt Macy 3384eda14cbcSMatt Macy /* 3385eda14cbcSMatt Macy * Create "The Godfather" zio to hold all async IOs 3386eda14cbcSMatt Macy */ 3387eda14cbcSMatt Macy spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 3388eda14cbcSMatt Macy KM_SLEEP); 3389eda14cbcSMatt Macy for (int i = 0; i < max_ncpus; i++) { 3390eda14cbcSMatt Macy spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 3391eda14cbcSMatt Macy ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3392eda14cbcSMatt Macy ZIO_FLAG_GODFATHER); 3393eda14cbcSMatt Macy } 3394eda14cbcSMatt Macy 3395eda14cbcSMatt Macy /* 3396eda14cbcSMatt Macy * Parse the configuration into a vdev tree. We explicitly set the 3397eda14cbcSMatt Macy * value that will be returned by spa_version() since parsing the 3398eda14cbcSMatt Macy * configuration requires knowing the version number. 3399eda14cbcSMatt Macy */ 3400eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3401eda14cbcSMatt Macy parse = (type == SPA_IMPORT_EXISTING ? 
3402eda14cbcSMatt Macy VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 3403eda14cbcSMatt Macy error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse); 3404eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 3405eda14cbcSMatt Macy 3406eda14cbcSMatt Macy if (error != 0) { 3407eda14cbcSMatt Macy spa_load_failed(spa, "unable to parse config [error=%d]", 3408eda14cbcSMatt Macy error); 3409eda14cbcSMatt Macy return (error); 3410eda14cbcSMatt Macy } 3411eda14cbcSMatt Macy 3412eda14cbcSMatt Macy ASSERT(spa->spa_root_vdev == rvd); 3413eda14cbcSMatt Macy ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 3414eda14cbcSMatt Macy ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT); 3415eda14cbcSMatt Macy 3416eda14cbcSMatt Macy if (type != SPA_IMPORT_ASSEMBLE) { 3417eda14cbcSMatt Macy ASSERT(spa_guid(spa) == pool_guid); 3418eda14cbcSMatt Macy } 3419eda14cbcSMatt Macy 3420eda14cbcSMatt Macy return (0); 3421eda14cbcSMatt Macy } 3422eda14cbcSMatt Macy 3423eda14cbcSMatt Macy /* 3424eda14cbcSMatt Macy * Recursively open all vdevs in the vdev tree. This function is called twice: 3425eda14cbcSMatt Macy * first with the untrusted config, then with the trusted config. 3426eda14cbcSMatt Macy */ 3427eda14cbcSMatt Macy static int 3428eda14cbcSMatt Macy spa_ld_open_vdevs(spa_t *spa) 3429eda14cbcSMatt Macy { 3430eda14cbcSMatt Macy int error = 0; 3431eda14cbcSMatt Macy 3432eda14cbcSMatt Macy /* 3433eda14cbcSMatt Macy * spa_missing_tvds_allowed defines how many top-level vdevs can be 3434eda14cbcSMatt Macy * missing/unopenable for the root vdev to be still considered openable. 3435eda14cbcSMatt Macy */ 3436eda14cbcSMatt Macy if (spa->spa_trust_config) { 3437eda14cbcSMatt Macy spa->spa_missing_tvds_allowed = zfs_max_missing_tvds; 3438eda14cbcSMatt Macy } else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) { 3439eda14cbcSMatt Macy spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile; 3440eda14cbcSMatt Macy } else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) { 3441eda14cbcSMatt Macy spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan; 3442eda14cbcSMatt Macy } else { 3443eda14cbcSMatt Macy spa->spa_missing_tvds_allowed = 0; 3444eda14cbcSMatt Macy } 3445eda14cbcSMatt Macy 3446eda14cbcSMatt Macy spa->spa_missing_tvds_allowed = 3447eda14cbcSMatt Macy MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed); 3448eda14cbcSMatt Macy 3449eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3450eda14cbcSMatt Macy error = vdev_open(spa->spa_root_vdev); 3451eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 3452eda14cbcSMatt Macy 3453eda14cbcSMatt Macy if (spa->spa_missing_tvds != 0) { 3454eda14cbcSMatt Macy spa_load_note(spa, "vdev tree has %lld missing top-level " 3455eda14cbcSMatt Macy "vdevs.", (u_longlong_t)spa->spa_missing_tvds); 3456eda14cbcSMatt Macy if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) { 3457eda14cbcSMatt Macy /* 3458eda14cbcSMatt Macy * Although theoretically we could allow users to open 3459eda14cbcSMatt Macy * incomplete pools in RW mode, we'd need to add a lot 3460eda14cbcSMatt Macy * of extra logic (e.g. adjust pool space to account 3461eda14cbcSMatt Macy * for missing vdevs). 3462eda14cbcSMatt Macy * This limitation also prevents users from accidentally 3463eda14cbcSMatt Macy * opening the pool in RW mode during data recovery and 3464eda14cbcSMatt Macy * damaging it further. 
3465eda14cbcSMatt Macy */ 3466eda14cbcSMatt Macy spa_load_note(spa, "pools with missing top-level " 3467eda14cbcSMatt Macy "vdevs can only be opened in read-only mode."); 3468eda14cbcSMatt Macy error = SET_ERROR(ENXIO); 3469eda14cbcSMatt Macy } else { 3470eda14cbcSMatt Macy spa_load_note(spa, "current settings allow for maximum " 3471eda14cbcSMatt Macy "%lld missing top-level vdevs at this stage.", 3472eda14cbcSMatt Macy (u_longlong_t)spa->spa_missing_tvds_allowed); 3473eda14cbcSMatt Macy } 3474eda14cbcSMatt Macy } 3475eda14cbcSMatt Macy if (error != 0) { 3476eda14cbcSMatt Macy spa_load_failed(spa, "unable to open vdev tree [error=%d]", 3477eda14cbcSMatt Macy error); 3478eda14cbcSMatt Macy } 3479eda14cbcSMatt Macy if (spa->spa_missing_tvds != 0 || error != 0) 3480eda14cbcSMatt Macy vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2); 3481eda14cbcSMatt Macy 3482eda14cbcSMatt Macy return (error); 3483eda14cbcSMatt Macy } 3484eda14cbcSMatt Macy 3485eda14cbcSMatt Macy /* 3486eda14cbcSMatt Macy * We need to validate the vdev labels against the configuration that 3487eda14cbcSMatt Macy * we have in hand. This function is called twice: first with an untrusted 3488eda14cbcSMatt Macy * config, then with a trusted config. The validation is more strict when the 3489eda14cbcSMatt Macy * config is trusted. 3490eda14cbcSMatt Macy */ 3491eda14cbcSMatt Macy static int 3492eda14cbcSMatt Macy spa_ld_validate_vdevs(spa_t *spa) 3493eda14cbcSMatt Macy { 3494eda14cbcSMatt Macy int error = 0; 3495eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 3496eda14cbcSMatt Macy 3497eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3498eda14cbcSMatt Macy error = vdev_validate(rvd); 3499eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 3500eda14cbcSMatt Macy 3501eda14cbcSMatt Macy if (error != 0) { 3502eda14cbcSMatt Macy spa_load_failed(spa, "vdev_validate failed [error=%d]", error); 3503eda14cbcSMatt Macy return (error); 3504eda14cbcSMatt Macy } 3505eda14cbcSMatt Macy 3506eda14cbcSMatt Macy if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 3507eda14cbcSMatt Macy spa_load_failed(spa, "cannot open vdev tree after invalidating " 3508eda14cbcSMatt Macy "some vdevs"); 3509eda14cbcSMatt Macy vdev_dbgmsg_print_tree(rvd, 2); 3510eda14cbcSMatt Macy return (SET_ERROR(ENXIO)); 3511eda14cbcSMatt Macy } 3512eda14cbcSMatt Macy 3513eda14cbcSMatt Macy return (0); 3514eda14cbcSMatt Macy } 3515eda14cbcSMatt Macy 3516eda14cbcSMatt Macy static void 3517eda14cbcSMatt Macy spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub) 3518eda14cbcSMatt Macy { 3519eda14cbcSMatt Macy spa->spa_state = POOL_STATE_ACTIVE; 3520eda14cbcSMatt Macy spa->spa_ubsync = spa->spa_uberblock; 3521eda14cbcSMatt Macy spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 3522eda14cbcSMatt Macy TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 3523eda14cbcSMatt Macy spa->spa_first_txg = spa->spa_last_ubsync_txg ? 
3524eda14cbcSMatt Macy spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 3525eda14cbcSMatt Macy spa->spa_claim_max_txg = spa->spa_first_txg; 3526eda14cbcSMatt Macy spa->spa_prev_software_version = ub->ub_software_version; 3527eda14cbcSMatt Macy } 3528eda14cbcSMatt Macy 3529eda14cbcSMatt Macy static int 3530eda14cbcSMatt Macy spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) 3531eda14cbcSMatt Macy { 3532eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 3533eda14cbcSMatt Macy nvlist_t *label; 3534eda14cbcSMatt Macy uberblock_t *ub = &spa->spa_uberblock; 3535eda14cbcSMatt Macy boolean_t activity_check = B_FALSE; 3536eda14cbcSMatt Macy 3537eda14cbcSMatt Macy /* 3538eda14cbcSMatt Macy * If we are opening the checkpointed state of the pool by 3539eda14cbcSMatt Macy * rewinding to it, at this point we will have written the 3540eda14cbcSMatt Macy * checkpointed uberblock to the vdev labels, so searching 3541eda14cbcSMatt Macy * the labels will find the right uberblock. However, if 3542eda14cbcSMatt Macy * we are opening the checkpointed state read-only, we have 3543eda14cbcSMatt Macy * not modified the labels. Therefore, we must ignore the 3544eda14cbcSMatt Macy * labels and continue using the spa_uberblock that was set 3545eda14cbcSMatt Macy * by spa_ld_checkpoint_rewind. 3546eda14cbcSMatt Macy * 3547eda14cbcSMatt Macy * Note that it would be fine to ignore the labels when 3548eda14cbcSMatt Macy * rewinding (opening writeable) as well. However, if we 3549eda14cbcSMatt Macy * crash just after writing the labels, we will end up 3550eda14cbcSMatt Macy * searching the labels. Doing so in the common case means 3551eda14cbcSMatt Macy * that this code path gets exercised normally, rather than 3552eda14cbcSMatt Macy * just in the edge case. 3553eda14cbcSMatt Macy */ 3554eda14cbcSMatt Macy if (ub->ub_checkpoint_txg != 0 && 3555eda14cbcSMatt Macy spa_importing_readonly_checkpoint(spa)) { 3556eda14cbcSMatt Macy spa_ld_select_uberblock_done(spa, ub); 3557eda14cbcSMatt Macy return (0); 3558eda14cbcSMatt Macy } 3559eda14cbcSMatt Macy 3560eda14cbcSMatt Macy /* 3561eda14cbcSMatt Macy * Find the best uberblock. 3562eda14cbcSMatt Macy */ 3563eda14cbcSMatt Macy vdev_uberblock_load(rvd, ub, &label); 3564eda14cbcSMatt Macy 3565eda14cbcSMatt Macy /* 3566eda14cbcSMatt Macy * If we weren't able to find a single valid uberblock, return failure. 3567eda14cbcSMatt Macy */ 3568eda14cbcSMatt Macy if (ub->ub_txg == 0) { 3569eda14cbcSMatt Macy nvlist_free(label); 3570eda14cbcSMatt Macy spa_load_failed(spa, "no valid uberblock found"); 3571eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 3572eda14cbcSMatt Macy } 3573eda14cbcSMatt Macy 3574eda14cbcSMatt Macy if (spa->spa_load_max_txg != UINT64_MAX) { 3575eda14cbcSMatt Macy (void) spa_import_progress_set_max_txg(spa_guid(spa), 3576eda14cbcSMatt Macy (u_longlong_t)spa->spa_load_max_txg); 3577eda14cbcSMatt Macy } 3578eda14cbcSMatt Macy spa_load_note(spa, "using uberblock with txg=%llu", 3579eda14cbcSMatt Macy (u_longlong_t)ub->ub_txg); 3580eda14cbcSMatt Macy 3581eda14cbcSMatt Macy 3582eda14cbcSMatt Macy /* 3583eda14cbcSMatt Macy * For pools which have the multihost property on determine if the 3584eda14cbcSMatt Macy * pool is truly inactive and can be safely imported. Prevent 3585eda14cbcSMatt Macy * hosts which don't have a hostid set from importing the pool. 
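 * (The hostid == 0 case below fails fast with MMP_STATE_NO_HOSTID in
 * spa_load_info, presumably so userland can report a targeted error
 * rather than a generic activity-check failure.)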
3586eda14cbcSMatt Macy */ 3587eda14cbcSMatt Macy activity_check = spa_activity_check_required(spa, ub, label, 3588eda14cbcSMatt Macy spa->spa_config); 3589eda14cbcSMatt Macy if (activity_check) { 3590eda14cbcSMatt Macy if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay && 3591eda14cbcSMatt Macy spa_get_hostid(spa) == 0) { 3592eda14cbcSMatt Macy nvlist_free(label); 3593eda14cbcSMatt Macy fnvlist_add_uint64(spa->spa_load_info, 3594eda14cbcSMatt Macy ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 3595eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 3596eda14cbcSMatt Macy } 3597eda14cbcSMatt Macy 3598eda14cbcSMatt Macy int error = spa_activity_check(spa, ub, spa->spa_config); 3599eda14cbcSMatt Macy if (error) { 3600eda14cbcSMatt Macy nvlist_free(label); 3601eda14cbcSMatt Macy return (error); 3602eda14cbcSMatt Macy } 3603eda14cbcSMatt Macy 3604eda14cbcSMatt Macy fnvlist_add_uint64(spa->spa_load_info, 3605eda14cbcSMatt Macy ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE); 3606eda14cbcSMatt Macy fnvlist_add_uint64(spa->spa_load_info, 3607eda14cbcSMatt Macy ZPOOL_CONFIG_MMP_TXG, ub->ub_txg); 3608eda14cbcSMatt Macy fnvlist_add_uint16(spa->spa_load_info, 3609eda14cbcSMatt Macy ZPOOL_CONFIG_MMP_SEQ, 3610eda14cbcSMatt Macy (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)); 3611eda14cbcSMatt Macy } 3612eda14cbcSMatt Macy 3613eda14cbcSMatt Macy /* 3614eda14cbcSMatt Macy * If the pool has an unsupported version we can't open it. 3615eda14cbcSMatt Macy */ 3616eda14cbcSMatt Macy if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 3617eda14cbcSMatt Macy nvlist_free(label); 3618eda14cbcSMatt Macy spa_load_failed(spa, "version %llu is not supported", 3619eda14cbcSMatt Macy (u_longlong_t)ub->ub_version); 3620eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 3621eda14cbcSMatt Macy } 3622eda14cbcSMatt Macy 3623eda14cbcSMatt Macy if (ub->ub_version >= SPA_VERSION_FEATURES) { 3624eda14cbcSMatt Macy nvlist_t *features; 3625eda14cbcSMatt Macy 3626eda14cbcSMatt Macy /* 3627eda14cbcSMatt Macy * If we weren't able to find what's necessary for reading the 3628eda14cbcSMatt Macy * MOS in the label, return failure. 3629eda14cbcSMatt Macy */ 3630eda14cbcSMatt Macy if (label == NULL) { 3631eda14cbcSMatt Macy spa_load_failed(spa, "label config unavailable"); 3632eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 3633eda14cbcSMatt Macy ENXIO)); 3634eda14cbcSMatt Macy } 3635eda14cbcSMatt Macy 3636eda14cbcSMatt Macy if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ, 3637eda14cbcSMatt Macy &features) != 0) { 3638eda14cbcSMatt Macy nvlist_free(label); 3639eda14cbcSMatt Macy spa_load_failed(spa, "invalid label: '%s' missing", 3640eda14cbcSMatt Macy ZPOOL_CONFIG_FEATURES_FOR_READ); 3641eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 3642eda14cbcSMatt Macy ENXIO)); 3643eda14cbcSMatt Macy } 3644eda14cbcSMatt Macy 3645eda14cbcSMatt Macy /* 3646eda14cbcSMatt Macy * Update our in-core representation with the definitive values 3647eda14cbcSMatt Macy * from the label. 3648eda14cbcSMatt Macy */ 3649eda14cbcSMatt Macy nvlist_free(spa->spa_label_features); 3650eda14cbcSMatt Macy VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 3651eda14cbcSMatt Macy } 3652eda14cbcSMatt Macy 3653eda14cbcSMatt Macy nvlist_free(label); 3654eda14cbcSMatt Macy 3655eda14cbcSMatt Macy /* 3656eda14cbcSMatt Macy * Look through entries in the label nvlist's features_for_read. 
If 3657eda14cbcSMatt Macy * there is a feature listed there which we don't understand then we 3658eda14cbcSMatt Macy * cannot open a pool. 3659eda14cbcSMatt Macy */ 3660eda14cbcSMatt Macy if (ub->ub_version >= SPA_VERSION_FEATURES) { 3661eda14cbcSMatt Macy nvlist_t *unsup_feat; 3662eda14cbcSMatt Macy 3663eda14cbcSMatt Macy VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 3664eda14cbcSMatt Macy 0); 3665eda14cbcSMatt Macy 3666eda14cbcSMatt Macy for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 3667eda14cbcSMatt Macy NULL); nvp != NULL; 3668eda14cbcSMatt Macy nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 3669eda14cbcSMatt Macy if (!zfeature_is_supported(nvpair_name(nvp))) { 3670eda14cbcSMatt Macy VERIFY(nvlist_add_string(unsup_feat, 3671eda14cbcSMatt Macy nvpair_name(nvp), "") == 0); 3672eda14cbcSMatt Macy } 3673eda14cbcSMatt Macy } 3674eda14cbcSMatt Macy 3675eda14cbcSMatt Macy if (!nvlist_empty(unsup_feat)) { 3676eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist(spa->spa_load_info, 3677eda14cbcSMatt Macy ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 3678eda14cbcSMatt Macy nvlist_free(unsup_feat); 3679eda14cbcSMatt Macy spa_load_failed(spa, "some features are unsupported"); 3680eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 3681eda14cbcSMatt Macy ENOTSUP)); 3682eda14cbcSMatt Macy } 3683eda14cbcSMatt Macy 3684eda14cbcSMatt Macy nvlist_free(unsup_feat); 3685eda14cbcSMatt Macy } 3686eda14cbcSMatt Macy 3687eda14cbcSMatt Macy if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 3688eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3689eda14cbcSMatt Macy spa_try_repair(spa, spa->spa_config); 3690eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 3691eda14cbcSMatt Macy nvlist_free(spa->spa_config_splitting); 3692eda14cbcSMatt Macy spa->spa_config_splitting = NULL; 3693eda14cbcSMatt Macy } 3694eda14cbcSMatt Macy 3695eda14cbcSMatt Macy /* 3696eda14cbcSMatt Macy * Initialize internal SPA structures. 
3697eda14cbcSMatt Macy 	 */
3698eda14cbcSMatt Macy 	spa_ld_select_uberblock_done(spa, ub);
3699eda14cbcSMatt Macy 
3700eda14cbcSMatt Macy 	return (0);
3701eda14cbcSMatt Macy }
3702eda14cbcSMatt Macy 
3703eda14cbcSMatt Macy static int
3704eda14cbcSMatt Macy spa_ld_open_rootbp(spa_t *spa)
3705eda14cbcSMatt Macy {
3706eda14cbcSMatt Macy 	int error = 0;
3707eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
3708eda14cbcSMatt Macy 
3709eda14cbcSMatt Macy 	error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
3710eda14cbcSMatt Macy 	if (error != 0) {
3711eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
3712eda14cbcSMatt Macy 		    "[error=%d]", error);
3713eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3714eda14cbcSMatt Macy 	}
3715eda14cbcSMatt Macy 	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
3716eda14cbcSMatt Macy 
3717eda14cbcSMatt Macy 	return (0);
3718eda14cbcSMatt Macy }
3719eda14cbcSMatt Macy 
3720eda14cbcSMatt Macy static int
3721eda14cbcSMatt Macy spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
3722eda14cbcSMatt Macy     boolean_t reloading)
3723eda14cbcSMatt Macy {
3724eda14cbcSMatt Macy 	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
3725eda14cbcSMatt Macy 	nvlist_t *nv, *mos_config, *policy;
3726eda14cbcSMatt Macy 	int error = 0, copy_error;
3727eda14cbcSMatt Macy 	uint64_t healthy_tvds, healthy_tvds_mos;
3728eda14cbcSMatt Macy 	uint64_t mos_config_txg;
3729eda14cbcSMatt Macy 
3730eda14cbcSMatt Macy 	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
3731eda14cbcSMatt Macy 	    != 0)
3732eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3733eda14cbcSMatt Macy 
3734eda14cbcSMatt Macy 	/*
3735eda14cbcSMatt Macy 	 * If we're assembling a pool from a split, the config provided is
3736eda14cbcSMatt Macy 	 * already trusted so there is nothing to do.
3737eda14cbcSMatt Macy 	 */
3738eda14cbcSMatt Macy 	if (type == SPA_IMPORT_ASSEMBLE)
3739eda14cbcSMatt Macy 		return (0);
3740eda14cbcSMatt Macy 
3741eda14cbcSMatt Macy 	healthy_tvds = spa_healthy_core_tvds(spa);
3742eda14cbcSMatt Macy 
3743eda14cbcSMatt Macy 	if (load_nvlist(spa, spa->spa_config_object, &mos_config)
3744eda14cbcSMatt Macy 	    != 0) {
3745eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve MOS config");
3746eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3747eda14cbcSMatt Macy 	}
3748eda14cbcSMatt Macy 
3749eda14cbcSMatt Macy 	/*
3750eda14cbcSMatt Macy 	 * If we are doing an open, the pool owner has not been verified yet,
3751eda14cbcSMatt Macy 	 * so do the verification here.
3752eda14cbcSMatt Macy */ 3753eda14cbcSMatt Macy if (spa->spa_load_state == SPA_LOAD_OPEN) { 3754eda14cbcSMatt Macy error = spa_verify_host(spa, mos_config); 3755eda14cbcSMatt Macy if (error != 0) { 3756eda14cbcSMatt Macy nvlist_free(mos_config); 3757eda14cbcSMatt Macy return (error); 3758eda14cbcSMatt Macy } 3759eda14cbcSMatt Macy } 3760eda14cbcSMatt Macy 3761eda14cbcSMatt Macy nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE); 3762eda14cbcSMatt Macy 3763eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3764eda14cbcSMatt Macy 3765eda14cbcSMatt Macy /* 3766eda14cbcSMatt Macy * Build a new vdev tree from the trusted config 3767eda14cbcSMatt Macy */ 37687877fdebSMatt Macy error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD); 37697877fdebSMatt Macy if (error != 0) { 37707877fdebSMatt Macy nvlist_free(mos_config); 37717877fdebSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 37727877fdebSMatt Macy spa_load_failed(spa, "spa_config_parse failed [error=%d]", 37737877fdebSMatt Macy error); 37747877fdebSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 37757877fdebSMatt Macy } 3776eda14cbcSMatt Macy 3777eda14cbcSMatt Macy /* 3778eda14cbcSMatt Macy * Vdev paths in the MOS may be obsolete. If the untrusted config was 3779eda14cbcSMatt Macy * obtained by scanning /dev/dsk, then it will have the right vdev 3780eda14cbcSMatt Macy * paths. We update the trusted MOS config with this information. 3781eda14cbcSMatt Macy * We first try to copy the paths with vdev_copy_path_strict, which 3782eda14cbcSMatt Macy * succeeds only when both configs have exactly the same vdev tree. 3783eda14cbcSMatt Macy * If that fails, we fall back to a more flexible method that has a 3784eda14cbcSMatt Macy * best effort policy. 3785eda14cbcSMatt Macy */ 3786eda14cbcSMatt Macy copy_error = vdev_copy_path_strict(rvd, mrvd); 3787eda14cbcSMatt Macy if (copy_error != 0 || spa_load_print_vdev_tree) { 3788eda14cbcSMatt Macy spa_load_note(spa, "provided vdev tree:"); 3789eda14cbcSMatt Macy vdev_dbgmsg_print_tree(rvd, 2); 3790eda14cbcSMatt Macy spa_load_note(spa, "MOS vdev tree:"); 3791eda14cbcSMatt Macy vdev_dbgmsg_print_tree(mrvd, 2); 3792eda14cbcSMatt Macy } 3793eda14cbcSMatt Macy if (copy_error != 0) { 3794eda14cbcSMatt Macy spa_load_note(spa, "vdev_copy_path_strict failed, falling " 3795eda14cbcSMatt Macy "back to vdev_copy_path_relaxed"); 3796eda14cbcSMatt Macy vdev_copy_path_relaxed(rvd, mrvd); 3797eda14cbcSMatt Macy } 3798eda14cbcSMatt Macy 3799eda14cbcSMatt Macy vdev_close(rvd); 3800eda14cbcSMatt Macy vdev_free(rvd); 3801eda14cbcSMatt Macy spa->spa_root_vdev = mrvd; 3802eda14cbcSMatt Macy rvd = mrvd; 3803eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 3804eda14cbcSMatt Macy 3805eda14cbcSMatt Macy /* 3806eda14cbcSMatt Macy * We will use spa_config if we decide to reload the spa or if spa_load 3807eda14cbcSMatt Macy * fails and we rewind. We must thus regenerate the config using the 3808eda14cbcSMatt Macy * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to 3809eda14cbcSMatt Macy * pass settings on how to load the pool and is not stored in the MOS. 3810eda14cbcSMatt Macy * We copy it over to our new, trusted config. 
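	 *
	 * For illustration only (assumed userland usage, not code from this
	 * file): the load policy is itself a small nvlist that callers
	 * attach to the config before import, e.g. to request a rewind to a
	 * given txg:
	 *
	 *	nvlist_t *policy = fnvlist_alloc();
	 *	fnvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg);
	 *	fnvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
	 *	    ZPOOL_DO_REWIND);
	 *	fnvlist_add_nvlist(config, ZPOOL_LOAD_POLICY, policy);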
3811eda14cbcSMatt Macy */ 3812eda14cbcSMatt Macy mos_config_txg = fnvlist_lookup_uint64(mos_config, 3813eda14cbcSMatt Macy ZPOOL_CONFIG_POOL_TXG); 3814eda14cbcSMatt Macy nvlist_free(mos_config); 3815eda14cbcSMatt Macy mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE); 3816eda14cbcSMatt Macy if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY, 3817eda14cbcSMatt Macy &policy) == 0) 3818eda14cbcSMatt Macy fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy); 3819eda14cbcSMatt Macy spa_config_set(spa, mos_config); 3820eda14cbcSMatt Macy spa->spa_config_source = SPA_CONFIG_SRC_MOS; 3821eda14cbcSMatt Macy 3822eda14cbcSMatt Macy /* 3823eda14cbcSMatt Macy * Now that we got the config from the MOS, we should be more strict 3824eda14cbcSMatt Macy * in checking blkptrs and can make assumptions about the consistency 3825eda14cbcSMatt Macy * of the vdev tree. spa_trust_config must be set to true before opening 3826eda14cbcSMatt Macy * vdevs in order for them to be writeable. 3827eda14cbcSMatt Macy */ 3828eda14cbcSMatt Macy spa->spa_trust_config = B_TRUE; 3829eda14cbcSMatt Macy 3830eda14cbcSMatt Macy /* 3831eda14cbcSMatt Macy * Open and validate the new vdev tree 3832eda14cbcSMatt Macy */ 3833eda14cbcSMatt Macy error = spa_ld_open_vdevs(spa); 3834eda14cbcSMatt Macy if (error != 0) 3835eda14cbcSMatt Macy return (error); 3836eda14cbcSMatt Macy 3837eda14cbcSMatt Macy error = spa_ld_validate_vdevs(spa); 3838eda14cbcSMatt Macy if (error != 0) 3839eda14cbcSMatt Macy return (error); 3840eda14cbcSMatt Macy 3841eda14cbcSMatt Macy if (copy_error != 0 || spa_load_print_vdev_tree) { 3842eda14cbcSMatt Macy spa_load_note(spa, "final vdev tree:"); 3843eda14cbcSMatt Macy vdev_dbgmsg_print_tree(rvd, 2); 3844eda14cbcSMatt Macy } 3845eda14cbcSMatt Macy 3846eda14cbcSMatt Macy if (spa->spa_load_state != SPA_LOAD_TRYIMPORT && 3847eda14cbcSMatt Macy !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) { 3848eda14cbcSMatt Macy /* 3849eda14cbcSMatt Macy * Sanity check to make sure that we are indeed loading the 3850eda14cbcSMatt Macy * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds 3851eda14cbcSMatt Macy * in the config provided and they happened to be the only ones 3852eda14cbcSMatt Macy * to have the latest uberblock, we could involuntarily perform 3853eda14cbcSMatt Macy * an extreme rewind. 3854eda14cbcSMatt Macy */ 3855eda14cbcSMatt Macy healthy_tvds_mos = spa_healthy_core_tvds(spa); 3856eda14cbcSMatt Macy if (healthy_tvds_mos - healthy_tvds >= 3857eda14cbcSMatt Macy SPA_SYNC_MIN_VDEVS) { 3858eda14cbcSMatt Macy spa_load_note(spa, "config provided misses too many " 3859eda14cbcSMatt Macy "top-level vdevs compared to MOS (%lld vs %lld). ", 3860eda14cbcSMatt Macy (u_longlong_t)healthy_tvds, 3861eda14cbcSMatt Macy (u_longlong_t)healthy_tvds_mos); 3862eda14cbcSMatt Macy spa_load_note(spa, "vdev tree:"); 3863eda14cbcSMatt Macy vdev_dbgmsg_print_tree(rvd, 2); 3864eda14cbcSMatt Macy if (reloading) { 3865eda14cbcSMatt Macy spa_load_failed(spa, "config was already " 3866eda14cbcSMatt Macy "provided from MOS. 
Aborting.");
3867eda14cbcSMatt Macy 				return (spa_vdev_err(rvd,
3868eda14cbcSMatt Macy 				    VDEV_AUX_CORRUPT_DATA, EIO));
3869eda14cbcSMatt Macy 			}
3870eda14cbcSMatt Macy 			spa_load_note(spa, "spa must be reloaded using MOS "
3871eda14cbcSMatt Macy 			    "config");
3872eda14cbcSMatt Macy 			return (SET_ERROR(EAGAIN));
3873eda14cbcSMatt Macy 		}
3874eda14cbcSMatt Macy 	}
3875eda14cbcSMatt Macy 
3876eda14cbcSMatt Macy 	error = spa_check_for_missing_logs(spa);
3877eda14cbcSMatt Macy 	if (error != 0)
3878eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
3879eda14cbcSMatt Macy 
3880eda14cbcSMatt Macy 	if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
3881eda14cbcSMatt Macy 		spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
3882eda14cbcSMatt Macy 		    "guid sum (%llu != %llu)",
3883eda14cbcSMatt Macy 		    (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
3884eda14cbcSMatt Macy 		    (u_longlong_t)rvd->vdev_guid_sum);
3885eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
3886eda14cbcSMatt Macy 		    ENXIO));
3887eda14cbcSMatt Macy 	}
3888eda14cbcSMatt Macy 
3889eda14cbcSMatt Macy 	return (0);
3890eda14cbcSMatt Macy }
3891eda14cbcSMatt Macy 
3892eda14cbcSMatt Macy static int
3893eda14cbcSMatt Macy spa_ld_open_indirect_vdev_metadata(spa_t *spa)
3894eda14cbcSMatt Macy {
3895eda14cbcSMatt Macy 	int error = 0;
3896eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
3897eda14cbcSMatt Macy 
3898eda14cbcSMatt Macy 	/*
3899eda14cbcSMatt Macy 	 * Everything that we read before spa_remove_init() must be stored
3900eda14cbcSMatt Macy 	 * on concrete vdevs. Therefore we do this as early as possible.
3901eda14cbcSMatt Macy 	 */
3902eda14cbcSMatt Macy 	error = spa_remove_init(spa);
3903eda14cbcSMatt Macy 	if (error != 0) {
3904eda14cbcSMatt Macy 		spa_load_failed(spa, "spa_remove_init failed [error=%d]",
3905eda14cbcSMatt Macy 		    error);
3906eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3907eda14cbcSMatt Macy 	}
3908eda14cbcSMatt Macy 
3909eda14cbcSMatt Macy 	/*
3910eda14cbcSMatt Macy 	 * Retrieve information needed to condense indirect vdev mappings.
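	 *
	 * The ordering constraint in this function can be summarized as a
	 * sketch (descriptive pseudocode, not literal code from this file):
	 *
	 *	spa_remove_init(spa);	loads vdev indirect mapping objects
	 *	spa_condense_init(spa);	loads any in-progress condense state
	 *	... only after this point may reads be remapped through
	 *	    indirect vdevs ...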
3911eda14cbcSMatt Macy */ 3912eda14cbcSMatt Macy error = spa_condense_init(spa); 3913eda14cbcSMatt Macy if (error != 0) { 3914eda14cbcSMatt Macy spa_load_failed(spa, "spa_condense_init failed [error=%d]", 3915eda14cbcSMatt Macy error); 3916eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 3917eda14cbcSMatt Macy } 3918eda14cbcSMatt Macy 3919eda14cbcSMatt Macy return (0); 3920eda14cbcSMatt Macy } 3921eda14cbcSMatt Macy 3922eda14cbcSMatt Macy static int 3923eda14cbcSMatt Macy spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep) 3924eda14cbcSMatt Macy { 3925eda14cbcSMatt Macy int error = 0; 3926eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 3927eda14cbcSMatt Macy 3928eda14cbcSMatt Macy if (spa_version(spa) >= SPA_VERSION_FEATURES) { 3929eda14cbcSMatt Macy boolean_t missing_feat_read = B_FALSE; 3930eda14cbcSMatt Macy nvlist_t *unsup_feat, *enabled_feat; 3931eda14cbcSMatt Macy 3932eda14cbcSMatt Macy if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 3933eda14cbcSMatt Macy &spa->spa_feat_for_read_obj, B_TRUE) != 0) { 3934eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3935eda14cbcSMatt Macy } 3936eda14cbcSMatt Macy 3937eda14cbcSMatt Macy if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 3938eda14cbcSMatt Macy &spa->spa_feat_for_write_obj, B_TRUE) != 0) { 3939eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3940eda14cbcSMatt Macy } 3941eda14cbcSMatt Macy 3942eda14cbcSMatt Macy if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 3943eda14cbcSMatt Macy &spa->spa_feat_desc_obj, B_TRUE) != 0) { 3944eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3945eda14cbcSMatt Macy } 3946eda14cbcSMatt Macy 3947eda14cbcSMatt Macy enabled_feat = fnvlist_alloc(); 3948eda14cbcSMatt Macy unsup_feat = fnvlist_alloc(); 3949eda14cbcSMatt Macy 3950eda14cbcSMatt Macy if (!spa_features_check(spa, B_FALSE, 3951eda14cbcSMatt Macy unsup_feat, enabled_feat)) 3952eda14cbcSMatt Macy missing_feat_read = B_TRUE; 3953eda14cbcSMatt Macy 3954eda14cbcSMatt Macy if (spa_writeable(spa) || 3955eda14cbcSMatt Macy spa->spa_load_state == SPA_LOAD_TRYIMPORT) { 3956eda14cbcSMatt Macy if (!spa_features_check(spa, B_TRUE, 3957eda14cbcSMatt Macy unsup_feat, enabled_feat)) { 3958eda14cbcSMatt Macy *missing_feat_writep = B_TRUE; 3959eda14cbcSMatt Macy } 3960eda14cbcSMatt Macy } 3961eda14cbcSMatt Macy 3962eda14cbcSMatt Macy fnvlist_add_nvlist(spa->spa_load_info, 3963eda14cbcSMatt Macy ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 3964eda14cbcSMatt Macy 3965eda14cbcSMatt Macy if (!nvlist_empty(unsup_feat)) { 3966eda14cbcSMatt Macy fnvlist_add_nvlist(spa->spa_load_info, 3967eda14cbcSMatt Macy ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 3968eda14cbcSMatt Macy } 3969eda14cbcSMatt Macy 3970eda14cbcSMatt Macy fnvlist_free(enabled_feat); 3971eda14cbcSMatt Macy fnvlist_free(unsup_feat); 3972eda14cbcSMatt Macy 3973eda14cbcSMatt Macy if (!missing_feat_read) { 3974eda14cbcSMatt Macy fnvlist_add_boolean(spa->spa_load_info, 3975eda14cbcSMatt Macy ZPOOL_CONFIG_CAN_RDONLY); 3976eda14cbcSMatt Macy } 3977eda14cbcSMatt Macy 3978eda14cbcSMatt Macy /* 3979eda14cbcSMatt Macy * If the state is SPA_LOAD_TRYIMPORT, our objective is 3980eda14cbcSMatt Macy * twofold: to determine whether the pool is available for 3981eda14cbcSMatt Macy * import in read-write mode and (if it is not) whether the 3982eda14cbcSMatt Macy * pool is available for import in read-only mode. 
If the pool 3983eda14cbcSMatt Macy * is available for import in read-write mode, it is displayed 3984eda14cbcSMatt Macy * as available in userland; if it is not available for import 3985eda14cbcSMatt Macy * in read-only mode, it is displayed as unavailable in 3986eda14cbcSMatt Macy * userland. If the pool is available for import in read-only 3987eda14cbcSMatt Macy * mode but not read-write mode, it is displayed as unavailable 3988eda14cbcSMatt Macy * in userland with a special note that the pool is actually 3989eda14cbcSMatt Macy * available for open in read-only mode. 3990eda14cbcSMatt Macy * 3991eda14cbcSMatt Macy * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 3992eda14cbcSMatt Macy * missing a feature for write, we must first determine whether 3993eda14cbcSMatt Macy * the pool can be opened read-only before returning to 3994eda14cbcSMatt Macy * userland in order to know whether to display the 3995eda14cbcSMatt Macy * abovementioned note. 3996eda14cbcSMatt Macy */ 3997eda14cbcSMatt Macy if (missing_feat_read || (*missing_feat_writep && 3998eda14cbcSMatt Macy spa_writeable(spa))) { 3999eda14cbcSMatt Macy spa_load_failed(spa, "pool uses unsupported features"); 4000eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 4001eda14cbcSMatt Macy ENOTSUP)); 4002eda14cbcSMatt Macy } 4003eda14cbcSMatt Macy 4004eda14cbcSMatt Macy /* 4005eda14cbcSMatt Macy * Load refcounts for ZFS features from disk into an in-memory 4006eda14cbcSMatt Macy * cache during SPA initialization. 4007eda14cbcSMatt Macy */ 4008eda14cbcSMatt Macy for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 4009eda14cbcSMatt Macy uint64_t refcount; 4010eda14cbcSMatt Macy 4011eda14cbcSMatt Macy error = feature_get_refcount_from_disk(spa, 4012eda14cbcSMatt Macy &spa_feature_table[i], &refcount); 4013eda14cbcSMatt Macy if (error == 0) { 4014eda14cbcSMatt Macy spa->spa_feat_refcount_cache[i] = refcount; 4015eda14cbcSMatt Macy } else if (error == ENOTSUP) { 4016eda14cbcSMatt Macy spa->spa_feat_refcount_cache[i] = 4017eda14cbcSMatt Macy SPA_FEATURE_DISABLED; 4018eda14cbcSMatt Macy } else { 4019eda14cbcSMatt Macy spa_load_failed(spa, "error getting refcount " 4020eda14cbcSMatt Macy "for feature %s [error=%d]", 4021eda14cbcSMatt Macy spa_feature_table[i].fi_guid, error); 4022eda14cbcSMatt Macy return (spa_vdev_err(rvd, 4023eda14cbcSMatt Macy VDEV_AUX_CORRUPT_DATA, EIO)); 4024eda14cbcSMatt Macy } 4025eda14cbcSMatt Macy } 4026eda14cbcSMatt Macy } 4027eda14cbcSMatt Macy 4028eda14cbcSMatt Macy if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 4029eda14cbcSMatt Macy if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 4030eda14cbcSMatt Macy &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0) 4031eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4032eda14cbcSMatt Macy } 4033eda14cbcSMatt Macy 4034eda14cbcSMatt Macy /* 4035eda14cbcSMatt Macy * Encryption was added before bookmark_v2, even though bookmark_v2 4036eda14cbcSMatt Macy * is now a dependency. If this pool has encryption enabled without 4037eda14cbcSMatt Macy * bookmark_v2, trigger an errata message. 
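	 *
	 * A hedged sketch of how the errata surfaces (the reporting path is
	 * an assumption, not shown in this file): consumers can test the
	 * flag set below, e.g.
	 *
	 *	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_8308_ENCRYPTION)
	 *		... zpool status directs the administrator to the
	 *		    errata documentation ...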
4038eda14cbcSMatt Macy */ 4039eda14cbcSMatt Macy if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) && 4040eda14cbcSMatt Macy !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) { 4041eda14cbcSMatt Macy spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION; 4042eda14cbcSMatt Macy } 4043eda14cbcSMatt Macy 4044eda14cbcSMatt Macy return (0); 4045eda14cbcSMatt Macy } 4046eda14cbcSMatt Macy 4047eda14cbcSMatt Macy static int 4048eda14cbcSMatt Macy spa_ld_load_special_directories(spa_t *spa) 4049eda14cbcSMatt Macy { 4050eda14cbcSMatt Macy int error = 0; 4051eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 4052eda14cbcSMatt Macy 4053eda14cbcSMatt Macy spa->spa_is_initializing = B_TRUE; 4054eda14cbcSMatt Macy error = dsl_pool_open(spa->spa_dsl_pool); 4055eda14cbcSMatt Macy spa->spa_is_initializing = B_FALSE; 4056eda14cbcSMatt Macy if (error != 0) { 4057eda14cbcSMatt Macy spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error); 4058eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4059eda14cbcSMatt Macy } 4060eda14cbcSMatt Macy 4061eda14cbcSMatt Macy return (0); 4062eda14cbcSMatt Macy } 4063eda14cbcSMatt Macy 4064eda14cbcSMatt Macy static int 4065eda14cbcSMatt Macy spa_ld_get_props(spa_t *spa) 4066eda14cbcSMatt Macy { 4067eda14cbcSMatt Macy int error = 0; 4068eda14cbcSMatt Macy uint64_t obj; 4069eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 4070eda14cbcSMatt Macy 4071eda14cbcSMatt Macy /* Grab the checksum salt from the MOS. */ 4072eda14cbcSMatt Macy error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4073eda14cbcSMatt Macy DMU_POOL_CHECKSUM_SALT, 1, 4074eda14cbcSMatt Macy sizeof (spa->spa_cksum_salt.zcs_bytes), 4075eda14cbcSMatt Macy spa->spa_cksum_salt.zcs_bytes); 4076eda14cbcSMatt Macy if (error == ENOENT) { 4077eda14cbcSMatt Macy /* Generate a new salt for subsequent use */ 4078eda14cbcSMatt Macy (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 4079eda14cbcSMatt Macy sizeof (spa->spa_cksum_salt.zcs_bytes)); 4080eda14cbcSMatt Macy } else if (error != 0) { 4081eda14cbcSMatt Macy spa_load_failed(spa, "unable to retrieve checksum salt from " 4082eda14cbcSMatt Macy "MOS [error=%d]", error); 4083eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4084eda14cbcSMatt Macy } 4085eda14cbcSMatt Macy 4086eda14cbcSMatt Macy if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0) 4087eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4088eda14cbcSMatt Macy error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 4089eda14cbcSMatt Macy if (error != 0) { 4090eda14cbcSMatt Macy spa_load_failed(spa, "error opening deferred-frees bpobj " 4091eda14cbcSMatt Macy "[error=%d]", error); 4092eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4093eda14cbcSMatt Macy } 4094eda14cbcSMatt Macy 4095eda14cbcSMatt Macy /* 4096eda14cbcSMatt Macy * Load the bit that tells us to use the new accounting function 4097eda14cbcSMatt Macy * (raid-z deflation). If we have an older pool, this will not 4098eda14cbcSMatt Macy * be present. 
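	 *
	 * The pattern used below for optional MOS entries is uniform; as a
	 * minimal sketch (mirroring the calls that follow):
	 *
	 *	error = spa_dir_prop(spa, DMU_POOL_DEFLATE,
	 *	    &spa->spa_deflate, B_FALSE);
	 *	if (error != 0 && error != ENOENT)
	 *		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	 *
	 * where ENOENT simply means the pool predates the entry and is not
	 * treated as an error.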
4099eda14cbcSMatt Macy 	 */
4100eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
4101eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4102eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4103eda14cbcSMatt Macy 
4104eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
4105eda14cbcSMatt Macy 	    &spa->spa_creation_version, B_FALSE);
4106eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4107eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4108eda14cbcSMatt Macy 
4109eda14cbcSMatt Macy 	/*
4110eda14cbcSMatt Macy 	 * Load the persistent error log. If we have an older pool, this will
4111eda14cbcSMatt Macy 	 * not be present.
4112eda14cbcSMatt Macy 	 */
4113eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
4114eda14cbcSMatt Macy 	    B_FALSE);
4115eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4116eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4117eda14cbcSMatt Macy 
4118eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
4119eda14cbcSMatt Macy 	    &spa->spa_errlog_scrub, B_FALSE);
4120eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4121eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4122eda14cbcSMatt Macy 
4123eda14cbcSMatt Macy 	/*
4124eda14cbcSMatt Macy 	 * Load the livelist deletion field. If a livelist is queued for
4125eda14cbcSMatt Macy 	 * deletion, indicate that in the spa.
4126eda14cbcSMatt Macy 	 */
4127eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
4128eda14cbcSMatt Macy 	    &spa->spa_livelists_to_delete, B_FALSE);
4129eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4130eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4131eda14cbcSMatt Macy 
4132eda14cbcSMatt Macy 	/*
4133eda14cbcSMatt Macy 	 * Load the history object. If we have an older pool, this
4134eda14cbcSMatt Macy 	 * will not be present.
4135eda14cbcSMatt Macy 	 */
4136eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
4137eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4138eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4139eda14cbcSMatt Macy 
4140eda14cbcSMatt Macy 	/*
4141eda14cbcSMatt Macy 	 * Load the per-vdev ZAP map. If we have an older pool, this will not
4142eda14cbcSMatt Macy 	 * be present; in this case, defer its creation to a later time to
4143eda14cbcSMatt Macy 	 * avoid dirtying the MOS this early (i.e. outside of sync context).
4144eda14cbcSMatt Macy 	 * See spa_sync_config_object.
4145eda14cbcSMatt Macy 	 */
4146eda14cbcSMatt Macy 
4147eda14cbcSMatt Macy 	/* The sentinel is only available in the MOS config.
*/ 4148eda14cbcSMatt Macy nvlist_t *mos_config; 4149eda14cbcSMatt Macy if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) { 4150eda14cbcSMatt Macy spa_load_failed(spa, "unable to retrieve MOS config"); 4151eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4152eda14cbcSMatt Macy } 4153eda14cbcSMatt Macy 4154eda14cbcSMatt Macy error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP, 4155eda14cbcSMatt Macy &spa->spa_all_vdev_zaps, B_FALSE); 4156eda14cbcSMatt Macy 4157eda14cbcSMatt Macy if (error == ENOENT) { 4158eda14cbcSMatt Macy VERIFY(!nvlist_exists(mos_config, 4159eda14cbcSMatt Macy ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 4160eda14cbcSMatt Macy spa->spa_avz_action = AVZ_ACTION_INITIALIZE; 4161eda14cbcSMatt Macy ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 4162eda14cbcSMatt Macy } else if (error != 0) { 4163eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4164eda14cbcSMatt Macy } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) { 4165eda14cbcSMatt Macy /* 4166eda14cbcSMatt Macy * An older version of ZFS overwrote the sentinel value, so 4167eda14cbcSMatt Macy * we have orphaned per-vdev ZAPs in the MOS. Defer their 4168eda14cbcSMatt Macy * destruction to later; see spa_sync_config_object. 4169eda14cbcSMatt Macy */ 4170eda14cbcSMatt Macy spa->spa_avz_action = AVZ_ACTION_DESTROY; 4171eda14cbcSMatt Macy /* 4172eda14cbcSMatt Macy * We're assuming that no vdevs have had their ZAPs created 4173eda14cbcSMatt Macy * before this. Better be sure of it. 4174eda14cbcSMatt Macy */ 4175eda14cbcSMatt Macy ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 4176eda14cbcSMatt Macy } 4177eda14cbcSMatt Macy nvlist_free(mos_config); 4178eda14cbcSMatt Macy 4179eda14cbcSMatt Macy spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 4180eda14cbcSMatt Macy 4181eda14cbcSMatt Macy error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object, 4182eda14cbcSMatt Macy B_FALSE); 4183eda14cbcSMatt Macy if (error && error != ENOENT) 4184eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4185eda14cbcSMatt Macy 4186eda14cbcSMatt Macy if (error == 0) { 41871f88aa09SMartin Matuska uint64_t autoreplace = 0; 4188eda14cbcSMatt Macy 4189eda14cbcSMatt Macy spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 4190eda14cbcSMatt Macy spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 4191eda14cbcSMatt Macy spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 4192eda14cbcSMatt Macy spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 4193eda14cbcSMatt Macy spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 4194eda14cbcSMatt Macy spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost); 4195eda14cbcSMatt Macy spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim); 4196eda14cbcSMatt Macy spa->spa_autoreplace = (autoreplace != 0); 4197eda14cbcSMatt Macy } 4198eda14cbcSMatt Macy 4199eda14cbcSMatt Macy /* 4200eda14cbcSMatt Macy * If we are importing a pool with missing top-level vdevs, 4201eda14cbcSMatt Macy * we enforce that the pool doesn't panic or get suspended on 4202eda14cbcSMatt Macy * error since the likelihood of missing data is extremely high. 
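	 *
	 * For reference, the failure modes involved (constants from zio.h;
	 * the one-line summaries are descriptive, not normative):
	 *
	 *	ZIO_FAILURE_MODE_WAIT		suspend the pool on error
	 *	ZIO_FAILURE_MODE_CONTINUE	return EIO and keep going
	 *	ZIO_FAILURE_MODE_PANIC		panic the system on error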
4203eda14cbcSMatt Macy */ 4204eda14cbcSMatt Macy if (spa->spa_missing_tvds > 0 && 4205eda14cbcSMatt Macy spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE && 4206eda14cbcSMatt Macy spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4207eda14cbcSMatt Macy spa_load_note(spa, "forcing failmode to 'continue' " 4208eda14cbcSMatt Macy "as some top level vdevs are missing"); 4209eda14cbcSMatt Macy spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE; 4210eda14cbcSMatt Macy } 4211eda14cbcSMatt Macy 4212eda14cbcSMatt Macy return (0); 4213eda14cbcSMatt Macy } 4214eda14cbcSMatt Macy 4215eda14cbcSMatt Macy static int 4216eda14cbcSMatt Macy spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type) 4217eda14cbcSMatt Macy { 4218eda14cbcSMatt Macy int error = 0; 4219eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 4220eda14cbcSMatt Macy 4221eda14cbcSMatt Macy /* 4222eda14cbcSMatt Macy * If we're assembling the pool from the split-off vdevs of 4223eda14cbcSMatt Macy * an existing pool, we don't want to attach the spares & cache 4224eda14cbcSMatt Macy * devices. 4225eda14cbcSMatt Macy */ 4226eda14cbcSMatt Macy 4227eda14cbcSMatt Macy /* 4228eda14cbcSMatt Macy * Load any hot spares for this pool. 4229eda14cbcSMatt Macy */ 4230eda14cbcSMatt Macy error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object, 4231eda14cbcSMatt Macy B_FALSE); 4232eda14cbcSMatt Macy if (error != 0 && error != ENOENT) 4233eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4234eda14cbcSMatt Macy if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 4235eda14cbcSMatt Macy ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 4236eda14cbcSMatt Macy if (load_nvlist(spa, spa->spa_spares.sav_object, 4237eda14cbcSMatt Macy &spa->spa_spares.sav_config) != 0) { 4238eda14cbcSMatt Macy spa_load_failed(spa, "error loading spares nvlist"); 4239eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4240eda14cbcSMatt Macy } 4241eda14cbcSMatt Macy 4242eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4243eda14cbcSMatt Macy spa_load_spares(spa); 4244eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 4245eda14cbcSMatt Macy } else if (error == 0) { 4246eda14cbcSMatt Macy spa->spa_spares.sav_sync = B_TRUE; 4247eda14cbcSMatt Macy } 4248eda14cbcSMatt Macy 4249eda14cbcSMatt Macy /* 4250eda14cbcSMatt Macy * Load any level 2 ARC devices for this pool. 
4251eda14cbcSMatt Macy */ 4252eda14cbcSMatt Macy error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 4253eda14cbcSMatt Macy &spa->spa_l2cache.sav_object, B_FALSE); 4254eda14cbcSMatt Macy if (error != 0 && error != ENOENT) 4255eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4256eda14cbcSMatt Macy if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 4257eda14cbcSMatt Macy ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 4258eda14cbcSMatt Macy if (load_nvlist(spa, spa->spa_l2cache.sav_object, 4259eda14cbcSMatt Macy &spa->spa_l2cache.sav_config) != 0) { 4260eda14cbcSMatt Macy spa_load_failed(spa, "error loading l2cache nvlist"); 4261eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4262eda14cbcSMatt Macy } 4263eda14cbcSMatt Macy 4264eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4265eda14cbcSMatt Macy spa_load_l2cache(spa); 4266eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 4267eda14cbcSMatt Macy } else if (error == 0) { 4268eda14cbcSMatt Macy spa->spa_l2cache.sav_sync = B_TRUE; 4269eda14cbcSMatt Macy } 4270eda14cbcSMatt Macy 4271eda14cbcSMatt Macy return (0); 4272eda14cbcSMatt Macy } 4273eda14cbcSMatt Macy 4274eda14cbcSMatt Macy static int 4275eda14cbcSMatt Macy spa_ld_load_vdev_metadata(spa_t *spa) 4276eda14cbcSMatt Macy { 4277eda14cbcSMatt Macy int error = 0; 4278eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 4279eda14cbcSMatt Macy 4280eda14cbcSMatt Macy /* 4281eda14cbcSMatt Macy * If the 'multihost' property is set, then never allow a pool to 4282eda14cbcSMatt Macy * be imported when the system hostid is zero. The exception to 4283eda14cbcSMatt Macy * this rule is zdb which is always allowed to access pools. 4284eda14cbcSMatt Macy */ 4285eda14cbcSMatt Macy if (spa_multihost(spa) && spa_get_hostid(spa) == 0 && 4286eda14cbcSMatt Macy (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) { 4287eda14cbcSMatt Macy fnvlist_add_uint64(spa->spa_load_info, 4288eda14cbcSMatt Macy ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 4289eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 4290eda14cbcSMatt Macy } 4291eda14cbcSMatt Macy 4292eda14cbcSMatt Macy /* 4293eda14cbcSMatt Macy * If the 'autoreplace' property is set, then post a resource notifying 4294eda14cbcSMatt Macy * the ZFS DE that it should not issue any faults for unopenable 4295eda14cbcSMatt Macy * devices. We also iterate over the vdevs, and post a sysevent for any 4296eda14cbcSMatt Macy * unopenable vdevs so that the normal autoreplace handler can take 4297eda14cbcSMatt Macy * over. 4298eda14cbcSMatt Macy */ 4299eda14cbcSMatt Macy if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4300eda14cbcSMatt Macy spa_check_removed(spa->spa_root_vdev); 4301eda14cbcSMatt Macy /* 4302eda14cbcSMatt Macy * For the import case, this is done in spa_import(), because 4303eda14cbcSMatt Macy * at this point we're using the spare definitions from 4304eda14cbcSMatt Macy * the MOS config, not necessarily from the userland config. 4305eda14cbcSMatt Macy */ 4306eda14cbcSMatt Macy if (spa->spa_load_state != SPA_LOAD_IMPORT) { 4307eda14cbcSMatt Macy spa_aux_check_removed(&spa->spa_spares); 4308eda14cbcSMatt Macy spa_aux_check_removed(&spa->spa_l2cache); 4309eda14cbcSMatt Macy } 4310eda14cbcSMatt Macy } 4311eda14cbcSMatt Macy 4312eda14cbcSMatt Macy /* 4313eda14cbcSMatt Macy * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc. 
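	 *
	 * "DTL" is the dirty time log: per-vdev trees of txg ranges during
	 * which the vdev may have missed writes. As a gloss of the reassess
	 * call made later in this function (arguments summarized, not
	 * specified, here):
	 *
	 *	vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
	 *
	 * where txg == 0 indicates we are not in syncing context and the
	 * zero/false scrub arguments mean no scrub has just completed.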
4314eda14cbcSMatt Macy */ 4315eda14cbcSMatt Macy error = vdev_load(rvd); 4316eda14cbcSMatt Macy if (error != 0) { 4317eda14cbcSMatt Macy spa_load_failed(spa, "vdev_load failed [error=%d]", error); 4318eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4319eda14cbcSMatt Macy } 4320eda14cbcSMatt Macy 4321eda14cbcSMatt Macy error = spa_ld_log_spacemaps(spa); 4322eda14cbcSMatt Macy if (error != 0) { 4323eda14cbcSMatt Macy spa_load_failed(spa, "spa_ld_log_sm_data failed [error=%d]", 4324eda14cbcSMatt Macy error); 4325eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4326eda14cbcSMatt Macy } 4327eda14cbcSMatt Macy 4328eda14cbcSMatt Macy /* 4329eda14cbcSMatt Macy * Propagate the leaf DTLs we just loaded all the way up the vdev tree. 4330eda14cbcSMatt Macy */ 4331eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4332eda14cbcSMatt Macy vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE); 4333eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 4334eda14cbcSMatt Macy 4335eda14cbcSMatt Macy return (0); 4336eda14cbcSMatt Macy } 4337eda14cbcSMatt Macy 4338eda14cbcSMatt Macy static int 4339eda14cbcSMatt Macy spa_ld_load_dedup_tables(spa_t *spa) 4340eda14cbcSMatt Macy { 4341eda14cbcSMatt Macy int error = 0; 4342eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 4343eda14cbcSMatt Macy 4344eda14cbcSMatt Macy error = ddt_load(spa); 4345eda14cbcSMatt Macy if (error != 0) { 4346eda14cbcSMatt Macy spa_load_failed(spa, "ddt_load failed [error=%d]", error); 4347eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4348eda14cbcSMatt Macy } 4349eda14cbcSMatt Macy 4350eda14cbcSMatt Macy return (0); 4351eda14cbcSMatt Macy } 4352eda14cbcSMatt Macy 4353eda14cbcSMatt Macy static int 4354eda14cbcSMatt Macy spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport) 4355eda14cbcSMatt Macy { 4356eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 4357eda14cbcSMatt Macy 4358eda14cbcSMatt Macy if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) { 4359eda14cbcSMatt Macy boolean_t missing = spa_check_logs(spa); 4360eda14cbcSMatt Macy if (missing) { 4361eda14cbcSMatt Macy if (spa->spa_missing_tvds != 0) { 4362eda14cbcSMatt Macy spa_load_note(spa, "spa_check_logs failed " 4363eda14cbcSMatt Macy "so dropping the logs"); 4364eda14cbcSMatt Macy } else { 4365eda14cbcSMatt Macy *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 4366eda14cbcSMatt Macy spa_load_failed(spa, "spa_check_logs failed"); 4367eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, 4368eda14cbcSMatt Macy ENXIO)); 4369eda14cbcSMatt Macy } 4370eda14cbcSMatt Macy } 4371eda14cbcSMatt Macy } 4372eda14cbcSMatt Macy 4373eda14cbcSMatt Macy return (0); 4374eda14cbcSMatt Macy } 4375eda14cbcSMatt Macy 4376eda14cbcSMatt Macy static int 4377eda14cbcSMatt Macy spa_ld_verify_pool_data(spa_t *spa) 4378eda14cbcSMatt Macy { 4379eda14cbcSMatt Macy int error = 0; 4380eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 4381eda14cbcSMatt Macy 4382eda14cbcSMatt Macy /* 4383eda14cbcSMatt Macy * We've successfully opened the pool, verify that we're ready 4384eda14cbcSMatt Macy * to start pushing transactions. 
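	 *
	 * The depth of that verification is tunable; as an illustration,
	 * the module parameters below exist for this purpose (names are an
	 * assumption based on current Linux builds):
	 *
	 *	spa_load_verify_metadata	traverse pool metadata
	 *	spa_load_verify_data		also verify user data blocks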
4385eda14cbcSMatt Macy */ 4386eda14cbcSMatt Macy if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4387eda14cbcSMatt Macy error = spa_load_verify(spa); 4388eda14cbcSMatt Macy if (error != 0) { 4389eda14cbcSMatt Macy spa_load_failed(spa, "spa_load_verify failed " 4390eda14cbcSMatt Macy "[error=%d]", error); 4391eda14cbcSMatt Macy return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 4392eda14cbcSMatt Macy error)); 4393eda14cbcSMatt Macy } 4394eda14cbcSMatt Macy } 4395eda14cbcSMatt Macy 4396eda14cbcSMatt Macy return (0); 4397eda14cbcSMatt Macy } 4398eda14cbcSMatt Macy 4399eda14cbcSMatt Macy static void 4400eda14cbcSMatt Macy spa_ld_claim_log_blocks(spa_t *spa) 4401eda14cbcSMatt Macy { 4402eda14cbcSMatt Macy dmu_tx_t *tx; 4403eda14cbcSMatt Macy dsl_pool_t *dp = spa_get_dsl(spa); 4404eda14cbcSMatt Macy 4405eda14cbcSMatt Macy /* 4406eda14cbcSMatt Macy * Claim log blocks that haven't been committed yet. 4407eda14cbcSMatt Macy * This must all happen in a single txg. 4408eda14cbcSMatt Macy * Note: spa_claim_max_txg is updated by spa_claim_notify(), 4409eda14cbcSMatt Macy * invoked from zil_claim_log_block()'s i/o done callback. 4410eda14cbcSMatt Macy * Price of rollback is that we abandon the log. 4411eda14cbcSMatt Macy */ 4412eda14cbcSMatt Macy spa->spa_claiming = B_TRUE; 4413eda14cbcSMatt Macy 4414eda14cbcSMatt Macy tx = dmu_tx_create_assigned(dp, spa_first_txg(spa)); 4415eda14cbcSMatt Macy (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 4416eda14cbcSMatt Macy zil_claim, tx, DS_FIND_CHILDREN); 4417eda14cbcSMatt Macy dmu_tx_commit(tx); 4418eda14cbcSMatt Macy 4419eda14cbcSMatt Macy spa->spa_claiming = B_FALSE; 4420eda14cbcSMatt Macy 4421eda14cbcSMatt Macy spa_set_log_state(spa, SPA_LOG_GOOD); 4422eda14cbcSMatt Macy } 4423eda14cbcSMatt Macy 4424eda14cbcSMatt Macy static void 4425eda14cbcSMatt Macy spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg, 4426eda14cbcSMatt Macy boolean_t update_config_cache) 4427eda14cbcSMatt Macy { 4428eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 4429eda14cbcSMatt Macy int need_update = B_FALSE; 4430eda14cbcSMatt Macy 4431eda14cbcSMatt Macy /* 4432eda14cbcSMatt Macy * If the config cache is stale, or we have uninitialized 4433eda14cbcSMatt Macy * metaslabs (see spa_vdev_add()), then update the config. 4434eda14cbcSMatt Macy * 4435eda14cbcSMatt Macy * If this is a verbatim import, trust the current 4436eda14cbcSMatt Macy * in-core spa_config and update the disk labels. 4437eda14cbcSMatt Macy */ 4438eda14cbcSMatt Macy if (update_config_cache || config_cache_txg != spa->spa_config_txg || 4439eda14cbcSMatt Macy spa->spa_load_state == SPA_LOAD_IMPORT || 4440eda14cbcSMatt Macy spa->spa_load_state == SPA_LOAD_RECOVER || 4441eda14cbcSMatt Macy (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 4442eda14cbcSMatt Macy need_update = B_TRUE; 4443eda14cbcSMatt Macy 4444eda14cbcSMatt Macy for (int c = 0; c < rvd->vdev_children; c++) 4445eda14cbcSMatt Macy if (rvd->vdev_child[c]->vdev_ms_array == 0) 4446eda14cbcSMatt Macy need_update = B_TRUE; 4447eda14cbcSMatt Macy 4448eda14cbcSMatt Macy /* 4449eda14cbcSMatt Macy * Update the config cache asynchronously in case we're the 4450eda14cbcSMatt Macy * root pool, in which case the config cache isn't writable yet. 
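	 *
	 * SPA_ASYNC_CONFIG_UPDATE is serviced later by the spa async
	 * thread; simplified from that handler (a sketch, not the literal
	 * code), the deferred work amounts to roughly:
	 *
	 *	mutex_enter(&spa_namespace_lock);
	 *	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
	 *	mutex_exit(&spa_namespace_lock);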
4451eda14cbcSMatt Macy */ 4452eda14cbcSMatt Macy if (need_update) 4453eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 4454eda14cbcSMatt Macy } 4455eda14cbcSMatt Macy 4456eda14cbcSMatt Macy static void 4457eda14cbcSMatt Macy spa_ld_prepare_for_reload(spa_t *spa) 4458eda14cbcSMatt Macy { 4459eda14cbcSMatt Macy spa_mode_t mode = spa->spa_mode; 4460eda14cbcSMatt Macy int async_suspended = spa->spa_async_suspended; 4461eda14cbcSMatt Macy 4462eda14cbcSMatt Macy spa_unload(spa); 4463eda14cbcSMatt Macy spa_deactivate(spa); 4464eda14cbcSMatt Macy spa_activate(spa, mode); 4465eda14cbcSMatt Macy 4466eda14cbcSMatt Macy /* 4467eda14cbcSMatt Macy * We save the value of spa_async_suspended as it gets reset to 0 by 4468eda14cbcSMatt Macy * spa_unload(). We want to restore it back to the original value before 4469eda14cbcSMatt Macy * returning as we might be calling spa_async_resume() later. 4470eda14cbcSMatt Macy */ 4471eda14cbcSMatt Macy spa->spa_async_suspended = async_suspended; 4472eda14cbcSMatt Macy } 4473eda14cbcSMatt Macy 4474eda14cbcSMatt Macy static int 4475eda14cbcSMatt Macy spa_ld_read_checkpoint_txg(spa_t *spa) 4476eda14cbcSMatt Macy { 4477eda14cbcSMatt Macy uberblock_t checkpoint; 4478eda14cbcSMatt Macy int error = 0; 4479eda14cbcSMatt Macy 4480eda14cbcSMatt Macy ASSERT0(spa->spa_checkpoint_txg); 4481eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 4482eda14cbcSMatt Macy 4483eda14cbcSMatt Macy error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4484eda14cbcSMatt Macy DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 4485eda14cbcSMatt Macy sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 4486eda14cbcSMatt Macy 4487eda14cbcSMatt Macy if (error == ENOENT) 4488eda14cbcSMatt Macy return (0); 4489eda14cbcSMatt Macy 4490eda14cbcSMatt Macy if (error != 0) 4491eda14cbcSMatt Macy return (error); 4492eda14cbcSMatt Macy 4493eda14cbcSMatt Macy ASSERT3U(checkpoint.ub_txg, !=, 0); 4494eda14cbcSMatt Macy ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0); 4495eda14cbcSMatt Macy ASSERT3U(checkpoint.ub_timestamp, !=, 0); 4496eda14cbcSMatt Macy spa->spa_checkpoint_txg = checkpoint.ub_txg; 4497eda14cbcSMatt Macy spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp; 4498eda14cbcSMatt Macy 4499eda14cbcSMatt Macy return (0); 4500eda14cbcSMatt Macy } 4501eda14cbcSMatt Macy 4502eda14cbcSMatt Macy static int 4503eda14cbcSMatt Macy spa_ld_mos_init(spa_t *spa, spa_import_type_t type) 4504eda14cbcSMatt Macy { 4505eda14cbcSMatt Macy int error = 0; 4506eda14cbcSMatt Macy 4507eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 4508eda14cbcSMatt Macy ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 4509eda14cbcSMatt Macy 4510eda14cbcSMatt Macy /* 4511eda14cbcSMatt Macy * Never trust the config that is provided unless we are assembling 4512eda14cbcSMatt Macy * a pool following a split. 4513eda14cbcSMatt Macy * This means don't trust blkptrs and the vdev tree in general. This 4514eda14cbcSMatt Macy * also effectively puts the spa in read-only mode since 4515eda14cbcSMatt Macy * spa_writeable() checks for spa_trust_config to be true. 4516eda14cbcSMatt Macy * We will later load a trusted config from the MOS. 4517eda14cbcSMatt Macy */ 4518eda14cbcSMatt Macy if (type != SPA_IMPORT_ASSEMBLE) 4519eda14cbcSMatt Macy spa->spa_trust_config = B_FALSE; 4520eda14cbcSMatt Macy 4521eda14cbcSMatt Macy /* 4522eda14cbcSMatt Macy * Parse the config provided to create a vdev tree. 
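	 *
	 * For orientation, the provided config is an nvlist shaped roughly
	 * as sketched below (keys from sys/fs/zfs.h; the layout is a
	 * simplification):
	 *
	 *	ZPOOL_CONFIG_POOL_NAME		"tank"
	 *	ZPOOL_CONFIG_POOL_GUID		<guid>
	 *	ZPOOL_CONFIG_VDEV_TREE
	 *		ZPOOL_CONFIG_TYPE	"root"
	 *		ZPOOL_CONFIG_CHILDREN	[ mirror / raidz / disk ... ]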
4523eda14cbcSMatt Macy */ 4524eda14cbcSMatt Macy error = spa_ld_parse_config(spa, type); 4525eda14cbcSMatt Macy if (error != 0) 4526eda14cbcSMatt Macy return (error); 4527eda14cbcSMatt Macy 4528eda14cbcSMatt Macy spa_import_progress_add(spa); 4529eda14cbcSMatt Macy 4530eda14cbcSMatt Macy /* 4531eda14cbcSMatt Macy * Now that we have the vdev tree, try to open each vdev. This involves 4532eda14cbcSMatt Macy * opening the underlying physical device, retrieving its geometry and 4533eda14cbcSMatt Macy * probing the vdev with a dummy I/O. The state of each vdev will be set 4534eda14cbcSMatt Macy * based on the success of those operations. After this we'll be ready 4535eda14cbcSMatt Macy * to read from the vdevs. 4536eda14cbcSMatt Macy */ 4537eda14cbcSMatt Macy error = spa_ld_open_vdevs(spa); 4538eda14cbcSMatt Macy if (error != 0) 4539eda14cbcSMatt Macy return (error); 4540eda14cbcSMatt Macy 4541eda14cbcSMatt Macy /* 4542eda14cbcSMatt Macy * Read the label of each vdev and make sure that the GUIDs stored 4543eda14cbcSMatt Macy * there match the GUIDs in the config provided. 4544eda14cbcSMatt Macy * If we're assembling a new pool that's been split off from an 4545eda14cbcSMatt Macy * existing pool, the labels haven't yet been updated so we skip 4546eda14cbcSMatt Macy * validation for now. 4547eda14cbcSMatt Macy */ 4548eda14cbcSMatt Macy if (type != SPA_IMPORT_ASSEMBLE) { 4549eda14cbcSMatt Macy error = spa_ld_validate_vdevs(spa); 4550eda14cbcSMatt Macy if (error != 0) 4551eda14cbcSMatt Macy return (error); 4552eda14cbcSMatt Macy } 4553eda14cbcSMatt Macy 4554eda14cbcSMatt Macy /* 4555eda14cbcSMatt Macy * Read all vdev labels to find the best uberblock (i.e. latest, 4556eda14cbcSMatt Macy * unless spa_load_max_txg is set) and store it in spa_uberblock. We 4557eda14cbcSMatt Macy * get the list of features required to read blkptrs in the MOS from 4558eda14cbcSMatt Macy * the vdev label with the best uberblock and verify that our version 4559eda14cbcSMatt Macy * of zfs supports them all. 4560eda14cbcSMatt Macy */ 4561eda14cbcSMatt Macy error = spa_ld_select_uberblock(spa, type); 4562eda14cbcSMatt Macy if (error != 0) 4563eda14cbcSMatt Macy return (error); 4564eda14cbcSMatt Macy 4565eda14cbcSMatt Macy /* 4566eda14cbcSMatt Macy * Pass that uberblock to the dsl_pool layer which will open the root 4567eda14cbcSMatt Macy * blkptr. This blkptr points to the latest version of the MOS and will 4568eda14cbcSMatt Macy * allow us to read its contents. 
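	 *
	 * In essence (a simplified sketch; the field name comes from the
	 * uberblock definition), the uberblock carries the root block
	 * pointer,
	 *
	 *	blkptr_t *bp = &spa->spa_uberblock.ub_rootbp;
	 *
	 * and dsl_pool_init() uses it to open the MOS objset as of txg
	 * ub_txg.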
4569eda14cbcSMatt Macy 	 */
4570eda14cbcSMatt Macy 	error = spa_ld_open_rootbp(spa);
4571eda14cbcSMatt Macy 	if (error != 0)
4572eda14cbcSMatt Macy 		return (error);
4573eda14cbcSMatt Macy 
4574eda14cbcSMatt Macy 	return (0);
4575eda14cbcSMatt Macy }
4576eda14cbcSMatt Macy 
4577eda14cbcSMatt Macy static int
4578eda14cbcSMatt Macy spa_ld_checkpoint_rewind(spa_t *spa)
4579eda14cbcSMatt Macy {
4580eda14cbcSMatt Macy 	uberblock_t checkpoint;
4581eda14cbcSMatt Macy 	int error = 0;
4582eda14cbcSMatt Macy 
4583eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4584eda14cbcSMatt Macy 	ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4585eda14cbcSMatt Macy 
4586eda14cbcSMatt Macy 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4587eda14cbcSMatt Macy 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
4588eda14cbcSMatt Macy 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
4589eda14cbcSMatt Macy 
4590eda14cbcSMatt Macy 	if (error != 0) {
4591eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve checkpointed "
4592eda14cbcSMatt Macy 		    "uberblock from the MOS config [error=%d]", error);
4593eda14cbcSMatt Macy 
4594eda14cbcSMatt Macy 		if (error == ENOENT)
4595eda14cbcSMatt Macy 			error = ZFS_ERR_NO_CHECKPOINT;
4596eda14cbcSMatt Macy 
4597eda14cbcSMatt Macy 		return (error);
4598eda14cbcSMatt Macy 	}
4599eda14cbcSMatt Macy 
4600eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
4601eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
4602eda14cbcSMatt Macy 
4603eda14cbcSMatt Macy 	/*
4604eda14cbcSMatt Macy 	 * We need to update the txg and timestamp of the checkpointed
4605eda14cbcSMatt Macy 	 * uberblock to be higher than the latest one. This ensures that
4606eda14cbcSMatt Macy 	 * the checkpointed uberblock is selected if we were to close and
4607eda14cbcSMatt Macy 	 * reopen the pool right after we've written it in the vdev labels.
4608eda14cbcSMatt Macy 	 * (also see block comment in vdev_uberblock_compare)
4609eda14cbcSMatt Macy 	 */
4610eda14cbcSMatt Macy 	checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
4611eda14cbcSMatt Macy 	checkpoint.ub_timestamp = gethrestime_sec();
4612eda14cbcSMatt Macy 
4613eda14cbcSMatt Macy 	/*
4614eda14cbcSMatt Macy 	 * Set current uberblock to be the checkpointed uberblock.
4615eda14cbcSMatt Macy 	 */
4616eda14cbcSMatt Macy 	spa->spa_uberblock = checkpoint;
4617eda14cbcSMatt Macy 
4618eda14cbcSMatt Macy 	/*
4619eda14cbcSMatt Macy 	 * If we are doing a normal rewind, then the pool is open for
4620eda14cbcSMatt Macy 	 * writing and we sync the "updated" checkpointed uberblock to
4621eda14cbcSMatt Macy 	 * disk. Once this is done, we've basically rewound the whole
4622eda14cbcSMatt Macy 	 * pool and there is no way back.
4623eda14cbcSMatt Macy 	 *
4624eda14cbcSMatt Macy 	 * There are cases when we don't want to attempt to sync the
4625eda14cbcSMatt Macy 	 * checkpointed uberblock to disk because we are opening a
4626eda14cbcSMatt Macy 	 * pool as read-only. Specifically, verifying the checkpointed
4627eda14cbcSMatt Macy 	 * state with zdb, and importing the checkpointed state to get
4628eda14cbcSMatt Macy 	 * a "preview" of its content.
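	 *
	 * Illustrative userland entry points for those read-only cases
	 * (command syntax is an assumption based on current tooling):
	 *
	 *	zdb -k <pool>
	 *	zpool import --rewind-to-checkpoint -o readonly=on <pool>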
4629eda14cbcSMatt Macy */ 4630eda14cbcSMatt Macy if (spa_writeable(spa)) { 4631eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 4632eda14cbcSMatt Macy 4633eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4634eda14cbcSMatt Macy vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 4635eda14cbcSMatt Macy int svdcount = 0; 4636eda14cbcSMatt Macy int children = rvd->vdev_children; 463733b8c039SMartin Matuska int c0 = random_in_range(children); 4638eda14cbcSMatt Macy 4639eda14cbcSMatt Macy for (int c = 0; c < children; c++) { 4640eda14cbcSMatt Macy vdev_t *vd = rvd->vdev_child[(c0 + c) % children]; 4641eda14cbcSMatt Macy 4642eda14cbcSMatt Macy /* Stop when revisiting the first vdev */ 4643eda14cbcSMatt Macy if (c > 0 && svd[0] == vd) 4644eda14cbcSMatt Macy break; 4645eda14cbcSMatt Macy 4646eda14cbcSMatt Macy if (vd->vdev_ms_array == 0 || vd->vdev_islog || 4647eda14cbcSMatt Macy !vdev_is_concrete(vd)) 4648eda14cbcSMatt Macy continue; 4649eda14cbcSMatt Macy 4650eda14cbcSMatt Macy svd[svdcount++] = vd; 4651eda14cbcSMatt Macy if (svdcount == SPA_SYNC_MIN_VDEVS) 4652eda14cbcSMatt Macy break; 4653eda14cbcSMatt Macy } 4654eda14cbcSMatt Macy error = vdev_config_sync(svd, svdcount, spa->spa_first_txg); 4655eda14cbcSMatt Macy if (error == 0) 4656eda14cbcSMatt Macy spa->spa_last_synced_guid = rvd->vdev_guid; 4657eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 4658eda14cbcSMatt Macy 4659eda14cbcSMatt Macy if (error != 0) { 4660eda14cbcSMatt Macy spa_load_failed(spa, "failed to write checkpointed " 4661eda14cbcSMatt Macy "uberblock to the vdev labels [error=%d]", error); 4662eda14cbcSMatt Macy return (error); 4663eda14cbcSMatt Macy } 4664eda14cbcSMatt Macy } 4665eda14cbcSMatt Macy 4666eda14cbcSMatt Macy return (0); 4667eda14cbcSMatt Macy } 4668eda14cbcSMatt Macy 4669eda14cbcSMatt Macy static int 4670eda14cbcSMatt Macy spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type, 4671eda14cbcSMatt Macy boolean_t *update_config_cache) 4672eda14cbcSMatt Macy { 4673eda14cbcSMatt Macy int error; 4674eda14cbcSMatt Macy 4675eda14cbcSMatt Macy /* 4676eda14cbcSMatt Macy * Parse the config for pool, open and validate vdevs, 4677eda14cbcSMatt Macy * select an uberblock, and use that uberblock to open 4678eda14cbcSMatt Macy * the MOS. 4679eda14cbcSMatt Macy */ 4680eda14cbcSMatt Macy error = spa_ld_mos_init(spa, type); 4681eda14cbcSMatt Macy if (error != 0) 4682eda14cbcSMatt Macy return (error); 4683eda14cbcSMatt Macy 4684eda14cbcSMatt Macy /* 4685eda14cbcSMatt Macy * Retrieve the trusted config stored in the MOS and use it to create 4686eda14cbcSMatt Macy * a new, exact version of the vdev tree, then reopen all vdevs. 4687eda14cbcSMatt Macy */ 4688eda14cbcSMatt Macy error = spa_ld_trusted_config(spa, type, B_FALSE); 4689eda14cbcSMatt Macy if (error == EAGAIN) { 4690eda14cbcSMatt Macy if (update_config_cache != NULL) 4691eda14cbcSMatt Macy *update_config_cache = B_TRUE; 4692eda14cbcSMatt Macy 4693eda14cbcSMatt Macy /* 4694eda14cbcSMatt Macy * Redo the loading process with the trusted config if it is 4695eda14cbcSMatt Macy * too different from the untrusted config. 
4696eda14cbcSMatt Macy 	 */
4697eda14cbcSMatt Macy 		spa_ld_prepare_for_reload(spa);
4698eda14cbcSMatt Macy 		spa_load_note(spa, "RELOADING");
4699eda14cbcSMatt Macy 		error = spa_ld_mos_init(spa, type);
4700eda14cbcSMatt Macy 		if (error != 0)
4701eda14cbcSMatt Macy 			return (error);
4702eda14cbcSMatt Macy 
4703eda14cbcSMatt Macy 		error = spa_ld_trusted_config(spa, type, B_TRUE);
4704eda14cbcSMatt Macy 		if (error != 0)
4705eda14cbcSMatt Macy 			return (error);
4706eda14cbcSMatt Macy 
4707eda14cbcSMatt Macy 	} else if (error != 0) {
4708eda14cbcSMatt Macy 		return (error);
4709eda14cbcSMatt Macy 	}
4710eda14cbcSMatt Macy 
4711eda14cbcSMatt Macy 	return (0);
4712eda14cbcSMatt Macy }
4713eda14cbcSMatt Macy 
4714eda14cbcSMatt Macy /*
4715eda14cbcSMatt Macy  * Load an existing storage pool, using the config provided. This config
4716eda14cbcSMatt Macy  * describes which vdevs are part of the pool and is later validated against
4717eda14cbcSMatt Macy  * partial configs present in each vdev's label and an entire copy of the
4718eda14cbcSMatt Macy  * config stored in the MOS.
4719eda14cbcSMatt Macy  */
4720eda14cbcSMatt Macy static int
4721eda14cbcSMatt Macy spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
4722eda14cbcSMatt Macy {
4723eda14cbcSMatt Macy 	int error = 0;
4724eda14cbcSMatt Macy 	boolean_t missing_feat_write = B_FALSE;
4725eda14cbcSMatt Macy 	boolean_t checkpoint_rewind =
4726eda14cbcSMatt Macy 	    (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4727eda14cbcSMatt Macy 	boolean_t update_config_cache = B_FALSE;
4728eda14cbcSMatt Macy 
4729eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4730eda14cbcSMatt Macy 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
4731eda14cbcSMatt Macy 
4732eda14cbcSMatt Macy 	spa_load_note(spa, "LOADING");
4733eda14cbcSMatt Macy 
4734eda14cbcSMatt Macy 	error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
4735eda14cbcSMatt Macy 	if (error != 0)
4736eda14cbcSMatt Macy 		return (error);
4737eda14cbcSMatt Macy 
4738eda14cbcSMatt Macy 	/*
4739eda14cbcSMatt Macy 	 * If we are rewinding to the checkpoint, then we need to repeat
4740eda14cbcSMatt Macy 	 * everything we've done so far in this function but this time
4741eda14cbcSMatt Macy 	 * selecting the checkpointed uberblock and using that to open
4742eda14cbcSMatt Macy 	 * the MOS.
4743eda14cbcSMatt Macy 	 */
4744eda14cbcSMatt Macy 	if (checkpoint_rewind) {
4745eda14cbcSMatt Macy 		/*
4746eda14cbcSMatt Macy 		 * If we are rewinding to the checkpoint, update the config
4747eda14cbcSMatt Macy 		 * cache anyway.
4748eda14cbcSMatt Macy 		 */
4749eda14cbcSMatt Macy 		update_config_cache = B_TRUE;
4750eda14cbcSMatt Macy 
4751eda14cbcSMatt Macy 		/*
4752eda14cbcSMatt Macy 		 * Extract the checkpointed uberblock from the current MOS
4753eda14cbcSMatt Macy 		 * and use this as the pool's uberblock from now on. If the
4754eda14cbcSMatt Macy 		 * pool is imported as writeable we also write the checkpoint
4755eda14cbcSMatt Macy 		 * uberblock to the labels, making the rewind permanent.
4756eda14cbcSMatt Macy 		 */
4757eda14cbcSMatt Macy 		error = spa_ld_checkpoint_rewind(spa);
4758eda14cbcSMatt Macy 		if (error != 0)
4759eda14cbcSMatt Macy 			return (error);
4760eda14cbcSMatt Macy 
4761eda14cbcSMatt Macy 		/*
4762eda14cbcSMatt Macy 		 * Redo the loading process with the
4763eda14cbcSMatt Macy 		 * checkpointed uberblock.
4764eda14cbcSMatt Macy */ 4765eda14cbcSMatt Macy spa_ld_prepare_for_reload(spa); 4766eda14cbcSMatt Macy spa_load_note(spa, "LOADING checkpointed uberblock"); 4767eda14cbcSMatt Macy error = spa_ld_mos_with_trusted_config(spa, type, NULL); 4768eda14cbcSMatt Macy if (error != 0) 4769eda14cbcSMatt Macy return (error); 4770eda14cbcSMatt Macy } 4771eda14cbcSMatt Macy 4772eda14cbcSMatt Macy /* 4773eda14cbcSMatt Macy * Retrieve the checkpoint txg if the pool has a checkpoint. 4774eda14cbcSMatt Macy */ 4775eda14cbcSMatt Macy error = spa_ld_read_checkpoint_txg(spa); 4776eda14cbcSMatt Macy if (error != 0) 4777eda14cbcSMatt Macy return (error); 4778eda14cbcSMatt Macy 4779eda14cbcSMatt Macy /* 4780eda14cbcSMatt Macy * Retrieve the mapping of indirect vdevs. Those vdevs were removed 4781eda14cbcSMatt Macy * from the pool and their contents were re-mapped to other vdevs. Note 4782eda14cbcSMatt Macy * that everything that we read before this step must have been 4783eda14cbcSMatt Macy * rewritten on concrete vdevs after the last device removal was 4784eda14cbcSMatt Macy * initiated. Otherwise we could be reading from indirect vdevs before 4785eda14cbcSMatt Macy * we have loaded their mappings. 4786eda14cbcSMatt Macy */ 4787eda14cbcSMatt Macy error = spa_ld_open_indirect_vdev_metadata(spa); 4788eda14cbcSMatt Macy if (error != 0) 4789eda14cbcSMatt Macy return (error); 4790eda14cbcSMatt Macy 4791eda14cbcSMatt Macy /* 4792eda14cbcSMatt Macy * Retrieve the full list of active features from the MOS and check if 4793eda14cbcSMatt Macy * they are all supported. 4794eda14cbcSMatt Macy */ 4795eda14cbcSMatt Macy error = spa_ld_check_features(spa, &missing_feat_write); 4796eda14cbcSMatt Macy if (error != 0) 4797eda14cbcSMatt Macy return (error); 4798eda14cbcSMatt Macy 4799eda14cbcSMatt Macy /* 4800eda14cbcSMatt Macy * Load several special directories from the MOS needed by the dsl_pool 4801eda14cbcSMatt Macy * layer. 4802eda14cbcSMatt Macy */ 4803eda14cbcSMatt Macy error = spa_ld_load_special_directories(spa); 4804eda14cbcSMatt Macy if (error != 0) 4805eda14cbcSMatt Macy return (error); 4806eda14cbcSMatt Macy 4807eda14cbcSMatt Macy /* 4808eda14cbcSMatt Macy * Retrieve pool properties from the MOS. 4809eda14cbcSMatt Macy */ 4810eda14cbcSMatt Macy error = spa_ld_get_props(spa); 4811eda14cbcSMatt Macy if (error != 0) 4812eda14cbcSMatt Macy return (error); 4813eda14cbcSMatt Macy 4814eda14cbcSMatt Macy /* 4815eda14cbcSMatt Macy * Retrieve the list of auxiliary devices - cache devices and spares - 4816eda14cbcSMatt Macy * and open them. 4817eda14cbcSMatt Macy */ 4818eda14cbcSMatt Macy error = spa_ld_open_aux_vdevs(spa, type); 4819eda14cbcSMatt Macy if (error != 0) 4820eda14cbcSMatt Macy return (error); 4821eda14cbcSMatt Macy 4822eda14cbcSMatt Macy /* 4823eda14cbcSMatt Macy * Load the metadata for all vdevs. Also check if unopenable devices 4824eda14cbcSMatt Macy * should be autoreplaced. 4825eda14cbcSMatt Macy */ 4826eda14cbcSMatt Macy error = spa_ld_load_vdev_metadata(spa); 4827eda14cbcSMatt Macy if (error != 0) 4828eda14cbcSMatt Macy return (error); 4829eda14cbcSMatt Macy 4830eda14cbcSMatt Macy error = spa_ld_load_dedup_tables(spa); 4831eda14cbcSMatt Macy if (error != 0) 4832eda14cbcSMatt Macy return (error); 4833eda14cbcSMatt Macy 4834eda14cbcSMatt Macy /* 4835eda14cbcSMatt Macy * Verify the logs now to make sure we don't have any unexpected errors 4836eda14cbcSMatt Macy * when we claim log blocks later. 
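 * The claim itself only happens further down, and only if the pool is imported writeable; see spa_ld_claim_log_blocks() below.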
4837eda14cbcSMatt Macy */ 4838eda14cbcSMatt Macy error = spa_ld_verify_logs(spa, type, ereport); 4839eda14cbcSMatt Macy if (error != 0) 4840eda14cbcSMatt Macy return (error); 4841eda14cbcSMatt Macy 4842eda14cbcSMatt Macy if (missing_feat_write) { 4843eda14cbcSMatt Macy ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT); 4844eda14cbcSMatt Macy 4845eda14cbcSMatt Macy /* 4846eda14cbcSMatt Macy * At this point, we know that we can open the pool in 4847eda14cbcSMatt Macy * read-only mode but not read-write mode. We now have enough 4848eda14cbcSMatt Macy * information and can return to userland. 4849eda14cbcSMatt Macy */ 4850eda14cbcSMatt Macy return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT, 4851eda14cbcSMatt Macy ENOTSUP)); 4852eda14cbcSMatt Macy } 4853eda14cbcSMatt Macy 4854eda14cbcSMatt Macy /* 4855eda14cbcSMatt Macy * Traverse the last txgs to make sure the pool was left off in a safe 4856eda14cbcSMatt Macy * state. When performing an extreme rewind, we verify the whole pool, 4857eda14cbcSMatt Macy * which can take a very long time. 4858eda14cbcSMatt Macy */ 4859eda14cbcSMatt Macy error = spa_ld_verify_pool_data(spa); 4860eda14cbcSMatt Macy if (error != 0) 4861eda14cbcSMatt Macy return (error); 4862eda14cbcSMatt Macy 4863eda14cbcSMatt Macy /* 4864eda14cbcSMatt Macy * Calculate the deflated space for the pool. This must be done before 4865eda14cbcSMatt Macy * we write anything to the pool because we'd need to update the space 4866eda14cbcSMatt Macy * accounting using the deflated sizes. 4867eda14cbcSMatt Macy */ 4868eda14cbcSMatt Macy spa_update_dspace(spa); 4869eda14cbcSMatt Macy 4870eda14cbcSMatt Macy /* 4871eda14cbcSMatt Macy * We have now retrieved all the information we needed to open the 4872eda14cbcSMatt Macy * pool. If we are importing the pool in read-write mode, a few 4873eda14cbcSMatt Macy * additional steps must be performed to finish the import. 4874eda14cbcSMatt Macy */ 4875eda14cbcSMatt Macy if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER || 4876eda14cbcSMatt Macy spa->spa_load_max_txg == UINT64_MAX)) { 4877eda14cbcSMatt Macy uint64_t config_cache_txg = spa->spa_config_txg; 4878eda14cbcSMatt Macy 4879eda14cbcSMatt Macy ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT); 4880eda14cbcSMatt Macy 4881eda14cbcSMatt Macy /* 4882eda14cbcSMatt Macy * In case of a checkpoint rewind, log the original txg 4883eda14cbcSMatt Macy * of the checkpointed uberblock. 4884eda14cbcSMatt Macy */ 4885eda14cbcSMatt Macy if (checkpoint_rewind) { 4886eda14cbcSMatt Macy spa_history_log_internal(spa, "checkpoint rewind", 4887eda14cbcSMatt Macy NULL, "rewound state to txg=%llu", 4888eda14cbcSMatt Macy (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg); 4889eda14cbcSMatt Macy } 4890eda14cbcSMatt Macy 4891eda14cbcSMatt Macy /* 4892eda14cbcSMatt Macy * Traverse the ZIL and claim all blocks. 4893eda14cbcSMatt Macy */ 4894eda14cbcSMatt Macy spa_ld_claim_log_blocks(spa); 4895eda14cbcSMatt Macy 4896eda14cbcSMatt Macy /* 4897eda14cbcSMatt Macy * Kick-off the syncing thread. 4898eda14cbcSMatt Macy */ 4899eda14cbcSMatt Macy spa->spa_sync_on = B_TRUE; 4900eda14cbcSMatt Macy txg_sync_start(spa->spa_dsl_pool); 4901eda14cbcSMatt Macy mmp_thread_start(spa); 4902eda14cbcSMatt Macy 4903eda14cbcSMatt Macy /* 4904eda14cbcSMatt Macy * Wait for all claims to sync. We sync up to the highest 4905eda14cbcSMatt Macy * claimed log block birth time so that claimed log blocks 4906eda14cbcSMatt Macy * don't appear to be from the future. 
spa_claim_max_txg 4907eda14cbcSMatt Macy * will have been set for us by ZIL traversal operations 4908eda14cbcSMatt Macy * performed above. 4909eda14cbcSMatt Macy */ 4910eda14cbcSMatt Macy txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 4911eda14cbcSMatt Macy 4912eda14cbcSMatt Macy /* 4913eda14cbcSMatt Macy * Check if we need to request an update of the config. On the 4914eda14cbcSMatt Macy * next sync, we would update the config stored in vdev labels 4915eda14cbcSMatt Macy * and the cachefile (by default /etc/zfs/zpool.cache). 4916eda14cbcSMatt Macy */ 4917eda14cbcSMatt Macy spa_ld_check_for_config_update(spa, config_cache_txg, 4918eda14cbcSMatt Macy update_config_cache); 4919eda14cbcSMatt Macy 4920eda14cbcSMatt Macy /* 4921eda14cbcSMatt Macy * Check if a rebuild was in progress and if so resume it. 4922eda14cbcSMatt Macy * Then check all DTLs to see if anything needs resilvering. 4923eda14cbcSMatt Macy * The resilver will be deferred if a rebuild was started. 4924eda14cbcSMatt Macy */ 4925eda14cbcSMatt Macy if (vdev_rebuild_active(spa->spa_root_vdev)) { 4926eda14cbcSMatt Macy vdev_rebuild_restart(spa); 4927eda14cbcSMatt Macy } else if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 4928eda14cbcSMatt Macy vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 4929eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_RESILVER); 4930eda14cbcSMatt Macy } 4931eda14cbcSMatt Macy 4932eda14cbcSMatt Macy /* 4933eda14cbcSMatt Macy * Log the fact that we booted up (so that we can detect if 4934eda14cbcSMatt Macy * we rebooted in the middle of an operation). 4935eda14cbcSMatt Macy */ 4936eda14cbcSMatt Macy spa_history_log_version(spa, "open", NULL); 4937eda14cbcSMatt Macy 4938eda14cbcSMatt Macy spa_restart_removal(spa); 4939eda14cbcSMatt Macy spa_spawn_aux_threads(spa); 4940eda14cbcSMatt Macy 4941eda14cbcSMatt Macy /* 4942eda14cbcSMatt Macy * Delete any inconsistent datasets. 4943eda14cbcSMatt Macy * 4944eda14cbcSMatt Macy * Note: 4945eda14cbcSMatt Macy * Since we may be issuing deletes for clones here, 4946eda14cbcSMatt Macy * we make sure to do so after we've spawned all the 4947eda14cbcSMatt Macy * auxiliary threads above (the livelist 4948eda14cbcSMatt Macy * deletion zthr being one of them). 4949eda14cbcSMatt Macy */ 4950eda14cbcSMatt Macy (void) dmu_objset_find(spa_name(spa), 4951eda14cbcSMatt Macy dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 4952eda14cbcSMatt Macy 4953eda14cbcSMatt Macy /* 4954eda14cbcSMatt Macy * Clean up any stale temporary dataset userrefs.
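 * These are temporary snapshot holds that may have been left behind, e.g. if the system went down before they could be released.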
4955eda14cbcSMatt Macy */ 4956eda14cbcSMatt Macy dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 4957eda14cbcSMatt Macy 4958eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4959eda14cbcSMatt Macy vdev_initialize_restart(spa->spa_root_vdev); 4960eda14cbcSMatt Macy vdev_trim_restart(spa->spa_root_vdev); 4961eda14cbcSMatt Macy vdev_autotrim_restart(spa); 4962eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG, FTAG); 4963eda14cbcSMatt Macy } 4964eda14cbcSMatt Macy 4965eda14cbcSMatt Macy spa_import_progress_remove(spa_guid(spa)); 4966eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD); 4967eda14cbcSMatt Macy 4968eda14cbcSMatt Macy spa_load_note(spa, "LOADED"); 4969eda14cbcSMatt Macy 4970eda14cbcSMatt Macy return (0); 4971eda14cbcSMatt Macy } 4972eda14cbcSMatt Macy 4973eda14cbcSMatt Macy static int 4974eda14cbcSMatt Macy spa_load_retry(spa_t *spa, spa_load_state_t state) 4975eda14cbcSMatt Macy { 4976eda14cbcSMatt Macy spa_mode_t mode = spa->spa_mode; 4977eda14cbcSMatt Macy 4978eda14cbcSMatt Macy spa_unload(spa); 4979eda14cbcSMatt Macy spa_deactivate(spa); 4980eda14cbcSMatt Macy 4981eda14cbcSMatt Macy spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 4982eda14cbcSMatt Macy 4983eda14cbcSMatt Macy spa_activate(spa, mode); 4984eda14cbcSMatt Macy spa_async_suspend(spa); 4985eda14cbcSMatt Macy 4986eda14cbcSMatt Macy spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu", 4987eda14cbcSMatt Macy (u_longlong_t)spa->spa_load_max_txg); 4988eda14cbcSMatt Macy 4989eda14cbcSMatt Macy return (spa_load(spa, state, SPA_IMPORT_EXISTING)); 4990eda14cbcSMatt Macy } 4991eda14cbcSMatt Macy 4992eda14cbcSMatt Macy /* 4993eda14cbcSMatt Macy * If spa_load() fails this function will try loading prior txg's. If 4994eda14cbcSMatt Macy * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 4995eda14cbcSMatt Macy * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 4996eda14cbcSMatt Macy * function will not rewind the pool and will return the same error as 4997eda14cbcSMatt Macy * spa_load(). 4998eda14cbcSMatt Macy */ 4999eda14cbcSMatt Macy static int 5000eda14cbcSMatt Macy spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, 5001eda14cbcSMatt Macy int rewind_flags) 5002eda14cbcSMatt Macy { 5003eda14cbcSMatt Macy nvlist_t *loadinfo = NULL; 5004eda14cbcSMatt Macy nvlist_t *config = NULL; 5005eda14cbcSMatt Macy int load_error, rewind_error; 5006eda14cbcSMatt Macy uint64_t safe_rewind_txg; 5007eda14cbcSMatt Macy uint64_t min_txg; 5008eda14cbcSMatt Macy 5009eda14cbcSMatt Macy if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 5010eda14cbcSMatt Macy spa->spa_load_max_txg = spa->spa_load_txg; 5011eda14cbcSMatt Macy spa_set_log_state(spa, SPA_LOG_CLEAR); 5012eda14cbcSMatt Macy } else { 5013eda14cbcSMatt Macy spa->spa_load_max_txg = max_request; 5014eda14cbcSMatt Macy if (max_request != UINT64_MAX) 5015eda14cbcSMatt Macy spa->spa_extreme_rewind = B_TRUE; 5016eda14cbcSMatt Macy } 5017eda14cbcSMatt Macy 5018eda14cbcSMatt Macy load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING); 5019eda14cbcSMatt Macy if (load_error == 0) 5020eda14cbcSMatt Macy return (0); 5021eda14cbcSMatt Macy if (load_error == ZFS_ERR_NO_CHECKPOINT) { 5022eda14cbcSMatt Macy /* 5023eda14cbcSMatt Macy * When attempting checkpoint-rewind on a pool with no 5024eda14cbcSMatt Macy * checkpoint, we should not attempt to load uberblocks 5025eda14cbcSMatt Macy * from previous txgs when spa_load fails. 
5026eda14cbcSMatt Macy */ 5027eda14cbcSMatt Macy ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 5028eda14cbcSMatt Macy spa_import_progress_remove(spa_guid(spa)); 5029eda14cbcSMatt Macy return (load_error); 5030eda14cbcSMatt Macy } 5031eda14cbcSMatt Macy 5032eda14cbcSMatt Macy if (spa->spa_root_vdev != NULL) 5033eda14cbcSMatt Macy config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5034eda14cbcSMatt Macy 5035eda14cbcSMatt Macy spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 5036eda14cbcSMatt Macy spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 5037eda14cbcSMatt Macy 5038eda14cbcSMatt Macy if (rewind_flags & ZPOOL_NEVER_REWIND) { 5039eda14cbcSMatt Macy nvlist_free(config); 5040eda14cbcSMatt Macy spa_import_progress_remove(spa_guid(spa)); 5041eda14cbcSMatt Macy return (load_error); 5042eda14cbcSMatt Macy } 5043eda14cbcSMatt Macy 5044eda14cbcSMatt Macy if (state == SPA_LOAD_RECOVER) { 5045eda14cbcSMatt Macy /* Price of rolling back is discarding txgs, including log */ 5046eda14cbcSMatt Macy spa_set_log_state(spa, SPA_LOG_CLEAR); 5047eda14cbcSMatt Macy } else { 5048eda14cbcSMatt Macy /* 5049eda14cbcSMatt Macy * If we aren't rolling back save the load info from our first 5050eda14cbcSMatt Macy * import attempt so that we can restore it after attempting 5051eda14cbcSMatt Macy * to rewind. 5052eda14cbcSMatt Macy */ 5053eda14cbcSMatt Macy loadinfo = spa->spa_load_info; 5054eda14cbcSMatt Macy spa->spa_load_info = fnvlist_alloc(); 5055eda14cbcSMatt Macy } 5056eda14cbcSMatt Macy 5057eda14cbcSMatt Macy spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 5058eda14cbcSMatt Macy safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 5059eda14cbcSMatt Macy min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 5060eda14cbcSMatt Macy TXG_INITIAL : safe_rewind_txg; 5061eda14cbcSMatt Macy 5062eda14cbcSMatt Macy /* 5063eda14cbcSMatt Macy * Continue as long as we're finding errors, we're still within 5064eda14cbcSMatt Macy * the acceptable rewind range, and we're still finding uberblocks 5065eda14cbcSMatt Macy */ 5066eda14cbcSMatt Macy while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 5067eda14cbcSMatt Macy spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 5068eda14cbcSMatt Macy if (spa->spa_load_max_txg < safe_rewind_txg) 5069eda14cbcSMatt Macy spa->spa_extreme_rewind = B_TRUE; 5070eda14cbcSMatt Macy rewind_error = spa_load_retry(spa, state); 5071eda14cbcSMatt Macy } 5072eda14cbcSMatt Macy 5073eda14cbcSMatt Macy spa->spa_extreme_rewind = B_FALSE; 5074eda14cbcSMatt Macy spa->spa_load_max_txg = UINT64_MAX; 5075eda14cbcSMatt Macy 5076eda14cbcSMatt Macy if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 5077eda14cbcSMatt Macy spa_config_set(spa, config); 5078eda14cbcSMatt Macy else 5079eda14cbcSMatt Macy nvlist_free(config); 5080eda14cbcSMatt Macy 5081eda14cbcSMatt Macy if (state == SPA_LOAD_RECOVER) { 5082eda14cbcSMatt Macy ASSERT3P(loadinfo, ==, NULL); 5083eda14cbcSMatt Macy spa_import_progress_remove(spa_guid(spa)); 5084eda14cbcSMatt Macy return (rewind_error); 5085eda14cbcSMatt Macy } else { 5086eda14cbcSMatt Macy /* Store the rewind info as part of the initial load info */ 5087eda14cbcSMatt Macy fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 5088eda14cbcSMatt Macy spa->spa_load_info); 5089eda14cbcSMatt Macy 5090eda14cbcSMatt Macy /* Restore the initial load info */ 5091eda14cbcSMatt Macy fnvlist_free(spa->spa_load_info); 5092eda14cbcSMatt Macy spa->spa_load_info = loadinfo; 5093eda14cbcSMatt Macy 5094eda14cbcSMatt Macy 
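 /* The caller did not ask for a rewind, so report the error from the original load attempt rather than from the rewind attempts. */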
spa_import_progress_remove(spa_guid(spa)); 5095eda14cbcSMatt Macy return (load_error); 5096eda14cbcSMatt Macy } 5097eda14cbcSMatt Macy } 5098eda14cbcSMatt Macy 5099eda14cbcSMatt Macy /* 5100eda14cbcSMatt Macy * Pool Open/Import 5101eda14cbcSMatt Macy * 5102eda14cbcSMatt Macy * The import case is identical to an open except that the configuration is sent 5103eda14cbcSMatt Macy * down from userland, instead of being grabbed from the configuration cache. For 5104eda14cbcSMatt Macy * the case of an open, the pool configuration will exist in the 5105eda14cbcSMatt Macy * POOL_STATE_UNINITIALIZED state. 5106eda14cbcSMatt Macy * 5107eda14cbcSMatt Macy * The stats information (gen/count/ustats) is used to gather vdev statistics at 5108eda14cbcSMatt Macy * the same time we open the pool, without having to keep the spa_t around in 5109eda14cbcSMatt Macy * some ambiguous state. 5110eda14cbcSMatt Macy */ 5111eda14cbcSMatt Macy static int 5112eda14cbcSMatt Macy spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 5113eda14cbcSMatt Macy nvlist_t **config) 5114eda14cbcSMatt Macy { 5115eda14cbcSMatt Macy spa_t *spa; 5116eda14cbcSMatt Macy spa_load_state_t state = SPA_LOAD_OPEN; 5117eda14cbcSMatt Macy int error; 5118eda14cbcSMatt Macy int locked = B_FALSE; 5119eda14cbcSMatt Macy int firstopen = B_FALSE; 5120eda14cbcSMatt Macy 5121eda14cbcSMatt Macy *spapp = NULL; 5122eda14cbcSMatt Macy 5123eda14cbcSMatt Macy /* 5124eda14cbcSMatt Macy * As disgusting as this is, we need to support recursive calls to this 5125eda14cbcSMatt Macy * function because dsl_dir_open() is called during spa_load(), and ends 5126eda14cbcSMatt Macy * up calling spa_open() again. The real fix is to figure out how to 5127eda14cbcSMatt Macy * avoid dsl_dir_open() calling this in the first place. 5128eda14cbcSMatt Macy */ 5129eda14cbcSMatt Macy if (MUTEX_NOT_HELD(&spa_namespace_lock)) { 5130eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 5131eda14cbcSMatt Macy locked = B_TRUE; 5132eda14cbcSMatt Macy } 5133eda14cbcSMatt Macy 5134eda14cbcSMatt Macy if ((spa = spa_lookup(pool)) == NULL) { 5135eda14cbcSMatt Macy if (locked) 5136eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5137eda14cbcSMatt Macy return (SET_ERROR(ENOENT)); 5138eda14cbcSMatt Macy } 5139eda14cbcSMatt Macy 5140eda14cbcSMatt Macy if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 5141eda14cbcSMatt Macy zpool_load_policy_t policy; 5142eda14cbcSMatt Macy 5143eda14cbcSMatt Macy firstopen = B_TRUE; 5144eda14cbcSMatt Macy 5145eda14cbcSMatt Macy zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config, 5146eda14cbcSMatt Macy &policy); 5147eda14cbcSMatt Macy if (policy.zlp_rewind & ZPOOL_DO_REWIND) 5148eda14cbcSMatt Macy state = SPA_LOAD_RECOVER; 5149eda14cbcSMatt Macy 5150eda14cbcSMatt Macy spa_activate(spa, spa_mode_global); 5151eda14cbcSMatt Macy 5152eda14cbcSMatt Macy if (state != SPA_LOAD_RECOVER) 5153eda14cbcSMatt Macy spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 5154eda14cbcSMatt Macy spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 5155eda14cbcSMatt Macy 5156eda14cbcSMatt Macy zfs_dbgmsg("spa_open_common: opening %s", pool); 5157eda14cbcSMatt Macy error = spa_load_best(spa, state, policy.zlp_txg, 5158eda14cbcSMatt Macy policy.zlp_rewind); 5159eda14cbcSMatt Macy 5160eda14cbcSMatt Macy if (error == EBADF) { 5161eda14cbcSMatt Macy /* 5162eda14cbcSMatt Macy * If vdev_validate() returns failure (indicated by 5163eda14cbcSMatt Macy * EBADF), it means that one of the vdevs indicates 5164eda14cbcSMatt Macy * that the pool has been exported or destroyed. If 5165eda14cbcSMatt Macy * this is the case, the config cache is out of sync and 5166eda14cbcSMatt Macy * we should remove the pool from the namespace. 5167eda14cbcSMatt Macy */ 5168eda14cbcSMatt Macy spa_unload(spa); 5169eda14cbcSMatt Macy spa_deactivate(spa); 5170eda14cbcSMatt Macy spa_write_cachefile(spa, B_TRUE, B_TRUE); 5171eda14cbcSMatt Macy spa_remove(spa); 5172eda14cbcSMatt Macy if (locked) 5173eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5174eda14cbcSMatt Macy return (SET_ERROR(ENOENT)); 5175eda14cbcSMatt Macy } 5176eda14cbcSMatt Macy 5177eda14cbcSMatt Macy if (error) { 5178eda14cbcSMatt Macy /* 5179eda14cbcSMatt Macy * We can't open the pool, but we still have useful 5180eda14cbcSMatt Macy * information: the state of each vdev after the 5181eda14cbcSMatt Macy * attempted vdev_open(). Return this to the user. 5182eda14cbcSMatt Macy */ 5183eda14cbcSMatt Macy if (config != NULL && spa->spa_config) { 5184eda14cbcSMatt Macy VERIFY(nvlist_dup(spa->spa_config, config, 5185eda14cbcSMatt Macy KM_SLEEP) == 0); 5186eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist(*config, 5187eda14cbcSMatt Macy ZPOOL_CONFIG_LOAD_INFO, 5188eda14cbcSMatt Macy spa->spa_load_info) == 0); 5189eda14cbcSMatt Macy } 5190eda14cbcSMatt Macy spa_unload(spa); 5191eda14cbcSMatt Macy spa_deactivate(spa); 5192eda14cbcSMatt Macy spa->spa_last_open_failed = error; 5193eda14cbcSMatt Macy if (locked) 5194eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5195eda14cbcSMatt Macy *spapp = NULL; 5196eda14cbcSMatt Macy return (error); 5197eda14cbcSMatt Macy } 5198eda14cbcSMatt Macy } 5199eda14cbcSMatt Macy 5200eda14cbcSMatt Macy spa_open_ref(spa, tag); 5201eda14cbcSMatt Macy 5202eda14cbcSMatt Macy if (config != NULL) 5203eda14cbcSMatt Macy *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5204eda14cbcSMatt Macy 5205eda14cbcSMatt Macy /* 5206eda14cbcSMatt Macy * If we've recovered the pool, pass back any information we 5207eda14cbcSMatt Macy * gathered while doing the load.
5208eda14cbcSMatt Macy */ 5209eda14cbcSMatt Macy if (state == SPA_LOAD_RECOVER) { 5210eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 5211eda14cbcSMatt Macy spa->spa_load_info) == 0); 5212eda14cbcSMatt Macy } 5213eda14cbcSMatt Macy 5214eda14cbcSMatt Macy if (locked) { 5215eda14cbcSMatt Macy spa->spa_last_open_failed = 0; 5216eda14cbcSMatt Macy spa->spa_last_ubsync_txg = 0; 5217eda14cbcSMatt Macy spa->spa_load_txg = 0; 5218eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5219eda14cbcSMatt Macy } 5220eda14cbcSMatt Macy 5221eda14cbcSMatt Macy if (firstopen) 5222eda14cbcSMatt Macy zvol_create_minors_recursive(spa_name(spa)); 5223eda14cbcSMatt Macy 5224eda14cbcSMatt Macy *spapp = spa; 5225eda14cbcSMatt Macy 5226eda14cbcSMatt Macy return (0); 5227eda14cbcSMatt Macy } 5228eda14cbcSMatt Macy 5229eda14cbcSMatt Macy int 5230eda14cbcSMatt Macy spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 5231eda14cbcSMatt Macy nvlist_t **config) 5232eda14cbcSMatt Macy { 5233eda14cbcSMatt Macy return (spa_open_common(name, spapp, tag, policy, config)); 5234eda14cbcSMatt Macy } 5235eda14cbcSMatt Macy 5236eda14cbcSMatt Macy int 5237eda14cbcSMatt Macy spa_open(const char *name, spa_t **spapp, void *tag) 5238eda14cbcSMatt Macy { 5239eda14cbcSMatt Macy return (spa_open_common(name, spapp, tag, NULL, NULL)); 5240eda14cbcSMatt Macy } 5241eda14cbcSMatt Macy 5242eda14cbcSMatt Macy /* 5243eda14cbcSMatt Macy * Lookup the given spa_t, incrementing the inject count in the process, 5244eda14cbcSMatt Macy * preventing it from being exported or destroyed. 5245eda14cbcSMatt Macy */ 5246eda14cbcSMatt Macy spa_t * 5247eda14cbcSMatt Macy spa_inject_addref(char *name) 5248eda14cbcSMatt Macy { 5249eda14cbcSMatt Macy spa_t *spa; 5250eda14cbcSMatt Macy 5251eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 5252eda14cbcSMatt Macy if ((spa = spa_lookup(name)) == NULL) { 5253eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5254eda14cbcSMatt Macy return (NULL); 5255eda14cbcSMatt Macy } 5256eda14cbcSMatt Macy spa->spa_inject_ref++; 5257eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5258eda14cbcSMatt Macy 5259eda14cbcSMatt Macy return (spa); 5260eda14cbcSMatt Macy } 5261eda14cbcSMatt Macy 5262eda14cbcSMatt Macy void 5263eda14cbcSMatt Macy spa_inject_delref(spa_t *spa) 5264eda14cbcSMatt Macy { 5265eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 5266eda14cbcSMatt Macy spa->spa_inject_ref--; 5267eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5268eda14cbcSMatt Macy } 5269eda14cbcSMatt Macy 5270eda14cbcSMatt Macy /* 5271eda14cbcSMatt Macy * Add spares device information to the nvlist. 
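 * Spares that have since been activated as an in-use spare are reported below with VDEV_STATE_CANT_OPEN and VDEV_AUX_SPARED.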
5272eda14cbcSMatt Macy */ 5273eda14cbcSMatt Macy static void 5274eda14cbcSMatt Macy spa_add_spares(spa_t *spa, nvlist_t *config) 5275eda14cbcSMatt Macy { 5276eda14cbcSMatt Macy nvlist_t **spares; 5277eda14cbcSMatt Macy uint_t i, nspares; 5278eda14cbcSMatt Macy nvlist_t *nvroot; 5279eda14cbcSMatt Macy uint64_t guid; 5280eda14cbcSMatt Macy vdev_stat_t *vs; 5281eda14cbcSMatt Macy uint_t vsc; 5282eda14cbcSMatt Macy uint64_t pool; 5283eda14cbcSMatt Macy 5284eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5285eda14cbcSMatt Macy 5286eda14cbcSMatt Macy if (spa->spa_spares.sav_count == 0) 5287eda14cbcSMatt Macy return; 5288eda14cbcSMatt Macy 5289eda14cbcSMatt Macy VERIFY(nvlist_lookup_nvlist(config, 5290eda14cbcSMatt Macy ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 5291eda14cbcSMatt Macy VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5292eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 5293eda14cbcSMatt Macy if (nspares != 0) { 5294eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist_array(nvroot, 5295eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 5296eda14cbcSMatt Macy VERIFY(nvlist_lookup_nvlist_array(nvroot, 5297eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 5298eda14cbcSMatt Macy 5299eda14cbcSMatt Macy /* 5300eda14cbcSMatt Macy * Go through and find any spares which have since been 5301eda14cbcSMatt Macy * repurposed as an active spare. If this is the case, update 5302eda14cbcSMatt Macy * their status appropriately. 5303eda14cbcSMatt Macy */ 5304eda14cbcSMatt Macy for (i = 0; i < nspares; i++) { 5305eda14cbcSMatt Macy VERIFY(nvlist_lookup_uint64(spares[i], 5306eda14cbcSMatt Macy ZPOOL_CONFIG_GUID, &guid) == 0); 5307eda14cbcSMatt Macy if (spa_spare_exists(guid, &pool, NULL) && 5308eda14cbcSMatt Macy pool != 0ULL) { 5309eda14cbcSMatt Macy VERIFY(nvlist_lookup_uint64_array( 5310eda14cbcSMatt Macy spares[i], ZPOOL_CONFIG_VDEV_STATS, 5311eda14cbcSMatt Macy (uint64_t **)&vs, &vsc) == 0); 5312eda14cbcSMatt Macy vs->vs_state = VDEV_STATE_CANT_OPEN; 5313eda14cbcSMatt Macy vs->vs_aux = VDEV_AUX_SPARED; 5314eda14cbcSMatt Macy } 5315eda14cbcSMatt Macy } 5316eda14cbcSMatt Macy } 5317eda14cbcSMatt Macy } 5318eda14cbcSMatt Macy 5319eda14cbcSMatt Macy /* 5320eda14cbcSMatt Macy * Add l2cache device information to the nvlist, including vdev stats. 
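 * The stats are taken from the in-core vdev_t, found by matching the guid recorded in each cache device's config entry.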
5321eda14cbcSMatt Macy */ 5322eda14cbcSMatt Macy static void 5323eda14cbcSMatt Macy spa_add_l2cache(spa_t *spa, nvlist_t *config) 5324eda14cbcSMatt Macy { 5325eda14cbcSMatt Macy nvlist_t **l2cache; 5326eda14cbcSMatt Macy uint_t i, j, nl2cache; 5327eda14cbcSMatt Macy nvlist_t *nvroot; 5328eda14cbcSMatt Macy uint64_t guid; 5329eda14cbcSMatt Macy vdev_t *vd; 5330eda14cbcSMatt Macy vdev_stat_t *vs; 5331eda14cbcSMatt Macy uint_t vsc; 5332eda14cbcSMatt Macy 5333eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5334eda14cbcSMatt Macy 5335eda14cbcSMatt Macy if (spa->spa_l2cache.sav_count == 0) 5336eda14cbcSMatt Macy return; 5337eda14cbcSMatt Macy 5338eda14cbcSMatt Macy VERIFY(nvlist_lookup_nvlist(config, 5339eda14cbcSMatt Macy ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 5340eda14cbcSMatt Macy VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 5341eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 5342eda14cbcSMatt Macy if (nl2cache != 0) { 5343eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist_array(nvroot, 5344eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 5345eda14cbcSMatt Macy VERIFY(nvlist_lookup_nvlist_array(nvroot, 5346eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 5347eda14cbcSMatt Macy 5348eda14cbcSMatt Macy /* 5349eda14cbcSMatt Macy * Update level 2 cache device stats. 5350eda14cbcSMatt Macy */ 5351eda14cbcSMatt Macy 5352eda14cbcSMatt Macy for (i = 0; i < nl2cache; i++) { 5353eda14cbcSMatt Macy VERIFY(nvlist_lookup_uint64(l2cache[i], 5354eda14cbcSMatt Macy ZPOOL_CONFIG_GUID, &guid) == 0); 5355eda14cbcSMatt Macy 5356eda14cbcSMatt Macy vd = NULL; 5357eda14cbcSMatt Macy for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 5358eda14cbcSMatt Macy if (guid == 5359eda14cbcSMatt Macy spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 5360eda14cbcSMatt Macy vd = spa->spa_l2cache.sav_vdevs[j]; 5361eda14cbcSMatt Macy break; 5362eda14cbcSMatt Macy } 5363eda14cbcSMatt Macy } 5364eda14cbcSMatt Macy ASSERT(vd != NULL); 5365eda14cbcSMatt Macy 5366eda14cbcSMatt Macy VERIFY(nvlist_lookup_uint64_array(l2cache[i], 5367eda14cbcSMatt Macy ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 5368eda14cbcSMatt Macy == 0); 5369eda14cbcSMatt Macy vdev_get_stats(vd, vs); 5370eda14cbcSMatt Macy vdev_config_generate_stats(vd, l2cache[i]); 5371eda14cbcSMatt Macy 5372eda14cbcSMatt Macy } 5373eda14cbcSMatt Macy } 5374eda14cbcSMatt Macy } 5375eda14cbcSMatt Macy 5376eda14cbcSMatt Macy static void 5377eda14cbcSMatt Macy spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features) 5378eda14cbcSMatt Macy { 5379eda14cbcSMatt Macy zap_cursor_t zc; 5380eda14cbcSMatt Macy zap_attribute_t za; 5381eda14cbcSMatt Macy 5382eda14cbcSMatt Macy if (spa->spa_feat_for_read_obj != 0) { 5383eda14cbcSMatt Macy for (zap_cursor_init(&zc, spa->spa_meta_objset, 5384eda14cbcSMatt Macy spa->spa_feat_for_read_obj); 5385eda14cbcSMatt Macy zap_cursor_retrieve(&zc, &za) == 0; 5386eda14cbcSMatt Macy zap_cursor_advance(&zc)) { 5387eda14cbcSMatt Macy ASSERT(za.za_integer_length == sizeof (uint64_t) && 5388eda14cbcSMatt Macy za.za_num_integers == 1); 5389eda14cbcSMatt Macy VERIFY0(nvlist_add_uint64(features, za.za_name, 5390eda14cbcSMatt Macy za.za_first_integer)); 5391eda14cbcSMatt Macy } 5392eda14cbcSMatt Macy zap_cursor_fini(&zc); 5393eda14cbcSMatt Macy } 5394eda14cbcSMatt Macy 5395eda14cbcSMatt Macy if (spa->spa_feat_for_write_obj != 0) { 5396eda14cbcSMatt Macy for (zap_cursor_init(&zc, spa->spa_meta_objset, 5397eda14cbcSMatt Macy spa->spa_feat_for_write_obj); 5398eda14cbcSMatt Macy 
zap_cursor_retrieve(&zc, &za) == 0; 5399eda14cbcSMatt Macy zap_cursor_advance(&zc)) { 5400eda14cbcSMatt Macy ASSERT(za.za_integer_length == sizeof (uint64_t) && 5401eda14cbcSMatt Macy za.za_num_integers == 1); 5402eda14cbcSMatt Macy VERIFY0(nvlist_add_uint64(features, za.za_name, 5403eda14cbcSMatt Macy za.za_first_integer)); 5404eda14cbcSMatt Macy } 5405eda14cbcSMatt Macy zap_cursor_fini(&zc); 5406eda14cbcSMatt Macy } 5407eda14cbcSMatt Macy } 5408eda14cbcSMatt Macy 5409eda14cbcSMatt Macy static void 5410eda14cbcSMatt Macy spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features) 5411eda14cbcSMatt Macy { 5412eda14cbcSMatt Macy int i; 5413eda14cbcSMatt Macy 5414eda14cbcSMatt Macy for (i = 0; i < SPA_FEATURES; i++) { 5415eda14cbcSMatt Macy zfeature_info_t feature = spa_feature_table[i]; 5416eda14cbcSMatt Macy uint64_t refcount; 5417eda14cbcSMatt Macy 5418eda14cbcSMatt Macy if (feature_get_refcount(spa, &feature, &refcount) != 0) 5419eda14cbcSMatt Macy continue; 5420eda14cbcSMatt Macy 5421eda14cbcSMatt Macy VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount)); 5422eda14cbcSMatt Macy } 5423eda14cbcSMatt Macy } 5424eda14cbcSMatt Macy 5425eda14cbcSMatt Macy /* 5426eda14cbcSMatt Macy * Store a list of pool features and their reference counts in the 5427eda14cbcSMatt Macy * config. 5428eda14cbcSMatt Macy * 5429eda14cbcSMatt Macy * The first time this is called on a spa, allocate a new nvlist, fetch 5430eda14cbcSMatt Macy * the pool features and reference counts from disk, then save the list 5431eda14cbcSMatt Macy * in the spa. In subsequent calls on the same spa use the saved nvlist 5432eda14cbcSMatt Macy * and refresh its values from the cached reference counts. This 5433eda14cbcSMatt Macy * ensures we don't block here on I/O on a suspended pool so 'zpool 5434eda14cbcSMatt Macy * clear' can resume the pool. 
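 * The first call populates the list via spa_feature_stats_from_disk(); later calls refresh it via spa_feature_stats_from_cache().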
5435eda14cbcSMatt Macy */ 5436eda14cbcSMatt Macy static void 5437eda14cbcSMatt Macy spa_add_feature_stats(spa_t *spa, nvlist_t *config) 5438eda14cbcSMatt Macy { 5439eda14cbcSMatt Macy nvlist_t *features; 5440eda14cbcSMatt Macy 5441eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5442eda14cbcSMatt Macy 5443eda14cbcSMatt Macy mutex_enter(&spa->spa_feat_stats_lock); 5444eda14cbcSMatt Macy features = spa->spa_feat_stats; 5445eda14cbcSMatt Macy 5446eda14cbcSMatt Macy if (features != NULL) { 5447eda14cbcSMatt Macy spa_feature_stats_from_cache(spa, features); 5448eda14cbcSMatt Macy } else { 5449eda14cbcSMatt Macy VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP)); 5450eda14cbcSMatt Macy spa->spa_feat_stats = features; 5451eda14cbcSMatt Macy spa_feature_stats_from_disk(spa, features); 5452eda14cbcSMatt Macy } 5453eda14cbcSMatt Macy 5454eda14cbcSMatt Macy VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 5455eda14cbcSMatt Macy features)); 5456eda14cbcSMatt Macy 5457eda14cbcSMatt Macy mutex_exit(&spa->spa_feat_stats_lock); 5458eda14cbcSMatt Macy } 5459eda14cbcSMatt Macy 5460eda14cbcSMatt Macy int 5461eda14cbcSMatt Macy spa_get_stats(const char *name, nvlist_t **config, 5462eda14cbcSMatt Macy char *altroot, size_t buflen) 5463eda14cbcSMatt Macy { 5464eda14cbcSMatt Macy int error; 5465eda14cbcSMatt Macy spa_t *spa; 5466eda14cbcSMatt Macy 5467eda14cbcSMatt Macy *config = NULL; 5468eda14cbcSMatt Macy error = spa_open_common(name, &spa, FTAG, NULL, config); 5469eda14cbcSMatt Macy 5470eda14cbcSMatt Macy if (spa != NULL) { 5471eda14cbcSMatt Macy /* 5472eda14cbcSMatt Macy * This still leaves a window of inconsistency where the spares 5473eda14cbcSMatt Macy * or l2cache devices could change and the config would be 5474eda14cbcSMatt Macy * self-inconsistent. 5475eda14cbcSMatt Macy */ 5476eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5477eda14cbcSMatt Macy 5478eda14cbcSMatt Macy if (*config != NULL) { 5479eda14cbcSMatt Macy uint64_t loadtimes[2]; 5480eda14cbcSMatt Macy 5481eda14cbcSMatt Macy loadtimes[0] = spa->spa_loaded_ts.tv_sec; 5482eda14cbcSMatt Macy loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 5483eda14cbcSMatt Macy VERIFY(nvlist_add_uint64_array(*config, 5484eda14cbcSMatt Macy ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 5485eda14cbcSMatt Macy 5486eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(*config, 5487eda14cbcSMatt Macy ZPOOL_CONFIG_ERRCOUNT, 5488eda14cbcSMatt Macy spa_get_errlog_size(spa)) == 0); 5489eda14cbcSMatt Macy 5490eda14cbcSMatt Macy if (spa_suspended(spa)) { 5491eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(*config, 5492eda14cbcSMatt Macy ZPOOL_CONFIG_SUSPENDED, 5493eda14cbcSMatt Macy spa->spa_failmode) == 0); 5494eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(*config, 5495eda14cbcSMatt Macy ZPOOL_CONFIG_SUSPENDED_REASON, 5496eda14cbcSMatt Macy spa->spa_suspended) == 0); 5497eda14cbcSMatt Macy } 5498eda14cbcSMatt Macy 5499eda14cbcSMatt Macy spa_add_spares(spa, *config); 5500eda14cbcSMatt Macy spa_add_l2cache(spa, *config); 5501eda14cbcSMatt Macy spa_add_feature_stats(spa, *config); 5502eda14cbcSMatt Macy } 5503eda14cbcSMatt Macy } 5504eda14cbcSMatt Macy 5505eda14cbcSMatt Macy /* 5506eda14cbcSMatt Macy * We want to get the alternate root even for faulted pools, so we cheat 5507eda14cbcSMatt Macy * and call spa_lookup() directly. 
5508eda14cbcSMatt Macy */ 5509eda14cbcSMatt Macy if (altroot) { 5510eda14cbcSMatt Macy if (spa == NULL) { 5511eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 5512eda14cbcSMatt Macy spa = spa_lookup(name); 5513eda14cbcSMatt Macy if (spa) 5514eda14cbcSMatt Macy spa_altroot(spa, altroot, buflen); 5515eda14cbcSMatt Macy else 5516eda14cbcSMatt Macy altroot[0] = '\0'; 5517eda14cbcSMatt Macy spa = NULL; 5518eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5519eda14cbcSMatt Macy } else { 5520eda14cbcSMatt Macy spa_altroot(spa, altroot, buflen); 5521eda14cbcSMatt Macy } 5522eda14cbcSMatt Macy } 5523eda14cbcSMatt Macy 5524eda14cbcSMatt Macy if (spa != NULL) { 5525eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG, FTAG); 5526eda14cbcSMatt Macy spa_close(spa, FTAG); 5527eda14cbcSMatt Macy } 5528eda14cbcSMatt Macy 5529eda14cbcSMatt Macy return (error); 5530eda14cbcSMatt Macy } 5531eda14cbcSMatt Macy 5532eda14cbcSMatt Macy /* 5533eda14cbcSMatt Macy * Validate that the auxiliary device array is well formed. We must have an 5534eda14cbcSMatt Macy * array of nvlists, each which describes a valid leaf vdev. If this is an 5535eda14cbcSMatt Macy * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 5536eda14cbcSMatt Macy * specified, as long as they are well-formed. 5537eda14cbcSMatt Macy */ 5538eda14cbcSMatt Macy static int 5539eda14cbcSMatt Macy spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 5540eda14cbcSMatt Macy spa_aux_vdev_t *sav, const char *config, uint64_t version, 5541eda14cbcSMatt Macy vdev_labeltype_t label) 5542eda14cbcSMatt Macy { 5543eda14cbcSMatt Macy nvlist_t **dev; 5544eda14cbcSMatt Macy uint_t i, ndev; 5545eda14cbcSMatt Macy vdev_t *vd; 5546eda14cbcSMatt Macy int error; 5547eda14cbcSMatt Macy 5548eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5549eda14cbcSMatt Macy 5550eda14cbcSMatt Macy /* 5551eda14cbcSMatt Macy * It's acceptable to have no devs specified. 5552eda14cbcSMatt Macy */ 5553eda14cbcSMatt Macy if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 5554eda14cbcSMatt Macy return (0); 5555eda14cbcSMatt Macy 5556eda14cbcSMatt Macy if (ndev == 0) 5557eda14cbcSMatt Macy return (SET_ERROR(EINVAL)); 5558eda14cbcSMatt Macy 5559eda14cbcSMatt Macy /* 5560eda14cbcSMatt Macy * Make sure the pool is formatted with a version that supports this 5561eda14cbcSMatt Macy * device type. 5562eda14cbcSMatt Macy */ 5563eda14cbcSMatt Macy if (spa_version(spa) < version) 5564eda14cbcSMatt Macy return (SET_ERROR(ENOTSUP)); 5565eda14cbcSMatt Macy 5566eda14cbcSMatt Macy /* 5567eda14cbcSMatt Macy * Set the pending device list so we correctly handle device in-use 5568eda14cbcSMatt Macy * checking. 
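 * The pending list is cleared again at the 'out' label below, whether or not validation succeeds.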
5569eda14cbcSMatt Macy */ 5570eda14cbcSMatt Macy sav->sav_pending = dev; 5571eda14cbcSMatt Macy sav->sav_npending = ndev; 5572eda14cbcSMatt Macy 5573eda14cbcSMatt Macy for (i = 0; i < ndev; i++) { 5574eda14cbcSMatt Macy if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 5575eda14cbcSMatt Macy mode)) != 0) 5576eda14cbcSMatt Macy goto out; 5577eda14cbcSMatt Macy 5578eda14cbcSMatt Macy if (!vd->vdev_ops->vdev_op_leaf) { 5579eda14cbcSMatt Macy vdev_free(vd); 5580eda14cbcSMatt Macy error = SET_ERROR(EINVAL); 5581eda14cbcSMatt Macy goto out; 5582eda14cbcSMatt Macy } 5583eda14cbcSMatt Macy 5584eda14cbcSMatt Macy vd->vdev_top = vd; 5585eda14cbcSMatt Macy 5586eda14cbcSMatt Macy if ((error = vdev_open(vd)) == 0 && 5587eda14cbcSMatt Macy (error = vdev_label_init(vd, crtxg, label)) == 0) { 5588eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 5589eda14cbcSMatt Macy vd->vdev_guid) == 0); 5590eda14cbcSMatt Macy } 5591eda14cbcSMatt Macy 5592eda14cbcSMatt Macy vdev_free(vd); 5593eda14cbcSMatt Macy 5594eda14cbcSMatt Macy if (error && 5595eda14cbcSMatt Macy (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 5596eda14cbcSMatt Macy goto out; 5597eda14cbcSMatt Macy else 5598eda14cbcSMatt Macy error = 0; 5599eda14cbcSMatt Macy } 5600eda14cbcSMatt Macy 5601eda14cbcSMatt Macy out: 5602eda14cbcSMatt Macy sav->sav_pending = NULL; 5603eda14cbcSMatt Macy sav->sav_npending = 0; 5604eda14cbcSMatt Macy return (error); 5605eda14cbcSMatt Macy } 5606eda14cbcSMatt Macy 5607eda14cbcSMatt Macy static int 5608eda14cbcSMatt Macy spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 5609eda14cbcSMatt Macy { 5610eda14cbcSMatt Macy int error; 5611eda14cbcSMatt Macy 5612eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5613eda14cbcSMatt Macy 5614eda14cbcSMatt Macy if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 5615eda14cbcSMatt Macy &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 5616eda14cbcSMatt Macy VDEV_LABEL_SPARE)) != 0) { 5617eda14cbcSMatt Macy return (error); 5618eda14cbcSMatt Macy } 5619eda14cbcSMatt Macy 5620eda14cbcSMatt Macy return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 5621eda14cbcSMatt Macy &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 5622eda14cbcSMatt Macy VDEV_LABEL_L2CACHE)); 5623eda14cbcSMatt Macy } 5624eda14cbcSMatt Macy 5625eda14cbcSMatt Macy static void 5626eda14cbcSMatt Macy spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 5627eda14cbcSMatt Macy const char *config) 5628eda14cbcSMatt Macy { 5629eda14cbcSMatt Macy int i; 5630eda14cbcSMatt Macy 5631eda14cbcSMatt Macy if (sav->sav_config != NULL) { 5632eda14cbcSMatt Macy nvlist_t **olddevs; 5633eda14cbcSMatt Macy uint_t oldndevs; 5634eda14cbcSMatt Macy nvlist_t **newdevs; 5635eda14cbcSMatt Macy 5636eda14cbcSMatt Macy /* 5637eda14cbcSMatt Macy * Generate new dev list by concatenating with the 5638eda14cbcSMatt Macy * current dev list. 
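 * There is no in-place append for an nvlist array, so the old and new entries are duplicated into a freshly allocated array and the config value is replaced wholesale.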
5639eda14cbcSMatt Macy */ 5640eda14cbcSMatt Macy VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 5641eda14cbcSMatt Macy &olddevs, &oldndevs) == 0); 5642eda14cbcSMatt Macy 5643eda14cbcSMatt Macy newdevs = kmem_alloc(sizeof (void *) * 5644eda14cbcSMatt Macy (ndevs + oldndevs), KM_SLEEP); 5645eda14cbcSMatt Macy for (i = 0; i < oldndevs; i++) 5646eda14cbcSMatt Macy VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 5647eda14cbcSMatt Macy KM_SLEEP) == 0); 5648eda14cbcSMatt Macy for (i = 0; i < ndevs; i++) 5649eda14cbcSMatt Macy VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 5650eda14cbcSMatt Macy KM_SLEEP) == 0); 5651eda14cbcSMatt Macy 5652eda14cbcSMatt Macy VERIFY(nvlist_remove(sav->sav_config, config, 5653eda14cbcSMatt Macy DATA_TYPE_NVLIST_ARRAY) == 0); 5654eda14cbcSMatt Macy 5655eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist_array(sav->sav_config, 5656eda14cbcSMatt Macy config, newdevs, ndevs + oldndevs) == 0); 5657eda14cbcSMatt Macy for (i = 0; i < oldndevs + ndevs; i++) 5658eda14cbcSMatt Macy nvlist_free(newdevs[i]); 5659eda14cbcSMatt Macy kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 5660eda14cbcSMatt Macy } else { 5661eda14cbcSMatt Macy /* 5662eda14cbcSMatt Macy * Generate a new dev list. 5663eda14cbcSMatt Macy */ 5664eda14cbcSMatt Macy VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 5665eda14cbcSMatt Macy KM_SLEEP) == 0); 5666eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 5667eda14cbcSMatt Macy devs, ndevs) == 0); 5668eda14cbcSMatt Macy } 5669eda14cbcSMatt Macy } 5670eda14cbcSMatt Macy 5671eda14cbcSMatt Macy /* 5672eda14cbcSMatt Macy * Stop and drop level 2 ARC devices 5673eda14cbcSMatt Macy */ 5674eda14cbcSMatt Macy void 5675eda14cbcSMatt Macy spa_l2cache_drop(spa_t *spa) 5676eda14cbcSMatt Macy { 5677eda14cbcSMatt Macy vdev_t *vd; 5678eda14cbcSMatt Macy int i; 5679eda14cbcSMatt Macy spa_aux_vdev_t *sav = &spa->spa_l2cache; 5680eda14cbcSMatt Macy 5681eda14cbcSMatt Macy for (i = 0; i < sav->sav_count; i++) { 5682eda14cbcSMatt Macy uint64_t pool; 5683eda14cbcSMatt Macy 5684eda14cbcSMatt Macy vd = sav->sav_vdevs[i]; 5685eda14cbcSMatt Macy ASSERT(vd != NULL); 5686eda14cbcSMatt Macy 5687eda14cbcSMatt Macy if (spa_l2cache_exists(vd->vdev_guid, &pool) && 5688eda14cbcSMatt Macy pool != 0ULL && l2arc_vdev_present(vd)) 5689eda14cbcSMatt Macy l2arc_remove_vdev(vd); 5690eda14cbcSMatt Macy } 5691eda14cbcSMatt Macy } 5692eda14cbcSMatt Macy 5693eda14cbcSMatt Macy /* 5694eda14cbcSMatt Macy * Verify encryption parameters for spa creation. If we are encrypting, we must 5695eda14cbcSMatt Macy * have the encryption feature flag enabled. 
5696eda14cbcSMatt Macy */ 5697eda14cbcSMatt Macy static int 5698eda14cbcSMatt Macy spa_create_check_encryption_params(dsl_crypto_params_t *dcp, 5699eda14cbcSMatt Macy boolean_t has_encryption) 5700eda14cbcSMatt Macy { 5701eda14cbcSMatt Macy if (dcp->cp_crypt != ZIO_CRYPT_OFF && 5702eda14cbcSMatt Macy dcp->cp_crypt != ZIO_CRYPT_INHERIT && 5703eda14cbcSMatt Macy !has_encryption) 5704eda14cbcSMatt Macy return (SET_ERROR(ENOTSUP)); 5705eda14cbcSMatt Macy 5706eda14cbcSMatt Macy return (dmu_objset_create_crypt_check(NULL, dcp, NULL)); 5707eda14cbcSMatt Macy } 5708eda14cbcSMatt Macy 5709eda14cbcSMatt Macy /* 5710eda14cbcSMatt Macy * Pool Creation 5711eda14cbcSMatt Macy */ 5712eda14cbcSMatt Macy int 5713eda14cbcSMatt Macy spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 5714eda14cbcSMatt Macy nvlist_t *zplprops, dsl_crypto_params_t *dcp) 5715eda14cbcSMatt Macy { 5716eda14cbcSMatt Macy spa_t *spa; 5717eda14cbcSMatt Macy char *altroot = NULL; 5718eda14cbcSMatt Macy vdev_t *rvd; 5719eda14cbcSMatt Macy dsl_pool_t *dp; 5720eda14cbcSMatt Macy dmu_tx_t *tx; 5721eda14cbcSMatt Macy int error = 0; 5722eda14cbcSMatt Macy uint64_t txg = TXG_INITIAL; 5723eda14cbcSMatt Macy nvlist_t **spares, **l2cache; 5724eda14cbcSMatt Macy uint_t nspares, nl2cache; 57257877fdebSMatt Macy uint64_t version, obj, ndraid = 0; 5726eda14cbcSMatt Macy boolean_t has_features; 5727eda14cbcSMatt Macy boolean_t has_encryption; 5728eda14cbcSMatt Macy boolean_t has_allocclass; 5729eda14cbcSMatt Macy spa_feature_t feat; 5730eda14cbcSMatt Macy char *feat_name; 5731eda14cbcSMatt Macy char *poolname; 5732eda14cbcSMatt Macy nvlist_t *nvl; 5733eda14cbcSMatt Macy 5734eda14cbcSMatt Macy if (props == NULL || 5735eda14cbcSMatt Macy nvlist_lookup_string(props, "tname", &poolname) != 0) 5736eda14cbcSMatt Macy poolname = (char *)pool; 5737eda14cbcSMatt Macy 5738eda14cbcSMatt Macy /* 5739eda14cbcSMatt Macy * If this pool already exists, return failure. 5740eda14cbcSMatt Macy */ 5741eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 5742eda14cbcSMatt Macy if (spa_lookup(poolname) != NULL) { 5743eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5744eda14cbcSMatt Macy return (SET_ERROR(EEXIST)); 5745eda14cbcSMatt Macy } 5746eda14cbcSMatt Macy 5747eda14cbcSMatt Macy /* 5748eda14cbcSMatt Macy * Allocate a new spa_t structure. 5749eda14cbcSMatt Macy */ 5750eda14cbcSMatt Macy nvl = fnvlist_alloc(); 5751eda14cbcSMatt Macy fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool); 5752eda14cbcSMatt Macy (void) nvlist_lookup_string(props, 5753eda14cbcSMatt Macy zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5754eda14cbcSMatt Macy spa = spa_add(poolname, nvl, altroot); 5755eda14cbcSMatt Macy fnvlist_free(nvl); 5756eda14cbcSMatt Macy spa_activate(spa, spa_mode_global); 5757eda14cbcSMatt Macy 5758eda14cbcSMatt Macy if (props && (error = spa_prop_validate(spa, props))) { 5759eda14cbcSMatt Macy spa_deactivate(spa); 5760eda14cbcSMatt Macy spa_remove(spa); 5761eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5762eda14cbcSMatt Macy return (error); 5763eda14cbcSMatt Macy } 5764eda14cbcSMatt Macy 5765eda14cbcSMatt Macy /* 5766eda14cbcSMatt Macy * Temporary pool names should never be written to disk. 
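 * (A temporary name is supplied via the "tname" property looked up above; the ZFS_IMPORT_TEMP_NAME flag is what keeps it from ever reaching the on-disk config.)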
5767eda14cbcSMatt Macy */ 5768eda14cbcSMatt Macy if (poolname != pool) 5769eda14cbcSMatt Macy spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME; 5770eda14cbcSMatt Macy 5771eda14cbcSMatt Macy has_features = B_FALSE; 5772eda14cbcSMatt Macy has_encryption = B_FALSE; 5773eda14cbcSMatt Macy has_allocclass = B_FALSE; 5774eda14cbcSMatt Macy for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 5775eda14cbcSMatt Macy elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 5776eda14cbcSMatt Macy if (zpool_prop_feature(nvpair_name(elem))) { 5777eda14cbcSMatt Macy has_features = B_TRUE; 5778eda14cbcSMatt Macy 5779eda14cbcSMatt Macy feat_name = strchr(nvpair_name(elem), '@') + 1; 5780eda14cbcSMatt Macy VERIFY0(zfeature_lookup_name(feat_name, &feat)); 5781eda14cbcSMatt Macy if (feat == SPA_FEATURE_ENCRYPTION) 5782eda14cbcSMatt Macy has_encryption = B_TRUE; 5783eda14cbcSMatt Macy if (feat == SPA_FEATURE_ALLOCATION_CLASSES) 5784eda14cbcSMatt Macy has_allocclass = B_TRUE; 5785eda14cbcSMatt Macy } 5786eda14cbcSMatt Macy } 5787eda14cbcSMatt Macy 5788eda14cbcSMatt Macy /* verify encryption params, if they were provided */ 5789eda14cbcSMatt Macy if (dcp != NULL) { 5790eda14cbcSMatt Macy error = spa_create_check_encryption_params(dcp, has_encryption); 5791eda14cbcSMatt Macy if (error != 0) { 5792eda14cbcSMatt Macy spa_deactivate(spa); 5793eda14cbcSMatt Macy spa_remove(spa); 5794eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5795eda14cbcSMatt Macy return (error); 5796eda14cbcSMatt Macy } 5797eda14cbcSMatt Macy } 5798eda14cbcSMatt Macy if (!has_allocclass && zfs_special_devs(nvroot, NULL)) { 5799eda14cbcSMatt Macy spa_deactivate(spa); 5800eda14cbcSMatt Macy spa_remove(spa); 5801eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5802eda14cbcSMatt Macy return (ENOTSUP); 5803eda14cbcSMatt Macy } 5804eda14cbcSMatt Macy 5805eda14cbcSMatt Macy if (has_features || nvlist_lookup_uint64(props, 5806eda14cbcSMatt Macy zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 5807eda14cbcSMatt Macy version = SPA_VERSION; 5808eda14cbcSMatt Macy } 5809eda14cbcSMatt Macy ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 5810eda14cbcSMatt Macy 5811eda14cbcSMatt Macy spa->spa_first_txg = txg; 5812eda14cbcSMatt Macy spa->spa_uberblock.ub_txg = txg - 1; 5813eda14cbcSMatt Macy spa->spa_uberblock.ub_version = version; 5814eda14cbcSMatt Macy spa->spa_ubsync = spa->spa_uberblock; 5815eda14cbcSMatt Macy spa->spa_load_state = SPA_LOAD_CREATE; 5816eda14cbcSMatt Macy spa->spa_removing_phys.sr_state = DSS_NONE; 5817eda14cbcSMatt Macy spa->spa_removing_phys.sr_removing_vdev = -1; 5818eda14cbcSMatt Macy spa->spa_removing_phys.sr_prev_indirect_vdev = -1; 5819eda14cbcSMatt Macy spa->spa_indirect_vdevs_loaded = B_TRUE; 5820eda14cbcSMatt Macy 5821eda14cbcSMatt Macy /* 5822eda14cbcSMatt Macy * Create "The Godfather" zio to hold all async IOs 5823eda14cbcSMatt Macy */ 5824eda14cbcSMatt Macy spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 5825eda14cbcSMatt Macy KM_SLEEP); 5826eda14cbcSMatt Macy for (int i = 0; i < max_ncpus; i++) { 5827eda14cbcSMatt Macy spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 5828eda14cbcSMatt Macy ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 5829eda14cbcSMatt Macy ZIO_FLAG_GODFATHER); 5830eda14cbcSMatt Macy } 5831eda14cbcSMatt Macy 5832eda14cbcSMatt Macy /* 5833eda14cbcSMatt Macy * Create the root vdev. 
5834eda14cbcSMatt Macy */ 5835eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5836eda14cbcSMatt Macy 5837eda14cbcSMatt Macy error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 5838eda14cbcSMatt Macy 5839eda14cbcSMatt Macy ASSERT(error != 0 || rvd != NULL); 5840eda14cbcSMatt Macy ASSERT(error != 0 || spa->spa_root_vdev == rvd); 5841eda14cbcSMatt Macy 5842eda14cbcSMatt Macy if (error == 0 && !zfs_allocatable_devs(nvroot)) 5843eda14cbcSMatt Macy error = SET_ERROR(EINVAL); 5844eda14cbcSMatt Macy 5845eda14cbcSMatt Macy if (error == 0 && 5846eda14cbcSMatt Macy (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 58477877fdebSMatt Macy (error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 && 58487877fdebSMatt Macy (error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) { 5849eda14cbcSMatt Macy /* 5850eda14cbcSMatt Macy * instantiate the metaslab groups (this will dirty the vdevs) 5851eda14cbcSMatt Macy * we can no longer error exit past this point 5852eda14cbcSMatt Macy */ 5853eda14cbcSMatt Macy for (int c = 0; error == 0 && c < rvd->vdev_children; c++) { 5854eda14cbcSMatt Macy vdev_t *vd = rvd->vdev_child[c]; 5855eda14cbcSMatt Macy 5856eda14cbcSMatt Macy vdev_metaslab_set_size(vd); 5857eda14cbcSMatt Macy vdev_expand(vd, txg); 5858eda14cbcSMatt Macy } 5859eda14cbcSMatt Macy } 5860eda14cbcSMatt Macy 5861eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 5862eda14cbcSMatt Macy 5863eda14cbcSMatt Macy if (error != 0) { 5864eda14cbcSMatt Macy spa_unload(spa); 5865eda14cbcSMatt Macy spa_deactivate(spa); 5866eda14cbcSMatt Macy spa_remove(spa); 5867eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 5868eda14cbcSMatt Macy return (error); 5869eda14cbcSMatt Macy } 5870eda14cbcSMatt Macy 5871eda14cbcSMatt Macy /* 5872eda14cbcSMatt Macy * Get the list of spares, if specified. 5873eda14cbcSMatt Macy */ 5874eda14cbcSMatt Macy if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5875eda14cbcSMatt Macy &spares, &nspares) == 0) { 5876eda14cbcSMatt Macy VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 5877eda14cbcSMatt Macy KM_SLEEP) == 0); 5878eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 5879eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 5880eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5881eda14cbcSMatt Macy spa_load_spares(spa); 5882eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 5883eda14cbcSMatt Macy spa->spa_spares.sav_sync = B_TRUE; 5884eda14cbcSMatt Macy } 5885eda14cbcSMatt Macy 5886eda14cbcSMatt Macy /* 5887eda14cbcSMatt Macy * Get the list of level 2 cache devices, if specified. 
5888eda14cbcSMatt Macy */ 5889eda14cbcSMatt Macy if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 5890eda14cbcSMatt Macy &l2cache, &nl2cache) == 0) { 5891eda14cbcSMatt Macy VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 5892eda14cbcSMatt Macy NV_UNIQUE_NAME, KM_SLEEP) == 0); 5893eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 5894eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 5895eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5896eda14cbcSMatt Macy spa_load_l2cache(spa); 5897eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 5898eda14cbcSMatt Macy spa->spa_l2cache.sav_sync = B_TRUE; 5899eda14cbcSMatt Macy } 5900eda14cbcSMatt Macy 5901eda14cbcSMatt Macy spa->spa_is_initializing = B_TRUE; 5902eda14cbcSMatt Macy spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg); 5903eda14cbcSMatt Macy spa->spa_is_initializing = B_FALSE; 5904eda14cbcSMatt Macy 5905eda14cbcSMatt Macy /* 5906eda14cbcSMatt Macy * Create DDTs (dedup tables). 5907eda14cbcSMatt Macy */ 5908eda14cbcSMatt Macy ddt_create(spa); 5909eda14cbcSMatt Macy 5910eda14cbcSMatt Macy spa_update_dspace(spa); 5911eda14cbcSMatt Macy 5912eda14cbcSMatt Macy tx = dmu_tx_create_assigned(dp, txg); 5913eda14cbcSMatt Macy 5914eda14cbcSMatt Macy /* 5915eda14cbcSMatt Macy * Create the pool's history object. 5916eda14cbcSMatt Macy */ 5917eda14cbcSMatt Macy if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history) 5918eda14cbcSMatt Macy spa_history_create_obj(spa, tx); 5919eda14cbcSMatt Macy 5920eda14cbcSMatt Macy spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE); 5921eda14cbcSMatt Macy spa_history_log_version(spa, "create", tx); 5922eda14cbcSMatt Macy 5923eda14cbcSMatt Macy /* 5924eda14cbcSMatt Macy * Create the pool config object. 5925eda14cbcSMatt Macy */ 5926eda14cbcSMatt Macy spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 5927eda14cbcSMatt Macy DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 5928eda14cbcSMatt Macy DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 5929eda14cbcSMatt Macy 5930eda14cbcSMatt Macy if (zap_add(spa->spa_meta_objset, 5931eda14cbcSMatt Macy DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 5932eda14cbcSMatt Macy sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 5933eda14cbcSMatt Macy cmn_err(CE_PANIC, "failed to add pool config"); 5934eda14cbcSMatt Macy } 5935eda14cbcSMatt Macy 5936eda14cbcSMatt Macy if (zap_add(spa->spa_meta_objset, 5937eda14cbcSMatt Macy DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 5938eda14cbcSMatt Macy sizeof (uint64_t), 1, &version, tx) != 0) { 5939eda14cbcSMatt Macy cmn_err(CE_PANIC, "failed to add pool version"); 5940eda14cbcSMatt Macy } 5941eda14cbcSMatt Macy 5942eda14cbcSMatt Macy /* Newly created pools with the right version are always deflated. */ 5943eda14cbcSMatt Macy if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 5944eda14cbcSMatt Macy spa->spa_deflate = TRUE; 5945eda14cbcSMatt Macy if (zap_add(spa->spa_meta_objset, 5946eda14cbcSMatt Macy DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 5947eda14cbcSMatt Macy sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 5948eda14cbcSMatt Macy cmn_err(CE_PANIC, "failed to add deflate"); 5949eda14cbcSMatt Macy } 5950eda14cbcSMatt Macy } 5951eda14cbcSMatt Macy 5952eda14cbcSMatt Macy /* 5953eda14cbcSMatt Macy * Create the deferred-free bpobj. Turn off compression 5954eda14cbcSMatt Macy * because sync-to-convergence takes longer if the blocksize 5955eda14cbcSMatt Macy * keeps changing. 
5956eda14cbcSMatt Macy */ 5957eda14cbcSMatt Macy obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 5958eda14cbcSMatt Macy dmu_object_set_compress(spa->spa_meta_objset, obj, 5959eda14cbcSMatt Macy ZIO_COMPRESS_OFF, tx); 5960eda14cbcSMatt Macy if (zap_add(spa->spa_meta_objset, 5961eda14cbcSMatt Macy DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 5962eda14cbcSMatt Macy sizeof (uint64_t), 1, &obj, tx) != 0) { 5963eda14cbcSMatt Macy cmn_err(CE_PANIC, "failed to add bpobj"); 5964eda14cbcSMatt Macy } 5965eda14cbcSMatt Macy VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 5966eda14cbcSMatt Macy spa->spa_meta_objset, obj)); 5967eda14cbcSMatt Macy 5968eda14cbcSMatt Macy /* 5969eda14cbcSMatt Macy * Generate some random noise for salted checksums to operate on. 5970eda14cbcSMatt Macy */ 5971eda14cbcSMatt Macy (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 5972eda14cbcSMatt Macy sizeof (spa->spa_cksum_salt.zcs_bytes)); 5973eda14cbcSMatt Macy 5974eda14cbcSMatt Macy /* 5975eda14cbcSMatt Macy * Set pool properties. 5976eda14cbcSMatt Macy */ 5977eda14cbcSMatt Macy spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 5978eda14cbcSMatt Macy spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 5979eda14cbcSMatt Macy spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 5980eda14cbcSMatt Macy spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 5981eda14cbcSMatt Macy spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST); 5982eda14cbcSMatt Macy spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM); 5983eda14cbcSMatt Macy 5984eda14cbcSMatt Macy if (props != NULL) { 5985eda14cbcSMatt Macy spa_configfile_set(spa, props, B_FALSE); 5986eda14cbcSMatt Macy spa_sync_props(props, tx); 5987eda14cbcSMatt Macy } 5988eda14cbcSMatt Macy 59897877fdebSMatt Macy for (int i = 0; i < ndraid; i++) 59907877fdebSMatt Macy spa_feature_incr(spa, SPA_FEATURE_DRAID, tx); 59917877fdebSMatt Macy 5992eda14cbcSMatt Macy dmu_tx_commit(tx); 5993eda14cbcSMatt Macy 5994eda14cbcSMatt Macy spa->spa_sync_on = B_TRUE; 5995eda14cbcSMatt Macy txg_sync_start(dp); 5996eda14cbcSMatt Macy mmp_thread_start(spa); 5997eda14cbcSMatt Macy txg_wait_synced(dp, txg); 5998eda14cbcSMatt Macy 5999eda14cbcSMatt Macy spa_spawn_aux_threads(spa); 6000eda14cbcSMatt Macy 6001eda14cbcSMatt Macy spa_write_cachefile(spa, B_FALSE, B_TRUE); 6002eda14cbcSMatt Macy 6003eda14cbcSMatt Macy /* 6004eda14cbcSMatt Macy * Don't count references from objsets that are already closed 6005eda14cbcSMatt Macy * and are making their way through the eviction process. 6006eda14cbcSMatt Macy */ 6007eda14cbcSMatt Macy spa_evicting_os_wait(spa); 6008eda14cbcSMatt Macy spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 6009eda14cbcSMatt Macy spa->spa_load_state = SPA_LOAD_NONE; 6010eda14cbcSMatt Macy 6011eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6012eda14cbcSMatt Macy 6013eda14cbcSMatt Macy return (0); 6014eda14cbcSMatt Macy } 6015eda14cbcSMatt Macy 6016eda14cbcSMatt Macy /* 6017eda14cbcSMatt Macy * Import a non-root pool into the system. 
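 * There are two paths below: a verbatim import (ZFS_IMPORT_VERBATIM) simply inserts the supplied config into the namespace, while a normal import performs a full spa_load_best() honoring the caller's load policy.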
6018eda14cbcSMatt Macy */ 6019eda14cbcSMatt Macy int 6020eda14cbcSMatt Macy spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 6021eda14cbcSMatt Macy { 6022eda14cbcSMatt Macy spa_t *spa; 6023eda14cbcSMatt Macy char *altroot = NULL; 6024eda14cbcSMatt Macy spa_load_state_t state = SPA_LOAD_IMPORT; 6025eda14cbcSMatt Macy zpool_load_policy_t policy; 6026eda14cbcSMatt Macy spa_mode_t mode = spa_mode_global; 6027eda14cbcSMatt Macy uint64_t readonly = B_FALSE; 6028eda14cbcSMatt Macy int error; 6029eda14cbcSMatt Macy nvlist_t *nvroot; 6030eda14cbcSMatt Macy nvlist_t **spares, **l2cache; 6031eda14cbcSMatt Macy uint_t nspares, nl2cache; 6032eda14cbcSMatt Macy 6033eda14cbcSMatt Macy /* 6034eda14cbcSMatt Macy * If a pool with this name exists, return failure. 6035eda14cbcSMatt Macy */ 6036eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 6037eda14cbcSMatt Macy if (spa_lookup(pool) != NULL) { 6038eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6039eda14cbcSMatt Macy return (SET_ERROR(EEXIST)); 6040eda14cbcSMatt Macy } 6041eda14cbcSMatt Macy 6042eda14cbcSMatt Macy /* 6043eda14cbcSMatt Macy * Create and initialize the spa structure. 6044eda14cbcSMatt Macy */ 6045eda14cbcSMatt Macy (void) nvlist_lookup_string(props, 6046eda14cbcSMatt Macy zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 6047eda14cbcSMatt Macy (void) nvlist_lookup_uint64(props, 6048eda14cbcSMatt Macy zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 6049eda14cbcSMatt Macy if (readonly) 6050eda14cbcSMatt Macy mode = SPA_MODE_READ; 6051eda14cbcSMatt Macy spa = spa_add(pool, config, altroot); 6052eda14cbcSMatt Macy spa->spa_import_flags = flags; 6053eda14cbcSMatt Macy 6054eda14cbcSMatt Macy /* 6055eda14cbcSMatt Macy * Verbatim import - Take a pool and insert it into the namespace 6056eda14cbcSMatt Macy * as if it had been loaded at boot. 6057eda14cbcSMatt Macy */ 6058eda14cbcSMatt Macy if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 6059eda14cbcSMatt Macy if (props != NULL) 6060eda14cbcSMatt Macy spa_configfile_set(spa, props, B_FALSE); 6061eda14cbcSMatt Macy 6062eda14cbcSMatt Macy spa_write_cachefile(spa, B_FALSE, B_TRUE); 6063eda14cbcSMatt Macy spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 6064eda14cbcSMatt Macy zfs_dbgmsg("spa_import: verbatim import of %s", pool); 6065eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6066eda14cbcSMatt Macy return (0); 6067eda14cbcSMatt Macy } 6068eda14cbcSMatt Macy 6069eda14cbcSMatt Macy spa_activate(spa, mode); 6070eda14cbcSMatt Macy 6071eda14cbcSMatt Macy /* 6072eda14cbcSMatt Macy * Don't start async tasks until we know everything is healthy. 
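 *
 * (spa_async_suspend() below is paired with the spa_async_resume()
 * that follows a successful load; on failure the pool is unloaded and
 * removed while async tasks are still suspended.)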
6073eda14cbcSMatt Macy */ 6074eda14cbcSMatt Macy spa_async_suspend(spa); 6075eda14cbcSMatt Macy 6076eda14cbcSMatt Macy zpool_get_load_policy(config, &policy); 6077eda14cbcSMatt Macy if (policy.zlp_rewind & ZPOOL_DO_REWIND) 6078eda14cbcSMatt Macy state = SPA_LOAD_RECOVER; 6079eda14cbcSMatt Macy 6080eda14cbcSMatt Macy spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT; 6081eda14cbcSMatt Macy 6082eda14cbcSMatt Macy if (state != SPA_LOAD_RECOVER) { 6083eda14cbcSMatt Macy spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 6084eda14cbcSMatt Macy zfs_dbgmsg("spa_import: importing %s", pool); 6085eda14cbcSMatt Macy } else { 6086eda14cbcSMatt Macy zfs_dbgmsg("spa_import: importing %s, max_txg=%lld " 6087eda14cbcSMatt Macy "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg); 6088eda14cbcSMatt Macy } 6089eda14cbcSMatt Macy error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind); 6090eda14cbcSMatt Macy 6091eda14cbcSMatt Macy /* 6092eda14cbcSMatt Macy * Propagate anything learned while loading the pool and pass it 6093eda14cbcSMatt Macy * back to the caller (e.g. rewind info, missing devices, etc.). 6094eda14cbcSMatt Macy */ 6095eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 6096eda14cbcSMatt Macy spa->spa_load_info) == 0); 6097eda14cbcSMatt Macy 6098eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6099eda14cbcSMatt Macy /* 6100eda14cbcSMatt Macy * Toss any existing spare list, as it is no longer valid 6101eda14cbcSMatt Macy * and conflicts with spa_has_spare(). 6102eda14cbcSMatt Macy */ 6103eda14cbcSMatt Macy if (spa->spa_spares.sav_config) { 6104eda14cbcSMatt Macy nvlist_free(spa->spa_spares.sav_config); 6105eda14cbcSMatt Macy spa->spa_spares.sav_config = NULL; 6106eda14cbcSMatt Macy spa_load_spares(spa); 6107eda14cbcSMatt Macy } 6108eda14cbcSMatt Macy if (spa->spa_l2cache.sav_config) { 6109eda14cbcSMatt Macy nvlist_free(spa->spa_l2cache.sav_config); 6110eda14cbcSMatt Macy spa->spa_l2cache.sav_config = NULL; 6111eda14cbcSMatt Macy spa_load_l2cache(spa); 6112eda14cbcSMatt Macy } 6113eda14cbcSMatt Macy 6114eda14cbcSMatt Macy VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 6115eda14cbcSMatt Macy &nvroot) == 0); 6116eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 6117eda14cbcSMatt Macy 6118eda14cbcSMatt Macy if (props != NULL) 6119eda14cbcSMatt Macy spa_configfile_set(spa, props, B_FALSE); 6120eda14cbcSMatt Macy 6121eda14cbcSMatt Macy if (error != 0 || (props && spa_writeable(spa) && 6122eda14cbcSMatt Macy (error = spa_prop_set(spa, props)))) { 6123eda14cbcSMatt Macy spa_unload(spa); 6124eda14cbcSMatt Macy spa_deactivate(spa); 6125eda14cbcSMatt Macy spa_remove(spa); 6126eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6127eda14cbcSMatt Macy return (error); 6128eda14cbcSMatt Macy } 6129eda14cbcSMatt Macy 6130eda14cbcSMatt Macy spa_async_resume(spa); 6131eda14cbcSMatt Macy 6132eda14cbcSMatt Macy /* 6133eda14cbcSMatt Macy * Override any spares and level 2 cache devices as specified by 6134eda14cbcSMatt Macy * the user, as these may have correct device names/devids, etc.
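 *
 * For example, an nvroot carrying a ZPOOL_CONFIG_SPARES array, as
 * built by userland with nvlist_add_nvlist_array(), simply replaces
 * whatever spare list was recorded on disk.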
6135eda14cbcSMatt Macy */ 6136eda14cbcSMatt Macy if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 6137eda14cbcSMatt Macy &spares, &nspares) == 0) { 6138eda14cbcSMatt Macy if (spa->spa_spares.sav_config) 6139eda14cbcSMatt Macy VERIFY(nvlist_remove(spa->spa_spares.sav_config, 6140eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 6141eda14cbcSMatt Macy else 6142eda14cbcSMatt Macy VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 6143eda14cbcSMatt Macy NV_UNIQUE_NAME, KM_SLEEP) == 0); 6144eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 6145eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 6146eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6147eda14cbcSMatt Macy spa_load_spares(spa); 6148eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 6149eda14cbcSMatt Macy spa->spa_spares.sav_sync = B_TRUE; 6150eda14cbcSMatt Macy } 6151eda14cbcSMatt Macy if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6152eda14cbcSMatt Macy &l2cache, &nl2cache) == 0) { 6153eda14cbcSMatt Macy if (spa->spa_l2cache.sav_config) 6154eda14cbcSMatt Macy VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 6155eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 6156eda14cbcSMatt Macy else 6157eda14cbcSMatt Macy VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 6158eda14cbcSMatt Macy NV_UNIQUE_NAME, KM_SLEEP) == 0); 6159eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 6160eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 6161eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6162eda14cbcSMatt Macy spa_load_l2cache(spa); 6163eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 6164eda14cbcSMatt Macy spa->spa_l2cache.sav_sync = B_TRUE; 6165eda14cbcSMatt Macy } 6166eda14cbcSMatt Macy 6167eda14cbcSMatt Macy /* 6168eda14cbcSMatt Macy * Check for any removed devices. 6169eda14cbcSMatt Macy */ 6170eda14cbcSMatt Macy if (spa->spa_autoreplace) { 6171eda14cbcSMatt Macy spa_aux_check_removed(&spa->spa_spares); 6172eda14cbcSMatt Macy spa_aux_check_removed(&spa->spa_l2cache); 6173eda14cbcSMatt Macy } 6174eda14cbcSMatt Macy 6175eda14cbcSMatt Macy if (spa_writeable(spa)) { 6176eda14cbcSMatt Macy /* 6177eda14cbcSMatt Macy * Update the config cache to include the newly-imported pool. 6178eda14cbcSMatt Macy */ 6179eda14cbcSMatt Macy spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 6180eda14cbcSMatt Macy } 6181eda14cbcSMatt Macy 6182eda14cbcSMatt Macy /* 6183eda14cbcSMatt Macy * It's possible that the pool was expanded while it was exported. 6184eda14cbcSMatt Macy * We kick off an async task to handle this for us. 
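 *
 * (The SPA_ASYNC_AUTOEXPAND request below is serviced by the pool's
 * async thread, which was resumed above.)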
6185eda14cbcSMatt Macy */ 6186eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 6187eda14cbcSMatt Macy 6188eda14cbcSMatt Macy spa_history_log_version(spa, "import", NULL); 6189eda14cbcSMatt Macy 6190eda14cbcSMatt Macy spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 6191eda14cbcSMatt Macy 6192eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6193eda14cbcSMatt Macy 6194eda14cbcSMatt Macy zvol_create_minors_recursive(pool); 6195eda14cbcSMatt Macy 6196eda14cbcSMatt Macy return (0); 6197eda14cbcSMatt Macy } 6198eda14cbcSMatt Macy 6199eda14cbcSMatt Macy nvlist_t * 6200eda14cbcSMatt Macy spa_tryimport(nvlist_t *tryconfig) 6201eda14cbcSMatt Macy { 6202eda14cbcSMatt Macy nvlist_t *config = NULL; 6203eda14cbcSMatt Macy char *poolname, *cachefile; 6204eda14cbcSMatt Macy spa_t *spa; 6205eda14cbcSMatt Macy uint64_t state; 6206eda14cbcSMatt Macy int error; 6207eda14cbcSMatt Macy zpool_load_policy_t policy; 6208eda14cbcSMatt Macy 6209eda14cbcSMatt Macy if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 6210eda14cbcSMatt Macy return (NULL); 6211eda14cbcSMatt Macy 6212eda14cbcSMatt Macy if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 6213eda14cbcSMatt Macy return (NULL); 6214eda14cbcSMatt Macy 6215eda14cbcSMatt Macy /* 6216eda14cbcSMatt Macy * Create and initialize the spa structure. 6217eda14cbcSMatt Macy */ 6218eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 6219eda14cbcSMatt Macy spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 6220eda14cbcSMatt Macy spa_activate(spa, SPA_MODE_READ); 6221eda14cbcSMatt Macy 6222eda14cbcSMatt Macy /* 6223eda14cbcSMatt Macy * Rewind pool if a max txg was provided. 6224eda14cbcSMatt Macy */ 6225eda14cbcSMatt Macy zpool_get_load_policy(spa->spa_config, &policy); 6226eda14cbcSMatt Macy if (policy.zlp_txg != UINT64_MAX) { 6227eda14cbcSMatt Macy spa->spa_load_max_txg = policy.zlp_txg; 6228eda14cbcSMatt Macy spa->spa_extreme_rewind = B_TRUE; 6229eda14cbcSMatt Macy zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld", 6230eda14cbcSMatt Macy poolname, (longlong_t)policy.zlp_txg); 6231eda14cbcSMatt Macy } else { 6232eda14cbcSMatt Macy zfs_dbgmsg("spa_tryimport: importing %s", poolname); 6233eda14cbcSMatt Macy } 6234eda14cbcSMatt Macy 6235eda14cbcSMatt Macy if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile) 6236eda14cbcSMatt Macy == 0) { 6237eda14cbcSMatt Macy zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile); 6238eda14cbcSMatt Macy spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 6239eda14cbcSMatt Macy } else { 6240eda14cbcSMatt Macy spa->spa_config_source = SPA_CONFIG_SRC_SCAN; 6241eda14cbcSMatt Macy } 6242eda14cbcSMatt Macy 6243eda14cbcSMatt Macy error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING); 6244eda14cbcSMatt Macy 6245eda14cbcSMatt Macy /* 6246eda14cbcSMatt Macy * If 'tryconfig' was at least parsable, return the current config. 
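 *
 * A sketch of the expected call pattern (illustrative only):
 *
 *	nvlist_t *config = spa_tryimport(tryconfig);
 *	if (config != NULL) {
 *		... inspect ZPOOL_CONFIG_LOAD_INFO, pool state, etc. ...
 *		nvlist_free(config);
 *	}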
6247eda14cbcSMatt Macy */ 6248eda14cbcSMatt Macy if (spa->spa_root_vdev != NULL) { 6249eda14cbcSMatt Macy config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 6250eda14cbcSMatt Macy VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 6251eda14cbcSMatt Macy poolname) == 0); 6252eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 6253eda14cbcSMatt Macy state) == 0); 6254eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 6255eda14cbcSMatt Macy spa->spa_uberblock.ub_timestamp) == 0); 6256eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 6257eda14cbcSMatt Macy spa->spa_load_info) == 0); 6258eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, 6259eda14cbcSMatt Macy spa->spa_errata) == 0); 6260eda14cbcSMatt Macy 6261eda14cbcSMatt Macy /* 6262eda14cbcSMatt Macy * If the bootfs property exists on this pool then we 6263eda14cbcSMatt Macy * copy it out so that external consumers can tell which 6264eda14cbcSMatt Macy * pools are bootable. 6265eda14cbcSMatt Macy */ 6266eda14cbcSMatt Macy if ((!error || error == EEXIST) && spa->spa_bootfs) { 6267eda14cbcSMatt Macy char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6268eda14cbcSMatt Macy 6269eda14cbcSMatt Macy /* 6270eda14cbcSMatt Macy * We have to play games with the name since the 6271eda14cbcSMatt Macy * pool was opened as TRYIMPORT_NAME. 6272eda14cbcSMatt Macy */ 6273eda14cbcSMatt Macy if (dsl_dsobj_to_dsname(spa_name(spa), 6274eda14cbcSMatt Macy spa->spa_bootfs, tmpname) == 0) { 6275eda14cbcSMatt Macy char *cp; 6276eda14cbcSMatt Macy char *dsname; 6277eda14cbcSMatt Macy 6278eda14cbcSMatt Macy dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6279eda14cbcSMatt Macy 6280eda14cbcSMatt Macy cp = strchr(tmpname, '/'); 6281eda14cbcSMatt Macy if (cp == NULL) { 6282eda14cbcSMatt Macy (void) strlcpy(dsname, tmpname, 6283eda14cbcSMatt Macy MAXPATHLEN); 6284eda14cbcSMatt Macy } else { 6285eda14cbcSMatt Macy (void) snprintf(dsname, MAXPATHLEN, 6286eda14cbcSMatt Macy "%s/%s", poolname, ++cp); 6287eda14cbcSMatt Macy } 6288eda14cbcSMatt Macy VERIFY(nvlist_add_string(config, 6289eda14cbcSMatt Macy ZPOOL_CONFIG_BOOTFS, dsname) == 0); 6290eda14cbcSMatt Macy kmem_free(dsname, MAXPATHLEN); 6291eda14cbcSMatt Macy } 6292eda14cbcSMatt Macy kmem_free(tmpname, MAXPATHLEN); 6293eda14cbcSMatt Macy } 6294eda14cbcSMatt Macy 6295eda14cbcSMatt Macy /* 6296eda14cbcSMatt Macy * Add the list of hot spares and level 2 cache devices. 6297eda14cbcSMatt Macy */ 6298eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6299eda14cbcSMatt Macy spa_add_spares(spa, config); 6300eda14cbcSMatt Macy spa_add_l2cache(spa, config); 6301eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG, FTAG); 6302eda14cbcSMatt Macy } 6303eda14cbcSMatt Macy 6304eda14cbcSMatt Macy spa_unload(spa); 6305eda14cbcSMatt Macy spa_deactivate(spa); 6306eda14cbcSMatt Macy spa_remove(spa); 6307eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6308eda14cbcSMatt Macy 6309eda14cbcSMatt Macy return (config); 6310eda14cbcSMatt Macy } 6311eda14cbcSMatt Macy 6312eda14cbcSMatt Macy /* 6313eda14cbcSMatt Macy * Pool export/destroy 6314eda14cbcSMatt Macy * 6315eda14cbcSMatt Macy * The act of destroying or exporting a pool is very simple. We make sure there 6316eda14cbcSMatt Macy * is no more pending I/O and any references to the pool are gone. Then, we 6317eda14cbcSMatt Macy * update the pool state and sync all the labels to disk, removing the 6318eda14cbcSMatt Macy * configuration from the cache afterwards. 
If the 'hardforce' flag is set, then 6319eda14cbcSMatt Macy * we don't sync the labels or remove the configuration cache. 6320eda14cbcSMatt Macy */ 6321eda14cbcSMatt Macy static int 6322180f8225SMatt Macy spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig, 6323eda14cbcSMatt Macy boolean_t force, boolean_t hardforce) 6324eda14cbcSMatt Macy { 6325184c1b94SMartin Matuska int error; 6326eda14cbcSMatt Macy spa_t *spa; 6327eda14cbcSMatt Macy 6328eda14cbcSMatt Macy if (oldconfig) 6329eda14cbcSMatt Macy *oldconfig = NULL; 6330eda14cbcSMatt Macy 6331eda14cbcSMatt Macy if (!(spa_mode_global & SPA_MODE_WRITE)) 6332eda14cbcSMatt Macy return (SET_ERROR(EROFS)); 6333eda14cbcSMatt Macy 6334eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 6335eda14cbcSMatt Macy if ((spa = spa_lookup(pool)) == NULL) { 6336eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6337eda14cbcSMatt Macy return (SET_ERROR(ENOENT)); 6338eda14cbcSMatt Macy } 6339eda14cbcSMatt Macy 6340eda14cbcSMatt Macy if (spa->spa_is_exporting) { 6341eda14cbcSMatt Macy /* the pool is being exported by another thread */ 6342eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6343eda14cbcSMatt Macy return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS)); 6344eda14cbcSMatt Macy } 6345eda14cbcSMatt Macy spa->spa_is_exporting = B_TRUE; 6346eda14cbcSMatt Macy 6347eda14cbcSMatt Macy /* 6348eda14cbcSMatt Macy * Put a hold on the pool, drop the namespace lock, stop async tasks, 6349eda14cbcSMatt Macy * reacquire the namespace lock, and see if we can export. 6350eda14cbcSMatt Macy */ 6351eda14cbcSMatt Macy spa_open_ref(spa, FTAG); 6352eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6353eda14cbcSMatt Macy spa_async_suspend(spa); 6354eda14cbcSMatt Macy if (spa->spa_zvol_taskq) { 6355eda14cbcSMatt Macy zvol_remove_minors(spa, spa_name(spa), B_TRUE); 6356eda14cbcSMatt Macy taskq_wait(spa->spa_zvol_taskq); 6357eda14cbcSMatt Macy } 6358eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 6359eda14cbcSMatt Macy spa_close(spa, FTAG); 6360eda14cbcSMatt Macy 6361eda14cbcSMatt Macy if (spa->spa_state == POOL_STATE_UNINITIALIZED) 6362eda14cbcSMatt Macy goto export_spa; 6363eda14cbcSMatt Macy /* 6364eda14cbcSMatt Macy * The pool will be in core if it's openable, in which case we can 6365eda14cbcSMatt Macy * modify its state. Objsets may be open only because they're dirty, 6366eda14cbcSMatt Macy * so we have to force it to sync before checking spa_refcnt. 6367eda14cbcSMatt Macy */ 6368eda14cbcSMatt Macy if (spa->spa_sync_on) { 6369eda14cbcSMatt Macy txg_wait_synced(spa->spa_dsl_pool, 0); 6370eda14cbcSMatt Macy spa_evicting_os_wait(spa); 6371eda14cbcSMatt Macy } 6372eda14cbcSMatt Macy 6373eda14cbcSMatt Macy /* 6374eda14cbcSMatt Macy * A pool cannot be exported or destroyed if there are active 6375eda14cbcSMatt Macy * references. If we are resetting a pool, allow references by 6376eda14cbcSMatt Macy * fault injection handlers. 6377eda14cbcSMatt Macy */ 6378184c1b94SMartin Matuska if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) { 6379184c1b94SMartin Matuska error = SET_ERROR(EBUSY); 6380184c1b94SMartin Matuska goto fail; 6381eda14cbcSMatt Macy } 6382eda14cbcSMatt Macy 6383eda14cbcSMatt Macy if (spa->spa_sync_on) { 6384eda14cbcSMatt Macy /* 6385eda14cbcSMatt Macy * A pool cannot be exported if it has an active shared spare. 6386eda14cbcSMatt Macy * This is to prevent other pools from stealing the active spare 6387eda14cbcSMatt Macy * from an exported pool. At the user's own risk, such a pool can 6388eda14cbcSMatt Macy * be forcibly exported.
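 *
 * (From userland this is, roughly, the difference between "zpool
 * export" failing with EXDEV and "zpool export -f" succeeding.)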
6389eda14cbcSMatt Macy */ 6390eda14cbcSMatt Macy if (!force && new_state == POOL_STATE_EXPORTED && 6391eda14cbcSMatt Macy spa_has_active_shared_spare(spa)) { 6392184c1b94SMartin Matuska error = SET_ERROR(EXDEV); 6393184c1b94SMartin Matuska goto fail; 6394eda14cbcSMatt Macy } 6395eda14cbcSMatt Macy 6396eda14cbcSMatt Macy /* 6397eda14cbcSMatt Macy * We're about to export or destroy this pool. Make sure 6398eda14cbcSMatt Macy * we stop all initialization and trim activity here before 6399eda14cbcSMatt Macy * we set the spa_final_txg. This will ensure that all 6400eda14cbcSMatt Macy * dirty data resulting from the initialization is 6401eda14cbcSMatt Macy * committed to disk before we unload the pool. 6402eda14cbcSMatt Macy */ 6403eda14cbcSMatt Macy if (spa->spa_root_vdev != NULL) { 6404eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 6405eda14cbcSMatt Macy vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE); 6406eda14cbcSMatt Macy vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE); 6407eda14cbcSMatt Macy vdev_autotrim_stop_all(spa); 6408eda14cbcSMatt Macy vdev_rebuild_stop_all(spa); 6409eda14cbcSMatt Macy } 6410eda14cbcSMatt Macy 6411eda14cbcSMatt Macy /* 6412eda14cbcSMatt Macy * We want this to be reflected on every label, 6413eda14cbcSMatt Macy * so mark them all dirty. spa_unload() will do the 6414eda14cbcSMatt Macy * final sync that pushes these changes out. 6415eda14cbcSMatt Macy */ 6416eda14cbcSMatt Macy if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 6417eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6418eda14cbcSMatt Macy spa->spa_state = new_state; 6419eda14cbcSMatt Macy spa->spa_final_txg = spa_last_synced_txg(spa) + 6420eda14cbcSMatt Macy TXG_DEFER_SIZE + 1; 6421eda14cbcSMatt Macy vdev_config_dirty(spa->spa_root_vdev); 6422eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 6423eda14cbcSMatt Macy } 6424eda14cbcSMatt Macy } 6425eda14cbcSMatt Macy 6426eda14cbcSMatt Macy export_spa: 6427eda14cbcSMatt Macy if (new_state == POOL_STATE_DESTROYED) 6428eda14cbcSMatt Macy spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY); 6429eda14cbcSMatt Macy else if (new_state == POOL_STATE_EXPORTED) 6430eda14cbcSMatt Macy spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT); 6431eda14cbcSMatt Macy 6432eda14cbcSMatt Macy if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 6433eda14cbcSMatt Macy spa_unload(spa); 6434eda14cbcSMatt Macy spa_deactivate(spa); 6435eda14cbcSMatt Macy } 6436eda14cbcSMatt Macy 6437eda14cbcSMatt Macy if (oldconfig && spa->spa_config) 6438eda14cbcSMatt Macy VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 6439eda14cbcSMatt Macy 6440eda14cbcSMatt Macy if (new_state != POOL_STATE_UNINITIALIZED) { 6441eda14cbcSMatt Macy if (!hardforce) 6442eda14cbcSMatt Macy spa_write_cachefile(spa, B_TRUE, B_TRUE); 6443eda14cbcSMatt Macy spa_remove(spa); 6444eda14cbcSMatt Macy } else { 6445eda14cbcSMatt Macy /* 6446eda14cbcSMatt Macy * If spa_remove() is not called for this spa_t and 6447eda14cbcSMatt Macy * there is any possibility that it can be reused, 6448eda14cbcSMatt Macy * we make sure to reset the exporting flag. 
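 *
 * (This is the spa_reset() path: new_state is POOL_STATE_UNINITIALIZED,
 * so the spa_t stays in the namespace and may be looked up again.)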
6449eda14cbcSMatt Macy */ 6450eda14cbcSMatt Macy spa->spa_is_exporting = B_FALSE; 6451eda14cbcSMatt Macy } 6452eda14cbcSMatt Macy 6453eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6454eda14cbcSMatt Macy return (0); 6455184c1b94SMartin Matuska 6456184c1b94SMartin Matuska fail: 6457184c1b94SMartin Matuska spa->spa_is_exporting = B_FALSE; 6458184c1b94SMartin Matuska spa_async_resume(spa); 6459184c1b94SMartin Matuska mutex_exit(&spa_namespace_lock); 6460184c1b94SMartin Matuska return (error); 6461eda14cbcSMatt Macy } 6462eda14cbcSMatt Macy 6463eda14cbcSMatt Macy /* 6464eda14cbcSMatt Macy * Destroy a storage pool. 6465eda14cbcSMatt Macy */ 6466eda14cbcSMatt Macy int 6467180f8225SMatt Macy spa_destroy(const char *pool) 6468eda14cbcSMatt Macy { 6469eda14cbcSMatt Macy return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 6470eda14cbcSMatt Macy B_FALSE, B_FALSE)); 6471eda14cbcSMatt Macy } 6472eda14cbcSMatt Macy 6473eda14cbcSMatt Macy /* 6474eda14cbcSMatt Macy * Export a storage pool. 6475eda14cbcSMatt Macy */ 6476eda14cbcSMatt Macy int 6477180f8225SMatt Macy spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force, 6478eda14cbcSMatt Macy boolean_t hardforce) 6479eda14cbcSMatt Macy { 6480eda14cbcSMatt Macy return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 6481eda14cbcSMatt Macy force, hardforce)); 6482eda14cbcSMatt Macy } 6483eda14cbcSMatt Macy 6484eda14cbcSMatt Macy /* 6485eda14cbcSMatt Macy * Similar to spa_export(), this unloads the spa_t without actually removing it 6486eda14cbcSMatt Macy * from the namespace in any way. 6487eda14cbcSMatt Macy */ 6488eda14cbcSMatt Macy int 6489180f8225SMatt Macy spa_reset(const char *pool) 6490eda14cbcSMatt Macy { 6491eda14cbcSMatt Macy return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 6492eda14cbcSMatt Macy B_FALSE, B_FALSE)); 6493eda14cbcSMatt Macy } 6494eda14cbcSMatt Macy 6495eda14cbcSMatt Macy /* 6496eda14cbcSMatt Macy * ========================================================================== 6497eda14cbcSMatt Macy * Device manipulation 6498eda14cbcSMatt Macy * ========================================================================== 6499eda14cbcSMatt Macy */ 6500eda14cbcSMatt Macy 6501eda14cbcSMatt Macy /* 65027877fdebSMatt Macy * This is called as a synctask to increment the draid feature flag 65037877fdebSMatt Macy */ 65047877fdebSMatt Macy static void 65057877fdebSMatt Macy spa_draid_feature_incr(void *arg, dmu_tx_t *tx) 65067877fdebSMatt Macy { 65077877fdebSMatt Macy spa_t *spa = dmu_tx_pool(tx)->dp_spa; 65087877fdebSMatt Macy int draid = (int)(uintptr_t)arg; 65097877fdebSMatt Macy 65107877fdebSMatt Macy for (int c = 0; c < draid; c++) 65117877fdebSMatt Macy spa_feature_incr(spa, SPA_FEATURE_DRAID, tx); 65127877fdebSMatt Macy } 65137877fdebSMatt Macy 65147877fdebSMatt Macy /* 6515eda14cbcSMatt Macy * Add a device to a storage pool. 
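 *
 * The nvroot describes only the new top-level vdevs, in the same
 * nvlist format zpool(8) builds for 'zpool add'.  A hedged sketch of
 * a minimal single-disk nvroot (the device path is illustrative):
 *
 *	nvlist_t *disk = fnvlist_alloc();
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb");
 *	nvlist_t *root = fnvlist_alloc();
 *	fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	fnvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN, &disk, 1);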
6516eda14cbcSMatt Macy */ 6517eda14cbcSMatt Macy int 6518eda14cbcSMatt Macy spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 6519eda14cbcSMatt Macy { 65207877fdebSMatt Macy uint64_t txg, ndraid = 0; 6521eda14cbcSMatt Macy int error; 6522eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 6523eda14cbcSMatt Macy vdev_t *vd, *tvd; 6524eda14cbcSMatt Macy nvlist_t **spares, **l2cache; 6525eda14cbcSMatt Macy uint_t nspares, nl2cache; 6526eda14cbcSMatt Macy 6527eda14cbcSMatt Macy ASSERT(spa_writeable(spa)); 6528eda14cbcSMatt Macy 6529eda14cbcSMatt Macy txg = spa_vdev_enter(spa); 6530eda14cbcSMatt Macy 6531eda14cbcSMatt Macy if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 6532eda14cbcSMatt Macy VDEV_ALLOC_ADD)) != 0) 6533eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, error)); 6534eda14cbcSMatt Macy 6535eda14cbcSMatt Macy spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 6536eda14cbcSMatt Macy 6537eda14cbcSMatt Macy if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 6538eda14cbcSMatt Macy &nspares) != 0) 6539eda14cbcSMatt Macy nspares = 0; 6540eda14cbcSMatt Macy 6541eda14cbcSMatt Macy if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 6542eda14cbcSMatt Macy &nl2cache) != 0) 6543eda14cbcSMatt Macy nl2cache = 0; 6544eda14cbcSMatt Macy 6545eda14cbcSMatt Macy if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 6546eda14cbcSMatt Macy return (spa_vdev_exit(spa, vd, txg, EINVAL)); 6547eda14cbcSMatt Macy 6548eda14cbcSMatt Macy if (vd->vdev_children != 0 && 65497877fdebSMatt Macy (error = vdev_create(vd, txg, B_FALSE)) != 0) { 6550eda14cbcSMatt Macy return (spa_vdev_exit(spa, vd, txg, error)); 65517877fdebSMatt Macy } 65527877fdebSMatt Macy 65537877fdebSMatt Macy /* 65547877fdebSMatt Macy * The virtual dRAID spares must be added after the vdev tree is created 655516038816SMartin Matuska * and the vdev guids are generated. The guid of their associated 65567877fdebSMatt Macy * dRAID is stored in the config and used when opening the spare. 65577877fdebSMatt Macy */ 65587877fdebSMatt Macy if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid, 65597877fdebSMatt Macy rvd->vdev_children)) == 0) { 65607877fdebSMatt Macy if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot, 65617877fdebSMatt Macy ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0) 65627877fdebSMatt Macy nspares = 0; 65637877fdebSMatt Macy } else { 65647877fdebSMatt Macy return (spa_vdev_exit(spa, vd, txg, error)); 65657877fdebSMatt Macy } 6566eda14cbcSMatt Macy 6567eda14cbcSMatt Macy /* 6568eda14cbcSMatt Macy * We must validate the spares and l2cache devices after checking the 6569eda14cbcSMatt Macy * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 6570eda14cbcSMatt Macy */ 6571eda14cbcSMatt Macy if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 6572eda14cbcSMatt Macy return (spa_vdev_exit(spa, vd, txg, error)); 6573eda14cbcSMatt Macy 6574eda14cbcSMatt Macy /* 6575eda14cbcSMatt Macy * If we are in the middle of a device removal, we can only add 6576eda14cbcSMatt Macy * devices which match the existing devices in the pool. 6577eda14cbcSMatt Macy * If we are in the middle of a removal, or have some indirect 65787877fdebSMatt Macy * vdevs, we cannot add raidz or dRAID top levels.
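 *
 * (For example, while a removal is in flight, a new top-level vdev
 * whose ashift differs from spa_max_ashift is rejected below with
 * EINVAL.)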
6579eda14cbcSMatt Macy */ 6580eda14cbcSMatt Macy if (spa->spa_vdev_removal != NULL || 6581eda14cbcSMatt Macy spa->spa_removing_phys.sr_prev_indirect_vdev != -1) { 6582eda14cbcSMatt Macy for (int c = 0; c < vd->vdev_children; c++) { 6583eda14cbcSMatt Macy tvd = vd->vdev_child[c]; 6584eda14cbcSMatt Macy if (spa->spa_vdev_removal != NULL && 6585eda14cbcSMatt Macy tvd->vdev_ashift != spa->spa_max_ashift) { 6586eda14cbcSMatt Macy return (spa_vdev_exit(spa, vd, txg, EINVAL)); 6587eda14cbcSMatt Macy } 65887877fdebSMatt Macy /* Fail if top level vdev is raidz or a dRAID */ 65897877fdebSMatt Macy if (vdev_get_nparity(tvd) != 0) 6590eda14cbcSMatt Macy return (spa_vdev_exit(spa, vd, txg, EINVAL)); 65917877fdebSMatt Macy 6592eda14cbcSMatt Macy /* 6593eda14cbcSMatt Macy * Need the top level mirror to be 6594eda14cbcSMatt Macy * a mirror of leaf vdevs only 6595eda14cbcSMatt Macy */ 6596eda14cbcSMatt Macy if (tvd->vdev_ops == &vdev_mirror_ops) { 6597eda14cbcSMatt Macy for (uint64_t cid = 0; 6598eda14cbcSMatt Macy cid < tvd->vdev_children; cid++) { 6599eda14cbcSMatt Macy vdev_t *cvd = tvd->vdev_child[cid]; 6600eda14cbcSMatt Macy if (!cvd->vdev_ops->vdev_op_leaf) { 6601eda14cbcSMatt Macy return (spa_vdev_exit(spa, vd, 6602eda14cbcSMatt Macy txg, EINVAL)); 6603eda14cbcSMatt Macy } 6604eda14cbcSMatt Macy } 6605eda14cbcSMatt Macy } 6606eda14cbcSMatt Macy } 6607eda14cbcSMatt Macy } 6608eda14cbcSMatt Macy 6609eda14cbcSMatt Macy for (int c = 0; c < vd->vdev_children; c++) { 6610eda14cbcSMatt Macy tvd = vd->vdev_child[c]; 6611eda14cbcSMatt Macy vdev_remove_child(vd, tvd); 6612eda14cbcSMatt Macy tvd->vdev_id = rvd->vdev_children; 6613eda14cbcSMatt Macy vdev_add_child(rvd, tvd); 6614eda14cbcSMatt Macy vdev_config_dirty(tvd); 6615eda14cbcSMatt Macy } 6616eda14cbcSMatt Macy 6617eda14cbcSMatt Macy if (nspares != 0) { 6618eda14cbcSMatt Macy spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 6619eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES); 6620eda14cbcSMatt Macy spa_load_spares(spa); 6621eda14cbcSMatt Macy spa->spa_spares.sav_sync = B_TRUE; 6622eda14cbcSMatt Macy } 6623eda14cbcSMatt Macy 6624eda14cbcSMatt Macy if (nl2cache != 0) { 6625eda14cbcSMatt Macy spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 6626eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE); 6627eda14cbcSMatt Macy spa_load_l2cache(spa); 6628eda14cbcSMatt Macy spa->spa_l2cache.sav_sync = B_TRUE; 6629eda14cbcSMatt Macy } 6630eda14cbcSMatt Macy 6631eda14cbcSMatt Macy /* 66327877fdebSMatt Macy * We can't increment a feature while holding spa_vdev so we 66337877fdebSMatt Macy * have to do it in a synctask. 66347877fdebSMatt Macy */ 66357877fdebSMatt Macy if (ndraid != 0) { 66367877fdebSMatt Macy dmu_tx_t *tx; 66377877fdebSMatt Macy 66387877fdebSMatt Macy tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 66397877fdebSMatt Macy dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr, 66407877fdebSMatt Macy (void *)(uintptr_t)ndraid, tx); 66417877fdebSMatt Macy dmu_tx_commit(tx); 66427877fdebSMatt Macy } 66437877fdebSMatt Macy 66447877fdebSMatt Macy /* 6645eda14cbcSMatt Macy * We have to be careful when adding new vdevs to an existing pool. 6646eda14cbcSMatt Macy * If other threads start allocating from these vdevs before we 6647eda14cbcSMatt Macy * sync the config cache, and we lose power, then upon reboot we may 6648eda14cbcSMatt Macy * fail to open the pool because there are DVAs that the config cache 6649eda14cbcSMatt Macy * can't translate. 
Therefore, we first add the vdevs without 6650eda14cbcSMatt Macy * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 6651eda14cbcSMatt Macy * and then let spa_config_update() initialize the new metaslabs. 6652eda14cbcSMatt Macy * 6653eda14cbcSMatt Macy * spa_load() checks for added-but-not-initialized vdevs, so that 6654eda14cbcSMatt Macy * if we lose power at any point in this sequence, the remaining 6655eda14cbcSMatt Macy * steps will be completed the next time we load the pool. 6656eda14cbcSMatt Macy */ 6657eda14cbcSMatt Macy (void) spa_vdev_exit(spa, vd, txg, 0); 6658eda14cbcSMatt Macy 6659eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 6660eda14cbcSMatt Macy spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 6661eda14cbcSMatt Macy spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD); 6662eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 6663eda14cbcSMatt Macy 6664eda14cbcSMatt Macy return (0); 6665eda14cbcSMatt Macy } 6666eda14cbcSMatt Macy 6667eda14cbcSMatt Macy /* 6668eda14cbcSMatt Macy * Attach a device to a mirror. The arguments are the path to any device 6669eda14cbcSMatt Macy * in the mirror, and the nvroot for the new device. If the path specifies 6670eda14cbcSMatt Macy * a device that is not mirrored, we automatically insert the mirror vdev. 6671eda14cbcSMatt Macy * 6672eda14cbcSMatt Macy * If 'replacing' is specified, the new device is intended to replace the 6673eda14cbcSMatt Macy * existing device; in this case the two devices are made into their own 6674eda14cbcSMatt Macy * mirror using the 'replacing' vdev, which is functionally identical to 6675eda14cbcSMatt Macy * the mirror vdev (it actually reuses all the same ops) but has a few 6676eda14cbcSMatt Macy * extra rules: you can't attach to it after it's been created, and upon 6677eda14cbcSMatt Macy * completion of resilvering, the first disk (the one being replaced) 6678eda14cbcSMatt Macy * is automatically detached. 6679eda14cbcSMatt Macy * 6680eda14cbcSMatt Macy * If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild) 6681eda14cbcSMatt Macy * should be performed instead of traditional healing reconstruction. From 6682eda14cbcSMatt Macy * an administrator's perspective these are both resilver operations. 6683eda14cbcSMatt Macy */ 6684eda14cbcSMatt Macy int 6685eda14cbcSMatt Macy spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing, 6686eda14cbcSMatt Macy int rebuild) 6687eda14cbcSMatt Macy { 6688eda14cbcSMatt Macy uint64_t txg, dtl_max_txg; 6689eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 6690eda14cbcSMatt Macy vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 6691eda14cbcSMatt Macy vdev_ops_t *pvops; 6692eda14cbcSMatt Macy char *oldvdpath, *newvdpath; 6693eda14cbcSMatt Macy int newvd_isspare; 6694eda14cbcSMatt Macy int error; 6695eda14cbcSMatt Macy 6696eda14cbcSMatt Macy ASSERT(spa_writeable(spa)); 6697eda14cbcSMatt Macy 6698eda14cbcSMatt Macy txg = spa_vdev_enter(spa); 6699eda14cbcSMatt Macy 6700eda14cbcSMatt Macy oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 6701eda14cbcSMatt Macy 6702eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 6703eda14cbcSMatt Macy if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 6704eda14cbcSMatt Macy error = (spa_has_checkpoint(spa)) ?
6705eda14cbcSMatt Macy ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 6706eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, error)); 6707eda14cbcSMatt Macy } 6708eda14cbcSMatt Macy 6709eda14cbcSMatt Macy if (rebuild) { 6710eda14cbcSMatt Macy if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) 6711eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 6712eda14cbcSMatt Macy 6713eda14cbcSMatt Macy if (dsl_scan_resilvering(spa_get_dsl(spa))) 6714eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, 6715eda14cbcSMatt Macy ZFS_ERR_RESILVER_IN_PROGRESS)); 6716eda14cbcSMatt Macy } else { 6717eda14cbcSMatt Macy if (vdev_rebuild_active(rvd)) 6718eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, 6719eda14cbcSMatt Macy ZFS_ERR_REBUILD_IN_PROGRESS)); 6720eda14cbcSMatt Macy } 6721eda14cbcSMatt Macy 6722eda14cbcSMatt Macy if (spa->spa_vdev_removal != NULL) 6723eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 6724eda14cbcSMatt Macy 6725eda14cbcSMatt Macy if (oldvd == NULL) 6726eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 6727eda14cbcSMatt Macy 6728eda14cbcSMatt Macy if (!oldvd->vdev_ops->vdev_op_leaf) 6729eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 6730eda14cbcSMatt Macy 6731eda14cbcSMatt Macy pvd = oldvd->vdev_parent; 6732eda14cbcSMatt Macy 6733eda14cbcSMatt Macy if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 6734eda14cbcSMatt Macy VDEV_ALLOC_ATTACH)) != 0) 6735eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 6736eda14cbcSMatt Macy 6737eda14cbcSMatt Macy if (newrootvd->vdev_children != 1) 6738eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 6739eda14cbcSMatt Macy 6740eda14cbcSMatt Macy newvd = newrootvd->vdev_child[0]; 6741eda14cbcSMatt Macy 6742eda14cbcSMatt Macy if (!newvd->vdev_ops->vdev_op_leaf) 6743eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 6744eda14cbcSMatt Macy 6745eda14cbcSMatt Macy if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 6746eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, error)); 6747eda14cbcSMatt Macy 6748eda14cbcSMatt Macy /* 6749eda14cbcSMatt Macy * Spares can't replace logs 6750eda14cbcSMatt Macy */ 6751eda14cbcSMatt Macy if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 6752eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6753eda14cbcSMatt Macy 67547877fdebSMatt Macy /* 67557877fdebSMatt Macy * A dRAID spare can only replace a child of its parent dRAID vdev. 67567877fdebSMatt Macy */ 67577877fdebSMatt Macy if (newvd->vdev_ops == &vdev_draid_spare_ops && 67587877fdebSMatt Macy oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) { 67597877fdebSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 67607877fdebSMatt Macy } 67617877fdebSMatt Macy 6762eda14cbcSMatt Macy if (rebuild) { 6763eda14cbcSMatt Macy /* 67647877fdebSMatt Macy * For rebuilds, the top vdev must support reconstruction 6765eda14cbcSMatt Macy * using only space maps. This means the only allowable 67667877fdebSMatt Macy * vdev types are the root vdev, a mirror, or dRAID.
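 *
 * (raidz is the notable exclusion: a sequential rebuild copies
 * allocated ranges verbatim from a surviving copy, which mirrors and
 * dRAID can supply but raidz parity reconstruction cannot.)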
6767eda14cbcSMatt Macy */ 67687877fdebSMatt Macy tvd = pvd; 67697877fdebSMatt Macy if (pvd->vdev_top != NULL) 67707877fdebSMatt Macy tvd = pvd->vdev_top; 67717877fdebSMatt Macy 67727877fdebSMatt Macy if (tvd->vdev_ops != &vdev_mirror_ops && 67737877fdebSMatt Macy tvd->vdev_ops != &vdev_root_ops && 67747877fdebSMatt Macy tvd->vdev_ops != &vdev_draid_ops) { 6775eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6776eda14cbcSMatt Macy } 6777eda14cbcSMatt Macy } 6778eda14cbcSMatt Macy 6779eda14cbcSMatt Macy if (!replacing) { 6780eda14cbcSMatt Macy /* 6781eda14cbcSMatt Macy * For attach, the only allowable parent is a mirror or the root 6782eda14cbcSMatt Macy * vdev. 6783eda14cbcSMatt Macy */ 6784eda14cbcSMatt Macy if (pvd->vdev_ops != &vdev_mirror_ops && 6785eda14cbcSMatt Macy pvd->vdev_ops != &vdev_root_ops) 6786eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6787eda14cbcSMatt Macy 6788eda14cbcSMatt Macy pvops = &vdev_mirror_ops; 6789eda14cbcSMatt Macy } else { 6790eda14cbcSMatt Macy /* 6791eda14cbcSMatt Macy * Active hot spares can only be replaced by inactive hot 6792eda14cbcSMatt Macy * spares. 6793eda14cbcSMatt Macy */ 6794eda14cbcSMatt Macy if (pvd->vdev_ops == &vdev_spare_ops && 6795eda14cbcSMatt Macy oldvd->vdev_isspare && 6796eda14cbcSMatt Macy !spa_has_spare(spa, newvd->vdev_guid)) 6797eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6798eda14cbcSMatt Macy 6799eda14cbcSMatt Macy /* 6800eda14cbcSMatt Macy * If the source is a hot spare, and the parent isn't already a 6801eda14cbcSMatt Macy * spare, then we want to create a new hot spare. Otherwise, we 6802eda14cbcSMatt Macy * want to create a replacing vdev. The user is not allowed to 6803eda14cbcSMatt Macy * attach to a spared vdev child unless the 'isspare' state is 6804eda14cbcSMatt Macy * the same (spare replaces spare, non-spare replaces 6805eda14cbcSMatt Macy * non-spare). 6806eda14cbcSMatt Macy */ 6807eda14cbcSMatt Macy if (pvd->vdev_ops == &vdev_replacing_ops && 6808eda14cbcSMatt Macy spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 6809eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6810eda14cbcSMatt Macy } else if (pvd->vdev_ops == &vdev_spare_ops && 6811eda14cbcSMatt Macy newvd->vdev_isspare != oldvd->vdev_isspare) { 6812eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6813eda14cbcSMatt Macy } 6814eda14cbcSMatt Macy 6815eda14cbcSMatt Macy if (newvd->vdev_isspare) 6816eda14cbcSMatt Macy pvops = &vdev_spare_ops; 6817eda14cbcSMatt Macy else 6818eda14cbcSMatt Macy pvops = &vdev_replacing_ops; 6819eda14cbcSMatt Macy } 6820eda14cbcSMatt Macy 6821eda14cbcSMatt Macy /* 6822eda14cbcSMatt Macy * Make sure the new device is big enough. 6823eda14cbcSMatt Macy */ 6824eda14cbcSMatt Macy if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 6825eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 6826eda14cbcSMatt Macy 6827eda14cbcSMatt Macy /* 6828eda14cbcSMatt Macy * The new device cannot have a higher alignment requirement 6829eda14cbcSMatt Macy * than the top-level vdev. 6830eda14cbcSMatt Macy */ 6831eda14cbcSMatt Macy if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 6832eda14cbcSMatt Macy return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6833eda14cbcSMatt Macy 6834eda14cbcSMatt Macy /* 6835eda14cbcSMatt Macy * If this is an in-place replacement, update oldvd's path and devid 6836eda14cbcSMatt Macy * to make it distinguishable from newvd, and unopenable from now on. 
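 *
 * (For example, replacing /dev/sda with itself leaves the outgoing
 * vdev with the path "/dev/sda/old", which no longer names an
 * openable device.)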
6837eda14cbcSMatt Macy */ 6838eda14cbcSMatt Macy if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 6839eda14cbcSMatt Macy spa_strfree(oldvd->vdev_path); 6840eda14cbcSMatt Macy oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 6841eda14cbcSMatt Macy KM_SLEEP); 6842eda14cbcSMatt Macy (void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5, 6843eda14cbcSMatt Macy "%s/%s", newvd->vdev_path, "old"); 6844eda14cbcSMatt Macy if (oldvd->vdev_devid != NULL) { 6845eda14cbcSMatt Macy spa_strfree(oldvd->vdev_devid); 6846eda14cbcSMatt Macy oldvd->vdev_devid = NULL; 6847eda14cbcSMatt Macy } 6848eda14cbcSMatt Macy } 6849eda14cbcSMatt Macy 6850eda14cbcSMatt Macy /* 6851eda14cbcSMatt Macy * If the parent is not a mirror, or if we're replacing, insert the new 6852eda14cbcSMatt Macy * mirror/replacing/spare vdev above oldvd. 6853eda14cbcSMatt Macy */ 6854eda14cbcSMatt Macy if (pvd->vdev_ops != pvops) 6855eda14cbcSMatt Macy pvd = vdev_add_parent(oldvd, pvops); 6856eda14cbcSMatt Macy 6857eda14cbcSMatt Macy ASSERT(pvd->vdev_top->vdev_parent == rvd); 6858eda14cbcSMatt Macy ASSERT(pvd->vdev_ops == pvops); 6859eda14cbcSMatt Macy ASSERT(oldvd->vdev_parent == pvd); 6860eda14cbcSMatt Macy 6861eda14cbcSMatt Macy /* 6862eda14cbcSMatt Macy * Extract the new device from its root and add it to pvd. 6863eda14cbcSMatt Macy */ 6864eda14cbcSMatt Macy vdev_remove_child(newrootvd, newvd); 6865eda14cbcSMatt Macy newvd->vdev_id = pvd->vdev_children; 6866eda14cbcSMatt Macy newvd->vdev_crtxg = oldvd->vdev_crtxg; 6867eda14cbcSMatt Macy vdev_add_child(pvd, newvd); 6868eda14cbcSMatt Macy 6869eda14cbcSMatt Macy /* 6870eda14cbcSMatt Macy * Reevaluate the parent vdev state. 6871eda14cbcSMatt Macy */ 6872eda14cbcSMatt Macy vdev_propagate_state(pvd); 6873eda14cbcSMatt Macy 6874eda14cbcSMatt Macy tvd = newvd->vdev_top; 6875eda14cbcSMatt Macy ASSERT(pvd->vdev_top == tvd); 6876eda14cbcSMatt Macy ASSERT(tvd->vdev_parent == rvd); 6877eda14cbcSMatt Macy 6878eda14cbcSMatt Macy vdev_config_dirty(tvd); 6879eda14cbcSMatt Macy 6880eda14cbcSMatt Macy /* 6881eda14cbcSMatt Macy * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 6882eda14cbcSMatt Macy * for any dmu_sync-ed blocks. It will propagate upward when 6883eda14cbcSMatt Macy * spa_vdev_exit() calls vdev_dtl_reassess(). 6884eda14cbcSMatt Macy */ 6885eda14cbcSMatt Macy dtl_max_txg = txg + TXG_CONCURRENT_STATES; 6886eda14cbcSMatt Macy 6887eda14cbcSMatt Macy vdev_dtl_dirty(newvd, DTL_MISSING, 6888eda14cbcSMatt Macy TXG_INITIAL, dtl_max_txg - TXG_INITIAL); 6889eda14cbcSMatt Macy 6890eda14cbcSMatt Macy if (newvd->vdev_isspare) { 6891eda14cbcSMatt Macy spa_spare_activate(newvd); 6892eda14cbcSMatt Macy spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE); 6893eda14cbcSMatt Macy } 6894eda14cbcSMatt Macy 6895eda14cbcSMatt Macy oldvdpath = spa_strdup(oldvd->vdev_path); 6896eda14cbcSMatt Macy newvdpath = spa_strdup(newvd->vdev_path); 6897eda14cbcSMatt Macy newvd_isspare = newvd->vdev_isspare; 6898eda14cbcSMatt Macy 6899eda14cbcSMatt Macy /* 6900eda14cbcSMatt Macy * Mark newvd's DTL dirty in this txg. 6901eda14cbcSMatt Macy */ 6902eda14cbcSMatt Macy vdev_dirty(tvd, VDD_DTL, newvd, txg); 6903eda14cbcSMatt Macy 6904eda14cbcSMatt Macy /* 6905eda14cbcSMatt Macy * Schedule the resilver or rebuild to restart in the future. We do 6906eda14cbcSMatt Macy * this to ensure that dmu_sync-ed blocks have been stitched into the 6907eda14cbcSMatt Macy * respective datasets. 
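 *
 * (dtl_max_txg was computed above as txg + TXG_CONCURRENT_STATES, so
 * the DTL range dirtied on newvd covers any dmu_sync()-ed block born
 * in a txg that was still open at attach time.)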
6908eda14cbcSMatt Macy */ 6909eda14cbcSMatt Macy if (rebuild) { 6910eda14cbcSMatt Macy newvd->vdev_rebuild_txg = txg; 6911eda14cbcSMatt Macy 6912eda14cbcSMatt Macy vdev_rebuild(tvd); 6913eda14cbcSMatt Macy } else { 6914eda14cbcSMatt Macy newvd->vdev_resilver_txg = txg; 6915eda14cbcSMatt Macy 6916eda14cbcSMatt Macy if (dsl_scan_resilvering(spa_get_dsl(spa)) && 6917eda14cbcSMatt Macy spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) { 6918eda14cbcSMatt Macy vdev_defer_resilver(newvd); 6919eda14cbcSMatt Macy } else { 6920eda14cbcSMatt Macy dsl_scan_restart_resilver(spa->spa_dsl_pool, 6921eda14cbcSMatt Macy dtl_max_txg); 6922eda14cbcSMatt Macy } 6923eda14cbcSMatt Macy } 6924eda14cbcSMatt Macy 6925eda14cbcSMatt Macy if (spa->spa_bootfs) 6926eda14cbcSMatt Macy spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH); 6927eda14cbcSMatt Macy 6928eda14cbcSMatt Macy spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH); 6929eda14cbcSMatt Macy 6930eda14cbcSMatt Macy /* 6931eda14cbcSMatt Macy * Commit the config 6932eda14cbcSMatt Macy */ 6933eda14cbcSMatt Macy (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 6934eda14cbcSMatt Macy 6935eda14cbcSMatt Macy spa_history_log_internal(spa, "vdev attach", NULL, 6936eda14cbcSMatt Macy "%s vdev=%s %s vdev=%s", 6937eda14cbcSMatt Macy replacing && newvd_isspare ? "spare in" : 6938eda14cbcSMatt Macy replacing ? "replace" : "attach", newvdpath, 6939eda14cbcSMatt Macy replacing ? "for" : "to", oldvdpath); 6940eda14cbcSMatt Macy 6941eda14cbcSMatt Macy spa_strfree(oldvdpath); 6942eda14cbcSMatt Macy spa_strfree(newvdpath); 6943eda14cbcSMatt Macy 6944eda14cbcSMatt Macy return (0); 6945eda14cbcSMatt Macy } 6946eda14cbcSMatt Macy 6947eda14cbcSMatt Macy /* 6948eda14cbcSMatt Macy * Detach a device from a mirror or replacing vdev. 6949eda14cbcSMatt Macy * 6950eda14cbcSMatt Macy * If 'replace_done' is specified, only detach if the parent 6951eda14cbcSMatt Macy * is a replacing vdev. 6952eda14cbcSMatt Macy */ 6953eda14cbcSMatt Macy int 6954eda14cbcSMatt Macy spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 6955eda14cbcSMatt Macy { 6956eda14cbcSMatt Macy uint64_t txg; 6957eda14cbcSMatt Macy int error; 6958eda14cbcSMatt Macy vdev_t *rvd __maybe_unused = spa->spa_root_vdev; 6959eda14cbcSMatt Macy vdev_t *vd, *pvd, *cvd, *tvd; 6960eda14cbcSMatt Macy boolean_t unspare = B_FALSE; 6961eda14cbcSMatt Macy uint64_t unspare_guid = 0; 6962eda14cbcSMatt Macy char *vdpath; 6963eda14cbcSMatt Macy 6964eda14cbcSMatt Macy ASSERT(spa_writeable(spa)); 6965eda14cbcSMatt Macy 6966eda14cbcSMatt Macy txg = spa_vdev_detach_enter(spa, guid); 6967eda14cbcSMatt Macy 6968eda14cbcSMatt Macy vd = spa_lookup_by_guid(spa, guid, B_FALSE); 6969eda14cbcSMatt Macy 6970eda14cbcSMatt Macy /* 6971eda14cbcSMatt Macy * Besides being called directly from userland through the 6972eda14cbcSMatt Macy * ioctl interface, spa_vdev_detach() can potentially be called 6973eda14cbcSMatt Macy * at the end of spa_vdev_resilver_done(). 6974eda14cbcSMatt Macy * 6975eda14cbcSMatt Macy * In the regular case, when we have a checkpoint, this shouldn't 6976eda14cbcSMatt Macy * happen as we never empty the DTLs of a vdev during the scrub 6977eda14cbcSMatt Macy * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done() 6978eda14cbcSMatt Macy * should never get here when we have a checkpoint.
6979eda14cbcSMatt Macy * 6980eda14cbcSMatt Macy * That said, even if we checkpoint the pool at the exact moment 6981eda14cbcSMatt Macy * spa_vdev_resilver_done() calls this function, everything 6982eda14cbcSMatt Macy * should be fine, as the resilver will return right away. 6983eda14cbcSMatt Macy */ 6984eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 6985eda14cbcSMatt Macy if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 6986eda14cbcSMatt Macy error = (spa_has_checkpoint(spa)) ? 6987eda14cbcSMatt Macy ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 6988eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, error)); 6989eda14cbcSMatt Macy } 6990eda14cbcSMatt Macy 6991eda14cbcSMatt Macy if (vd == NULL) 6992eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 6993eda14cbcSMatt Macy 6994eda14cbcSMatt Macy if (!vd->vdev_ops->vdev_op_leaf) 6995eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 6996eda14cbcSMatt Macy 6997eda14cbcSMatt Macy pvd = vd->vdev_parent; 6998eda14cbcSMatt Macy 6999eda14cbcSMatt Macy /* 7000eda14cbcSMatt Macy * If the parent/child relationship is not as expected, don't do it. 7001eda14cbcSMatt Macy * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 7002eda14cbcSMatt Macy * vdev that's replacing B with C. The user's intent in replacing 7003eda14cbcSMatt Macy * is to go from M(A,B) to M(A,C). If the user decides to cancel 7004eda14cbcSMatt Macy * the replace by detaching C, the expected behavior is to end up 7005eda14cbcSMatt Macy * M(A,B). But suppose that right after deciding to detach C, 7006eda14cbcSMatt Macy * the replacement of B completes. We would have M(A,C), and then 7007eda14cbcSMatt Macy * ask to detach C, which would leave us with just A -- not what 7008eda14cbcSMatt Macy * the user wanted. To prevent this, we make sure that the 7009eda14cbcSMatt Macy * parent/child relationship hasn't changed -- in this example, 7010eda14cbcSMatt Macy * that C's parent is still the replacing vdev R. 7011eda14cbcSMatt Macy */ 7012eda14cbcSMatt Macy if (pvd->vdev_guid != pguid && pguid != 0) 7013eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 7014eda14cbcSMatt Macy 7015eda14cbcSMatt Macy /* 7016eda14cbcSMatt Macy * Only 'replacing' or 'spare' vdevs can be replaced. 7017eda14cbcSMatt Macy */ 7018eda14cbcSMatt Macy if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 7019eda14cbcSMatt Macy pvd->vdev_ops != &vdev_spare_ops) 7020eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7021eda14cbcSMatt Macy 7022eda14cbcSMatt Macy ASSERT(pvd->vdev_ops != &vdev_spare_ops || 7023eda14cbcSMatt Macy spa_version(spa) >= SPA_VERSION_SPARES); 7024eda14cbcSMatt Macy 7025eda14cbcSMatt Macy /* 7026eda14cbcSMatt Macy * Only mirror, replacing, and spare vdevs support detach. 7027eda14cbcSMatt Macy */ 7028eda14cbcSMatt Macy if (pvd->vdev_ops != &vdev_replacing_ops && 7029eda14cbcSMatt Macy pvd->vdev_ops != &vdev_mirror_ops && 7030eda14cbcSMatt Macy pvd->vdev_ops != &vdev_spare_ops) 7031eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7032eda14cbcSMatt Macy 7033eda14cbcSMatt Macy /* 7034eda14cbcSMatt Macy * If this device has the only valid copy of some data, 7035eda14cbcSMatt Macy * we cannot safely detach it.
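 *
 * (vdev_dtl_required() below answers exactly that question: whether
 * some txg range would lose its last healthy copy if vd were removed
 * from the tree.)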
7036eda14cbcSMatt Macy */ 7037eda14cbcSMatt Macy if (vdev_dtl_required(vd)) 7038eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 7039eda14cbcSMatt Macy 7040eda14cbcSMatt Macy ASSERT(pvd->vdev_children >= 2); 7041eda14cbcSMatt Macy 7042eda14cbcSMatt Macy /* 7043eda14cbcSMatt Macy * If we are detaching the second disk from a replacing vdev, then 7044eda14cbcSMatt Macy * check to see if we changed the original vdev's path to have "/old" 7045eda14cbcSMatt Macy * at the end in spa_vdev_attach(). If so, undo that change now. 7046eda14cbcSMatt Macy */ 7047eda14cbcSMatt Macy if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 7048eda14cbcSMatt Macy vd->vdev_path != NULL) { 7049eda14cbcSMatt Macy size_t len = strlen(vd->vdev_path); 7050eda14cbcSMatt Macy 7051eda14cbcSMatt Macy for (int c = 0; c < pvd->vdev_children; c++) { 7052eda14cbcSMatt Macy cvd = pvd->vdev_child[c]; 7053eda14cbcSMatt Macy 7054eda14cbcSMatt Macy if (cvd == vd || cvd->vdev_path == NULL) 7055eda14cbcSMatt Macy continue; 7056eda14cbcSMatt Macy 7057eda14cbcSMatt Macy if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 7058eda14cbcSMatt Macy strcmp(cvd->vdev_path + len, "/old") == 0) { 7059eda14cbcSMatt Macy spa_strfree(cvd->vdev_path); 7060eda14cbcSMatt Macy cvd->vdev_path = spa_strdup(vd->vdev_path); 7061eda14cbcSMatt Macy break; 7062eda14cbcSMatt Macy } 7063eda14cbcSMatt Macy } 7064eda14cbcSMatt Macy } 7065eda14cbcSMatt Macy 7066eda14cbcSMatt Macy /* 70677877fdebSMatt Macy * If we are detaching the original disk from a normal spare, then it 70687877fdebSMatt Macy * implies that the spare should become a real disk, and be removed 70697877fdebSMatt Macy * from the active spare list for the pool. dRAID spares on the 70707877fdebSMatt Macy * other hand are coupled to the pool and thus should never be removed 70717877fdebSMatt Macy * from the spares list. 7072eda14cbcSMatt Macy */ 70737877fdebSMatt Macy if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) { 70747877fdebSMatt Macy vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1]; 70757877fdebSMatt Macy 70767877fdebSMatt Macy if (last_cvd->vdev_isspare && 70777877fdebSMatt Macy last_cvd->vdev_ops != &vdev_draid_spare_ops) { 7078eda14cbcSMatt Macy unspare = B_TRUE; 70797877fdebSMatt Macy } 70807877fdebSMatt Macy } 7081eda14cbcSMatt Macy 7082eda14cbcSMatt Macy /* 7083eda14cbcSMatt Macy * Erase the disk labels so the disk can be used for other things. 7084eda14cbcSMatt Macy * This must be done after all other error cases are handled, 7085eda14cbcSMatt Macy * but before we disembowel vd (so we can still do I/O to it). 7086eda14cbcSMatt Macy * But if we can't do it, don't treat the error as fatal -- 7087eda14cbcSMatt Macy * it may be that the unwritability of the disk is the reason 7088eda14cbcSMatt Macy * it's being detached! 7089eda14cbcSMatt Macy */ 7090eda14cbcSMatt Macy error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 7091eda14cbcSMatt Macy 7092eda14cbcSMatt Macy /* 7093eda14cbcSMatt Macy * Remove vd from its parent and compact the parent's children. 7094eda14cbcSMatt Macy */ 7095eda14cbcSMatt Macy vdev_remove_child(pvd, vd); 7096eda14cbcSMatt Macy vdev_compact_children(pvd); 7097eda14cbcSMatt Macy 7098eda14cbcSMatt Macy /* 7099eda14cbcSMatt Macy * Remember one of the remaining children so we can get tvd below. 
7100eda14cbcSMatt Macy */ 7101eda14cbcSMatt Macy cvd = pvd->vdev_child[pvd->vdev_children - 1]; 7102eda14cbcSMatt Macy 7103eda14cbcSMatt Macy /* 7104eda14cbcSMatt Macy * If we need to remove the remaining child from the list of hot spares, 7105eda14cbcSMatt Macy * do it now, marking the vdev as no longer a spare in the process. 7106eda14cbcSMatt Macy * We must do this before vdev_remove_parent(), because that can 7107eda14cbcSMatt Macy * change the GUID if it creates a new toplevel GUID. For a similar 7108eda14cbcSMatt Macy * reason, we must remove the spare now, in the same txg as the detach; 7109eda14cbcSMatt Macy * otherwise someone could attach a new sibling, change the GUID, and 7110eda14cbcSMatt Macy * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 7111eda14cbcSMatt Macy */ 7112eda14cbcSMatt Macy if (unspare) { 7113eda14cbcSMatt Macy ASSERT(cvd->vdev_isspare); 7114eda14cbcSMatt Macy spa_spare_remove(cvd); 7115eda14cbcSMatt Macy unspare_guid = cvd->vdev_guid; 7116eda14cbcSMatt Macy (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 7117eda14cbcSMatt Macy cvd->vdev_unspare = B_TRUE; 7118eda14cbcSMatt Macy } 7119eda14cbcSMatt Macy 7120eda14cbcSMatt Macy /* 7121eda14cbcSMatt Macy * If the parent mirror/replacing vdev only has one child, 7122eda14cbcSMatt Macy * the parent is no longer needed. Remove it from the tree. 7123eda14cbcSMatt Macy */ 7124eda14cbcSMatt Macy if (pvd->vdev_children == 1) { 7125eda14cbcSMatt Macy if (pvd->vdev_ops == &vdev_spare_ops) 7126eda14cbcSMatt Macy cvd->vdev_unspare = B_FALSE; 7127eda14cbcSMatt Macy vdev_remove_parent(cvd); 7128eda14cbcSMatt Macy } 7129eda14cbcSMatt Macy 7130eda14cbcSMatt Macy /* 7131eda14cbcSMatt Macy * We don't set tvd until now because the parent we just removed 7132eda14cbcSMatt Macy * may have been the previous top-level vdev. 7133eda14cbcSMatt Macy */ 7134eda14cbcSMatt Macy tvd = cvd->vdev_top; 7135eda14cbcSMatt Macy ASSERT(tvd->vdev_parent == rvd); 7136eda14cbcSMatt Macy 7137eda14cbcSMatt Macy /* 7138eda14cbcSMatt Macy * Reevaluate the parent vdev state. 7139eda14cbcSMatt Macy */ 7140eda14cbcSMatt Macy vdev_propagate_state(cvd); 7141eda14cbcSMatt Macy 7142eda14cbcSMatt Macy /* 7143eda14cbcSMatt Macy * If the 'autoexpand' property is set on the pool then automatically 7144eda14cbcSMatt Macy * try to expand the size of the pool. For example if the device we 7145eda14cbcSMatt Macy * just detached was smaller than the others, it may be possible to 7146eda14cbcSMatt Macy * add metaslabs (i.e. grow the pool). We need to reopen the vdev 7147eda14cbcSMatt Macy * first so that we can obtain the updated sizes of the leaf vdevs. 7148eda14cbcSMatt Macy */ 7149eda14cbcSMatt Macy if (spa->spa_autoexpand) { 7150eda14cbcSMatt Macy vdev_reopen(tvd); 7151eda14cbcSMatt Macy vdev_expand(tvd, txg); 7152eda14cbcSMatt Macy } 7153eda14cbcSMatt Macy 7154eda14cbcSMatt Macy vdev_config_dirty(tvd); 7155eda14cbcSMatt Macy 7156eda14cbcSMatt Macy /* 7157eda14cbcSMatt Macy * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 7158eda14cbcSMatt Macy * vd->vdev_detached is set and free vd's DTL object in syncing context. 7159eda14cbcSMatt Macy * But first make sure we're not on any *other* txg's DTL list, to 7160eda14cbcSMatt Macy * prevent vd from being accessed after it's freed. 7161eda14cbcSMatt Macy */ 7162eda14cbcSMatt Macy vdpath = spa_strdup(vd->vdev_path ? 
vd->vdev_path : "none"); 7163eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) 7164eda14cbcSMatt Macy (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 7165eda14cbcSMatt Macy vd->vdev_detached = B_TRUE; 7166eda14cbcSMatt Macy vdev_dirty(tvd, VDD_DTL, vd, txg); 7167eda14cbcSMatt Macy 7168eda14cbcSMatt Macy spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE); 7169eda14cbcSMatt Macy spa_notify_waiters(spa); 7170eda14cbcSMatt Macy 7171eda14cbcSMatt Macy /* hang on to the spa before we release the lock */ 7172eda14cbcSMatt Macy spa_open_ref(spa, FTAG); 7173eda14cbcSMatt Macy 7174eda14cbcSMatt Macy error = spa_vdev_exit(spa, vd, txg, 0); 7175eda14cbcSMatt Macy 7176eda14cbcSMatt Macy spa_history_log_internal(spa, "detach", NULL, 7177eda14cbcSMatt Macy "vdev=%s", vdpath); 7178eda14cbcSMatt Macy spa_strfree(vdpath); 7179eda14cbcSMatt Macy 7180eda14cbcSMatt Macy /* 7181eda14cbcSMatt Macy * If this was the removal of the original device in a hot spare vdev, 7182eda14cbcSMatt Macy * then we want to go through and remove the device from the hot spare 7183eda14cbcSMatt Macy * list of every other pool. 7184eda14cbcSMatt Macy */ 7185eda14cbcSMatt Macy if (unspare) { 7186eda14cbcSMatt Macy spa_t *altspa = NULL; 7187eda14cbcSMatt Macy 7188eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 7189eda14cbcSMatt Macy while ((altspa = spa_next(altspa)) != NULL) { 7190eda14cbcSMatt Macy if (altspa->spa_state != POOL_STATE_ACTIVE || 7191eda14cbcSMatt Macy altspa == spa) 7192eda14cbcSMatt Macy continue; 7193eda14cbcSMatt Macy 7194eda14cbcSMatt Macy spa_open_ref(altspa, FTAG); 7195eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 7196eda14cbcSMatt Macy (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 7197eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 7198eda14cbcSMatt Macy spa_close(altspa, FTAG); 7199eda14cbcSMatt Macy } 7200eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 7201eda14cbcSMatt Macy 7202eda14cbcSMatt Macy /* search the rest of the vdevs for spares to remove */ 7203eda14cbcSMatt Macy spa_vdev_resilver_done(spa); 7204eda14cbcSMatt Macy } 7205eda14cbcSMatt Macy 7206eda14cbcSMatt Macy /* all done with the spa; OK to release */ 7207eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 7208eda14cbcSMatt Macy spa_close(spa, FTAG); 7209eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 7210eda14cbcSMatt Macy 7211eda14cbcSMatt Macy return (error); 7212eda14cbcSMatt Macy } 7213eda14cbcSMatt Macy 7214eda14cbcSMatt Macy static int 7215eda14cbcSMatt Macy spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, 7216eda14cbcSMatt Macy list_t *vd_list) 7217eda14cbcSMatt Macy { 7218eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7219eda14cbcSMatt Macy 7220eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 7221eda14cbcSMatt Macy 7222eda14cbcSMatt Macy /* Look up vdev and ensure it's a leaf. 
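 * Each rejection below maps to a distinct errno so callers can tell
 * why a request was refused: ENODEV for a missing or detached vdev,
 * EINVAL for anything that is not a concrete leaf, and EROFS for a
 * device that is not writeable.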
 */
7223eda14cbcSMatt Macy 	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7224eda14cbcSMatt Macy 	if (vd == NULL || vd->vdev_detached) {
7225eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7226eda14cbcSMatt Macy 		return (SET_ERROR(ENODEV));
7227eda14cbcSMatt Macy 	} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
7228eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7229eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
7230eda14cbcSMatt Macy 	} else if (!vdev_writeable(vd)) {
7231eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7232eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
7233eda14cbcSMatt Macy 	}
7234eda14cbcSMatt Macy 	mutex_enter(&vd->vdev_initialize_lock);
7235eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7236eda14cbcSMatt Macy 
7237eda14cbcSMatt Macy 	/*
7238eda14cbcSMatt Macy 	 * When we activate an initialize action we check to see
7239eda14cbcSMatt Macy 	 * if the vdev_initialize_thread is NULL. We do this instead
7240eda14cbcSMatt Macy 	 * of using the vdev_initialize_state since there might be
7241eda14cbcSMatt Macy 	 * a previous initialization process which has completed but
7242eda14cbcSMatt Macy 	 * whose thread has not yet exited.
7243eda14cbcSMatt Macy 	 */
7244eda14cbcSMatt Macy 	if (cmd_type == POOL_INITIALIZE_START &&
7245eda14cbcSMatt Macy 	    (vd->vdev_initialize_thread != NULL ||
7246eda14cbcSMatt Macy 	    vd->vdev_top->vdev_removing)) {
7247eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
7248eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
7249eda14cbcSMatt Macy 	} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
7250eda14cbcSMatt Macy 	    (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
7251eda14cbcSMatt Macy 	    vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
7252eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
7253eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
7254eda14cbcSMatt Macy 	} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
7255eda14cbcSMatt Macy 	    vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
7256eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
7257eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
7258eda14cbcSMatt Macy 	}
7259eda14cbcSMatt Macy 
7260eda14cbcSMatt Macy 	switch (cmd_type) {
7261eda14cbcSMatt Macy 	case POOL_INITIALIZE_START:
7262eda14cbcSMatt Macy 		vdev_initialize(vd);
7263eda14cbcSMatt Macy 		break;
7264eda14cbcSMatt Macy 	case POOL_INITIALIZE_CANCEL:
7265eda14cbcSMatt Macy 		vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
7266eda14cbcSMatt Macy 		break;
7267eda14cbcSMatt Macy 	case POOL_INITIALIZE_SUSPEND:
7268eda14cbcSMatt Macy 		vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
7269eda14cbcSMatt Macy 		break;
7270eda14cbcSMatt Macy 	default:
7271eda14cbcSMatt Macy 		panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
7272eda14cbcSMatt Macy 	}
7273eda14cbcSMatt Macy 	mutex_exit(&vd->vdev_initialize_lock);
7274eda14cbcSMatt Macy 
7275eda14cbcSMatt Macy 	return (0);
7276eda14cbcSMatt Macy }
7277eda14cbcSMatt Macy 
7278eda14cbcSMatt Macy int
7279eda14cbcSMatt Macy spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
7280eda14cbcSMatt Macy     nvlist_t *vdev_errlist)
7281eda14cbcSMatt Macy {
7282eda14cbcSMatt Macy 	int total_errors = 0;
7283eda14cbcSMatt Macy 	list_t vd_list;
7284eda14cbcSMatt Macy 
7285eda14cbcSMatt Macy 	list_create(&vd_list, sizeof (vdev_t),
7286eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_initialize_node));
7287eda14cbcSMatt Macy 
7288eda14cbcSMatt Macy 	/*
7289eda14cbcSMatt Macy 	 * We hold the
namespace lock through the whole function 7290eda14cbcSMatt Macy * to prevent any changes to the pool while we're starting or 7291eda14cbcSMatt Macy * stopping initialization. The config and state locks are held so that 7292eda14cbcSMatt Macy * we can properly assess the vdev state before we commit to 7293eda14cbcSMatt Macy * the initializing operation. 7294eda14cbcSMatt Macy */ 7295eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 7296eda14cbcSMatt Macy 7297eda14cbcSMatt Macy for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); 7298eda14cbcSMatt Macy pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { 7299eda14cbcSMatt Macy uint64_t vdev_guid = fnvpair_value_uint64(pair); 7300eda14cbcSMatt Macy 7301eda14cbcSMatt Macy int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type, 7302eda14cbcSMatt Macy &vd_list); 7303eda14cbcSMatt Macy if (error != 0) { 7304eda14cbcSMatt Macy char guid_as_str[MAXNAMELEN]; 7305eda14cbcSMatt Macy 7306eda14cbcSMatt Macy (void) snprintf(guid_as_str, sizeof (guid_as_str), 7307eda14cbcSMatt Macy "%llu", (unsigned long long)vdev_guid); 7308eda14cbcSMatt Macy fnvlist_add_int64(vdev_errlist, guid_as_str, error); 7309eda14cbcSMatt Macy total_errors++; 7310eda14cbcSMatt Macy } 7311eda14cbcSMatt Macy } 7312eda14cbcSMatt Macy 7313eda14cbcSMatt Macy /* Wait for all initialize threads to stop. */ 7314eda14cbcSMatt Macy vdev_initialize_stop_wait(spa, &vd_list); 7315eda14cbcSMatt Macy 7316eda14cbcSMatt Macy /* Sync out the initializing state */ 7317eda14cbcSMatt Macy txg_wait_synced(spa->spa_dsl_pool, 0); 7318eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 7319eda14cbcSMatt Macy 7320eda14cbcSMatt Macy list_destroy(&vd_list); 7321eda14cbcSMatt Macy 7322eda14cbcSMatt Macy return (total_errors); 7323eda14cbcSMatt Macy } 7324eda14cbcSMatt Macy 7325eda14cbcSMatt Macy static int 7326eda14cbcSMatt Macy spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, 7327eda14cbcSMatt Macy uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list) 7328eda14cbcSMatt Macy { 7329eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7330eda14cbcSMatt Macy 7331eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 7332eda14cbcSMatt Macy 7333eda14cbcSMatt Macy /* Look up vdev and ensure it's a leaf. 
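 * Same ladder as spa_vdev_initialize_impl() above, plus EOPNOTSUPP
 * when the device cannot TRIM at all, or cannot do a secure TRIM
 * when one was requested.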
 */
7334eda14cbcSMatt Macy 	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7335eda14cbcSMatt Macy 	if (vd == NULL || vd->vdev_detached) {
7336eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7337eda14cbcSMatt Macy 		return (SET_ERROR(ENODEV));
7338eda14cbcSMatt Macy 	} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
7339eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7340eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
7341eda14cbcSMatt Macy 	} else if (!vdev_writeable(vd)) {
7342eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7343eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
7344eda14cbcSMatt Macy 	} else if (!vd->vdev_has_trim) {
7345eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7346eda14cbcSMatt Macy 		return (SET_ERROR(EOPNOTSUPP));
7347eda14cbcSMatt Macy 	} else if (secure && !vd->vdev_has_securetrim) {
7348eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7349eda14cbcSMatt Macy 		return (SET_ERROR(EOPNOTSUPP));
7350eda14cbcSMatt Macy 	}
7351eda14cbcSMatt Macy 	mutex_enter(&vd->vdev_trim_lock);
7352eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7353eda14cbcSMatt Macy 
7354eda14cbcSMatt Macy 	/*
7355eda14cbcSMatt Macy 	 * When we activate a TRIM action we check to see if the
7356eda14cbcSMatt Macy 	 * vdev_trim_thread is NULL. We do this instead of using the
7357eda14cbcSMatt Macy 	 * vdev_trim_state since there might be a previous TRIM process
7358eda14cbcSMatt Macy 	 * which has completed but whose thread has not yet exited.
7359eda14cbcSMatt Macy 	 */
7360eda14cbcSMatt Macy 	if (cmd_type == POOL_TRIM_START &&
7361eda14cbcSMatt Macy 	    (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
7362eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
7363eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
7364eda14cbcSMatt Macy 	} else if (cmd_type == POOL_TRIM_CANCEL &&
7365eda14cbcSMatt Macy 	    (vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
7366eda14cbcSMatt Macy 	    vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
7367eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
7368eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
7369eda14cbcSMatt Macy 	} else if (cmd_type == POOL_TRIM_SUSPEND &&
7370eda14cbcSMatt Macy 	    vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
7371eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
7372eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
7373eda14cbcSMatt Macy 	}
7374eda14cbcSMatt Macy 
7375eda14cbcSMatt Macy 	switch (cmd_type) {
7376eda14cbcSMatt Macy 	case POOL_TRIM_START:
7377eda14cbcSMatt Macy 		vdev_trim(vd, rate, partial, secure);
7378eda14cbcSMatt Macy 		break;
7379eda14cbcSMatt Macy 	case POOL_TRIM_CANCEL:
7380eda14cbcSMatt Macy 		vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
7381eda14cbcSMatt Macy 		break;
7382eda14cbcSMatt Macy 	case POOL_TRIM_SUSPEND:
7383eda14cbcSMatt Macy 		vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
7384eda14cbcSMatt Macy 		break;
7385eda14cbcSMatt Macy 	default:
7386eda14cbcSMatt Macy 		panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
7387eda14cbcSMatt Macy 	}
7388eda14cbcSMatt Macy 	mutex_exit(&vd->vdev_trim_lock);
7389eda14cbcSMatt Macy 
7390eda14cbcSMatt Macy 	return (0);
7391eda14cbcSMatt Macy }
7392eda14cbcSMatt Macy 
7393eda14cbcSMatt Macy /*
7394eda14cbcSMatt Macy  * Initiates a manual TRIM for the requested vdevs. This kicks off individual
7395eda14cbcSMatt Macy  * TRIM threads for each child vdev.
These threads pass over all of the free
7396eda14cbcSMatt Macy  * space in the vdev's metaslabs and issue TRIM commands for that space.
7397eda14cbcSMatt Macy  */
7398eda14cbcSMatt Macy int
7399eda14cbcSMatt Macy spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
7400eda14cbcSMatt Macy     boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
7401eda14cbcSMatt Macy {
7402eda14cbcSMatt Macy 	int total_errors = 0;
7403eda14cbcSMatt Macy 	list_t vd_list;
7404eda14cbcSMatt Macy 
7405eda14cbcSMatt Macy 	list_create(&vd_list, sizeof (vdev_t),
7406eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_trim_node));
7407eda14cbcSMatt Macy 
7408eda14cbcSMatt Macy 	/*
7409eda14cbcSMatt Macy 	 * We hold the namespace lock through the whole function
7410eda14cbcSMatt Macy 	 * to prevent any changes to the pool while we're starting or
7411eda14cbcSMatt Macy 	 * stopping TRIM. The config and state locks are held so that
7412eda14cbcSMatt Macy 	 * we can properly assess the vdev state before we commit to
7413eda14cbcSMatt Macy 	 * the TRIM operation.
7414eda14cbcSMatt Macy 	 */
7415eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
7416eda14cbcSMatt Macy 
7417eda14cbcSMatt Macy 	for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
7418eda14cbcSMatt Macy 	    pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
7419eda14cbcSMatt Macy 		uint64_t vdev_guid = fnvpair_value_uint64(pair);
7420eda14cbcSMatt Macy 
7421eda14cbcSMatt Macy 		int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
7422eda14cbcSMatt Macy 		    rate, partial, secure, &vd_list);
7423eda14cbcSMatt Macy 		if (error != 0) {
7424eda14cbcSMatt Macy 			char guid_as_str[MAXNAMELEN];
7425eda14cbcSMatt Macy 
7426eda14cbcSMatt Macy 			(void) snprintf(guid_as_str, sizeof (guid_as_str),
7427eda14cbcSMatt Macy 			    "%llu", (unsigned long long)vdev_guid);
7428eda14cbcSMatt Macy 			fnvlist_add_int64(vdev_errlist, guid_as_str, error);
7429eda14cbcSMatt Macy 			total_errors++;
7430eda14cbcSMatt Macy 		}
7431eda14cbcSMatt Macy 	}
7432eda14cbcSMatt Macy 
7433eda14cbcSMatt Macy 	/* Wait for all TRIM threads to stop. */
7434eda14cbcSMatt Macy 	vdev_trim_stop_wait(spa, &vd_list);
7435eda14cbcSMatt Macy 
7436eda14cbcSMatt Macy 	/* Sync out the TRIM state */
7437eda14cbcSMatt Macy 	txg_wait_synced(spa->spa_dsl_pool, 0);
7438eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7439eda14cbcSMatt Macy 
7440eda14cbcSMatt Macy 	list_destroy(&vd_list);
7441eda14cbcSMatt Macy 
7442eda14cbcSMatt Macy 	return (total_errors);
7443eda14cbcSMatt Macy }
7444eda14cbcSMatt Macy 
7445eda14cbcSMatt Macy /*
7446eda14cbcSMatt Macy  * Split a set of devices from their mirrors, and create a new pool from them.
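 *
 * A minimal caller sketch (the guid and pool name are hypothetical;
 * real configs are assembled in user space and arrive via the
 * vdev-split ioctl, with one child entry per top-level mirror):
 *
 *	nvlist_t *chl = fnvlist_alloc();
 *	fnvlist_add_uint64(chl, ZPOOL_CONFIG_GUID, leaf_guid);
 *	nvlist_t *tree = fnvlist_alloc();
 *	fnvlist_add_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &chl, 1);
 *	nvlist_t *config = fnvlist_alloc();
 *	fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, tree);
 *	error = spa_vdev_split_mirror(spa, "newpool", config, NULL, B_FALSE);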
7447eda14cbcSMatt Macy */ 7448eda14cbcSMatt Macy int 7449eda14cbcSMatt Macy spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 7450eda14cbcSMatt Macy nvlist_t *props, boolean_t exp) 7451eda14cbcSMatt Macy { 7452eda14cbcSMatt Macy int error = 0; 7453eda14cbcSMatt Macy uint64_t txg, *glist; 7454eda14cbcSMatt Macy spa_t *newspa; 7455eda14cbcSMatt Macy uint_t c, children, lastlog; 7456eda14cbcSMatt Macy nvlist_t **child, *nvl, *tmp; 7457eda14cbcSMatt Macy dmu_tx_t *tx; 7458eda14cbcSMatt Macy char *altroot = NULL; 7459eda14cbcSMatt Macy vdev_t *rvd, **vml = NULL; /* vdev modify list */ 7460eda14cbcSMatt Macy boolean_t activate_slog; 7461eda14cbcSMatt Macy 7462eda14cbcSMatt Macy ASSERT(spa_writeable(spa)); 7463eda14cbcSMatt Macy 7464eda14cbcSMatt Macy txg = spa_vdev_enter(spa); 7465eda14cbcSMatt Macy 7466eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7467eda14cbcSMatt Macy if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 7468eda14cbcSMatt Macy error = (spa_has_checkpoint(spa)) ? 7469eda14cbcSMatt Macy ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 7470eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, error)); 7471eda14cbcSMatt Macy } 7472eda14cbcSMatt Macy 7473eda14cbcSMatt Macy /* clear the log and flush everything up to now */ 7474eda14cbcSMatt Macy activate_slog = spa_passivate_log(spa); 7475eda14cbcSMatt Macy (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 7476eda14cbcSMatt Macy error = spa_reset_logs(spa); 7477eda14cbcSMatt Macy txg = spa_vdev_config_enter(spa); 7478eda14cbcSMatt Macy 7479eda14cbcSMatt Macy if (activate_slog) 7480eda14cbcSMatt Macy spa_activate_log(spa); 7481eda14cbcSMatt Macy 7482eda14cbcSMatt Macy if (error != 0) 7483eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, error)); 7484eda14cbcSMatt Macy 7485eda14cbcSMatt Macy /* check new spa name before going any further */ 7486eda14cbcSMatt Macy if (spa_lookup(newname) != NULL) 7487eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 7488eda14cbcSMatt Macy 7489eda14cbcSMatt Macy /* 7490eda14cbcSMatt Macy * scan through all the children to ensure they're all mirrors 7491eda14cbcSMatt Macy */ 7492eda14cbcSMatt Macy if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 7493eda14cbcSMatt Macy nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 7494eda14cbcSMatt Macy &children) != 0) 7495eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7496eda14cbcSMatt Macy 7497eda14cbcSMatt Macy /* first, check to ensure we've got the right child count */ 7498eda14cbcSMatt Macy rvd = spa->spa_root_vdev; 7499eda14cbcSMatt Macy lastlog = 0; 7500eda14cbcSMatt Macy for (c = 0; c < rvd->vdev_children; c++) { 7501eda14cbcSMatt Macy vdev_t *vd = rvd->vdev_child[c]; 7502eda14cbcSMatt Macy 7503eda14cbcSMatt Macy /* don't count the holes & logs as children */ 7504eda14cbcSMatt Macy if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops && 7505eda14cbcSMatt Macy !vdev_is_concrete(vd))) { 7506eda14cbcSMatt Macy if (lastlog == 0) 7507eda14cbcSMatt Macy lastlog = c; 7508eda14cbcSMatt Macy continue; 7509eda14cbcSMatt Macy } 7510eda14cbcSMatt Macy 7511eda14cbcSMatt Macy lastlog = 0; 7512eda14cbcSMatt Macy } 7513eda14cbcSMatt Macy if (children != (lastlog != 0 ? 
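/* i.e., exclude any trailing run of hole and log children from the count */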
lastlog : rvd->vdev_children)) 7514eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7515eda14cbcSMatt Macy 7516eda14cbcSMatt Macy /* next, ensure no spare or cache devices are part of the split */ 7517eda14cbcSMatt Macy if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 7518eda14cbcSMatt Macy nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 7519eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7520eda14cbcSMatt Macy 7521eda14cbcSMatt Macy vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 7522eda14cbcSMatt Macy glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 7523eda14cbcSMatt Macy 7524eda14cbcSMatt Macy /* then, loop over each vdev and validate it */ 7525eda14cbcSMatt Macy for (c = 0; c < children; c++) { 7526eda14cbcSMatt Macy uint64_t is_hole = 0; 7527eda14cbcSMatt Macy 7528eda14cbcSMatt Macy (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 7529eda14cbcSMatt Macy &is_hole); 7530eda14cbcSMatt Macy 7531eda14cbcSMatt Macy if (is_hole != 0) { 7532eda14cbcSMatt Macy if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 7533eda14cbcSMatt Macy spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 7534eda14cbcSMatt Macy continue; 7535eda14cbcSMatt Macy } else { 7536eda14cbcSMatt Macy error = SET_ERROR(EINVAL); 7537eda14cbcSMatt Macy break; 7538eda14cbcSMatt Macy } 7539eda14cbcSMatt Macy } 7540eda14cbcSMatt Macy 7541eda14cbcSMatt Macy /* deal with indirect vdevs */ 7542eda14cbcSMatt Macy if (spa->spa_root_vdev->vdev_child[c]->vdev_ops == 7543eda14cbcSMatt Macy &vdev_indirect_ops) 7544eda14cbcSMatt Macy continue; 7545eda14cbcSMatt Macy 7546eda14cbcSMatt Macy /* which disk is going to be split? */ 7547eda14cbcSMatt Macy if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 7548eda14cbcSMatt Macy &glist[c]) != 0) { 7549eda14cbcSMatt Macy error = SET_ERROR(EINVAL); 7550eda14cbcSMatt Macy break; 7551eda14cbcSMatt Macy } 7552eda14cbcSMatt Macy 7553eda14cbcSMatt Macy /* look it up in the spa */ 7554eda14cbcSMatt Macy vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 7555eda14cbcSMatt Macy if (vml[c] == NULL) { 7556eda14cbcSMatt Macy error = SET_ERROR(ENODEV); 7557eda14cbcSMatt Macy break; 7558eda14cbcSMatt Macy } 7559eda14cbcSMatt Macy 7560eda14cbcSMatt Macy /* make sure there's nothing stopping the split */ 7561eda14cbcSMatt Macy if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 7562eda14cbcSMatt Macy vml[c]->vdev_islog || 7563eda14cbcSMatt Macy !vdev_is_concrete(vml[c]) || 7564eda14cbcSMatt Macy vml[c]->vdev_isspare || 7565eda14cbcSMatt Macy vml[c]->vdev_isl2cache || 7566eda14cbcSMatt Macy !vdev_writeable(vml[c]) || 7567eda14cbcSMatt Macy vml[c]->vdev_children != 0 || 7568eda14cbcSMatt Macy vml[c]->vdev_state != VDEV_STATE_HEALTHY || 7569eda14cbcSMatt Macy c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 7570eda14cbcSMatt Macy error = SET_ERROR(EINVAL); 7571eda14cbcSMatt Macy break; 7572eda14cbcSMatt Macy } 7573eda14cbcSMatt Macy 7574eda14cbcSMatt Macy if (vdev_dtl_required(vml[c]) || 7575eda14cbcSMatt Macy vdev_resilver_needed(vml[c], NULL, NULL)) { 7576eda14cbcSMatt Macy error = SET_ERROR(EBUSY); 7577eda14cbcSMatt Macy break; 7578eda14cbcSMatt Macy } 7579eda14cbcSMatt Macy 7580eda14cbcSMatt Macy /* we need certain info from the top level */ 7581eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 7582eda14cbcSMatt Macy vml[c]->vdev_top->vdev_ms_array) == 0); 7583eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 7584eda14cbcSMatt Macy 
vml[c]->vdev_top->vdev_ms_shift) == 0); 7585eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 7586eda14cbcSMatt Macy vml[c]->vdev_top->vdev_asize) == 0); 7587eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 7588eda14cbcSMatt Macy vml[c]->vdev_top->vdev_ashift) == 0); 7589eda14cbcSMatt Macy 7590eda14cbcSMatt Macy /* transfer per-vdev ZAPs */ 7591eda14cbcSMatt Macy ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0); 7592eda14cbcSMatt Macy VERIFY0(nvlist_add_uint64(child[c], 7593eda14cbcSMatt Macy ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap)); 7594eda14cbcSMatt Macy 7595eda14cbcSMatt Macy ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0); 7596eda14cbcSMatt Macy VERIFY0(nvlist_add_uint64(child[c], 7597eda14cbcSMatt Macy ZPOOL_CONFIG_VDEV_TOP_ZAP, 7598eda14cbcSMatt Macy vml[c]->vdev_parent->vdev_top_zap)); 7599eda14cbcSMatt Macy } 7600eda14cbcSMatt Macy 7601eda14cbcSMatt Macy if (error != 0) { 7602eda14cbcSMatt Macy kmem_free(vml, children * sizeof (vdev_t *)); 7603eda14cbcSMatt Macy kmem_free(glist, children * sizeof (uint64_t)); 7604eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, error)); 7605eda14cbcSMatt Macy } 7606eda14cbcSMatt Macy 7607eda14cbcSMatt Macy /* stop writers from using the disks */ 7608eda14cbcSMatt Macy for (c = 0; c < children; c++) { 7609eda14cbcSMatt Macy if (vml[c] != NULL) 7610eda14cbcSMatt Macy vml[c]->vdev_offline = B_TRUE; 7611eda14cbcSMatt Macy } 7612eda14cbcSMatt Macy vdev_reopen(spa->spa_root_vdev); 7613eda14cbcSMatt Macy 7614eda14cbcSMatt Macy /* 7615eda14cbcSMatt Macy * Temporarily record the splitting vdevs in the spa config. This 7616eda14cbcSMatt Macy * will disappear once the config is regenerated. 7617eda14cbcSMatt Macy */ 7618eda14cbcSMatt Macy VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 7619eda14cbcSMatt Macy VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 7620eda14cbcSMatt Macy glist, children) == 0); 7621eda14cbcSMatt Macy kmem_free(glist, children * sizeof (uint64_t)); 7622eda14cbcSMatt Macy 7623eda14cbcSMatt Macy mutex_enter(&spa->spa_props_lock); 7624eda14cbcSMatt Macy VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 7625eda14cbcSMatt Macy nvl) == 0); 7626eda14cbcSMatt Macy mutex_exit(&spa->spa_props_lock); 7627eda14cbcSMatt Macy spa->spa_config_splitting = nvl; 7628eda14cbcSMatt Macy vdev_config_dirty(spa->spa_root_vdev); 7629eda14cbcSMatt Macy 7630eda14cbcSMatt Macy /* configure and create the new pool */ 7631eda14cbcSMatt Macy VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 7632eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 7633eda14cbcSMatt Macy exp ? 
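/* exp: caller wants the new pool left in the exported state */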
POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 7634eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 7635eda14cbcSMatt Macy spa_version(spa)) == 0); 7636eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 7637eda14cbcSMatt Macy spa->spa_config_txg) == 0); 7638eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 7639eda14cbcSMatt Macy spa_generate_guid(NULL)) == 0); 7640eda14cbcSMatt Macy VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 7641eda14cbcSMatt Macy (void) nvlist_lookup_string(props, 7642eda14cbcSMatt Macy zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 7643eda14cbcSMatt Macy 7644eda14cbcSMatt Macy /* add the new pool to the namespace */ 7645eda14cbcSMatt Macy newspa = spa_add(newname, config, altroot); 7646eda14cbcSMatt Macy newspa->spa_avz_action = AVZ_ACTION_REBUILD; 7647eda14cbcSMatt Macy newspa->spa_config_txg = spa->spa_config_txg; 7648eda14cbcSMatt Macy spa_set_log_state(newspa, SPA_LOG_CLEAR); 7649eda14cbcSMatt Macy 7650eda14cbcSMatt Macy /* release the spa config lock, retaining the namespace lock */ 7651eda14cbcSMatt Macy spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 7652eda14cbcSMatt Macy 7653eda14cbcSMatt Macy if (zio_injection_enabled) 7654eda14cbcSMatt Macy zio_handle_panic_injection(spa, FTAG, 1); 7655eda14cbcSMatt Macy 7656eda14cbcSMatt Macy spa_activate(newspa, spa_mode_global); 7657eda14cbcSMatt Macy spa_async_suspend(newspa); 7658eda14cbcSMatt Macy 7659eda14cbcSMatt Macy /* 7660eda14cbcSMatt Macy * Temporarily stop the initializing and TRIM activity. We set the 7661eda14cbcSMatt Macy * state to ACTIVE so that we know to resume initializing or TRIM 7662eda14cbcSMatt Macy * once the split has completed. 7663eda14cbcSMatt Macy */ 7664eda14cbcSMatt Macy list_t vd_initialize_list; 7665eda14cbcSMatt Macy list_create(&vd_initialize_list, sizeof (vdev_t), 7666eda14cbcSMatt Macy offsetof(vdev_t, vdev_initialize_node)); 7667eda14cbcSMatt Macy 7668eda14cbcSMatt Macy list_t vd_trim_list; 7669eda14cbcSMatt Macy list_create(&vd_trim_list, sizeof (vdev_t), 7670eda14cbcSMatt Macy offsetof(vdev_t, vdev_trim_node)); 7671eda14cbcSMatt Macy 7672eda14cbcSMatt Macy for (c = 0; c < children; c++) { 7673eda14cbcSMatt Macy if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) { 7674eda14cbcSMatt Macy mutex_enter(&vml[c]->vdev_initialize_lock); 7675eda14cbcSMatt Macy vdev_initialize_stop(vml[c], 7676eda14cbcSMatt Macy VDEV_INITIALIZE_ACTIVE, &vd_initialize_list); 7677eda14cbcSMatt Macy mutex_exit(&vml[c]->vdev_initialize_lock); 7678eda14cbcSMatt Macy 7679eda14cbcSMatt Macy mutex_enter(&vml[c]->vdev_trim_lock); 7680eda14cbcSMatt Macy vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list); 7681eda14cbcSMatt Macy mutex_exit(&vml[c]->vdev_trim_lock); 7682eda14cbcSMatt Macy } 7683eda14cbcSMatt Macy } 7684eda14cbcSMatt Macy 7685eda14cbcSMatt Macy vdev_initialize_stop_wait(spa, &vd_initialize_list); 7686eda14cbcSMatt Macy vdev_trim_stop_wait(spa, &vd_trim_list); 7687eda14cbcSMatt Macy 7688eda14cbcSMatt Macy list_destroy(&vd_initialize_list); 7689eda14cbcSMatt Macy list_destroy(&vd_trim_list); 7690eda14cbcSMatt Macy 7691eda14cbcSMatt Macy newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT; 7692eda14cbcSMatt Macy newspa->spa_is_splitting = B_TRUE; 7693eda14cbcSMatt Macy 7694eda14cbcSMatt Macy /* create the new pool from the disks of the original pool */ 7695eda14cbcSMatt Macy error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE); 7696eda14cbcSMatt Macy if (error) 7697eda14cbcSMatt Macy 
goto out; 7698eda14cbcSMatt Macy 7699eda14cbcSMatt Macy /* if that worked, generate a real config for the new pool */ 7700eda14cbcSMatt Macy if (newspa->spa_root_vdev != NULL) { 7701eda14cbcSMatt Macy VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 7702eda14cbcSMatt Macy NV_UNIQUE_NAME, KM_SLEEP) == 0); 7703eda14cbcSMatt Macy VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 7704eda14cbcSMatt Macy ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 7705eda14cbcSMatt Macy spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 7706eda14cbcSMatt Macy B_TRUE)); 7707eda14cbcSMatt Macy } 7708eda14cbcSMatt Macy 7709eda14cbcSMatt Macy /* set the props */ 7710eda14cbcSMatt Macy if (props != NULL) { 7711eda14cbcSMatt Macy spa_configfile_set(newspa, props, B_FALSE); 7712eda14cbcSMatt Macy error = spa_prop_set(newspa, props); 7713eda14cbcSMatt Macy if (error) 7714eda14cbcSMatt Macy goto out; 7715eda14cbcSMatt Macy } 7716eda14cbcSMatt Macy 7717eda14cbcSMatt Macy /* flush everything */ 7718eda14cbcSMatt Macy txg = spa_vdev_config_enter(newspa); 7719eda14cbcSMatt Macy vdev_config_dirty(newspa->spa_root_vdev); 7720eda14cbcSMatt Macy (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 7721eda14cbcSMatt Macy 7722eda14cbcSMatt Macy if (zio_injection_enabled) 7723eda14cbcSMatt Macy zio_handle_panic_injection(spa, FTAG, 2); 7724eda14cbcSMatt Macy 7725eda14cbcSMatt Macy spa_async_resume(newspa); 7726eda14cbcSMatt Macy 7727eda14cbcSMatt Macy /* finally, update the original pool's config */ 7728eda14cbcSMatt Macy txg = spa_vdev_config_enter(spa); 7729eda14cbcSMatt Macy tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 7730eda14cbcSMatt Macy error = dmu_tx_assign(tx, TXG_WAIT); 7731eda14cbcSMatt Macy if (error != 0) 7732eda14cbcSMatt Macy dmu_tx_abort(tx); 7733eda14cbcSMatt Macy for (c = 0; c < children; c++) { 7734eda14cbcSMatt Macy if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) { 7735eda14cbcSMatt Macy vdev_t *tvd = vml[c]->vdev_top; 7736eda14cbcSMatt Macy 7737eda14cbcSMatt Macy /* 7738eda14cbcSMatt Macy * Need to be sure the detachable VDEV is not 7739eda14cbcSMatt Macy * on any *other* txg's DTL list to prevent it 7740eda14cbcSMatt Macy * from being accessed after it's freed. 
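 * (This is the same TXG_SIZE sweep that spa_vdev_detach() performs
 * for a plain detach above.)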
7741eda14cbcSMatt Macy */ 7742eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) { 7743eda14cbcSMatt Macy (void) txg_list_remove_this( 7744eda14cbcSMatt Macy &tvd->vdev_dtl_list, vml[c], t); 7745eda14cbcSMatt Macy } 7746eda14cbcSMatt Macy 7747eda14cbcSMatt Macy vdev_split(vml[c]); 7748eda14cbcSMatt Macy if (error == 0) 7749eda14cbcSMatt Macy spa_history_log_internal(spa, "detach", tx, 7750eda14cbcSMatt Macy "vdev=%s", vml[c]->vdev_path); 7751eda14cbcSMatt Macy 7752eda14cbcSMatt Macy vdev_free(vml[c]); 7753eda14cbcSMatt Macy } 7754eda14cbcSMatt Macy } 7755eda14cbcSMatt Macy spa->spa_avz_action = AVZ_ACTION_REBUILD; 7756eda14cbcSMatt Macy vdev_config_dirty(spa->spa_root_vdev); 7757eda14cbcSMatt Macy spa->spa_config_splitting = NULL; 7758eda14cbcSMatt Macy nvlist_free(nvl); 7759eda14cbcSMatt Macy if (error == 0) 7760eda14cbcSMatt Macy dmu_tx_commit(tx); 7761eda14cbcSMatt Macy (void) spa_vdev_exit(spa, NULL, txg, 0); 7762eda14cbcSMatt Macy 7763eda14cbcSMatt Macy if (zio_injection_enabled) 7764eda14cbcSMatt Macy zio_handle_panic_injection(spa, FTAG, 3); 7765eda14cbcSMatt Macy 7766eda14cbcSMatt Macy /* split is complete; log a history record */ 7767eda14cbcSMatt Macy spa_history_log_internal(newspa, "split", NULL, 7768eda14cbcSMatt Macy "from pool %s", spa_name(spa)); 7769eda14cbcSMatt Macy 7770eda14cbcSMatt Macy newspa->spa_is_splitting = B_FALSE; 7771eda14cbcSMatt Macy kmem_free(vml, children * sizeof (vdev_t *)); 7772eda14cbcSMatt Macy 7773eda14cbcSMatt Macy /* if we're not going to mount the filesystems in userland, export */ 7774eda14cbcSMatt Macy if (exp) 7775eda14cbcSMatt Macy error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 7776eda14cbcSMatt Macy B_FALSE, B_FALSE); 7777eda14cbcSMatt Macy 7778eda14cbcSMatt Macy return (error); 7779eda14cbcSMatt Macy 7780eda14cbcSMatt Macy out: 7781eda14cbcSMatt Macy spa_unload(newspa); 7782eda14cbcSMatt Macy spa_deactivate(newspa); 7783eda14cbcSMatt Macy spa_remove(newspa); 7784eda14cbcSMatt Macy 7785eda14cbcSMatt Macy txg = spa_vdev_config_enter(spa); 7786eda14cbcSMatt Macy 7787eda14cbcSMatt Macy /* re-online all offlined disks */ 7788eda14cbcSMatt Macy for (c = 0; c < children; c++) { 7789eda14cbcSMatt Macy if (vml[c] != NULL) 7790eda14cbcSMatt Macy vml[c]->vdev_offline = B_FALSE; 7791eda14cbcSMatt Macy } 7792eda14cbcSMatt Macy 7793eda14cbcSMatt Macy /* restart initializing or trimming disks as necessary */ 7794eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART); 7795eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_TRIM_RESTART); 7796eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART); 7797eda14cbcSMatt Macy 7798eda14cbcSMatt Macy vdev_reopen(spa->spa_root_vdev); 7799eda14cbcSMatt Macy 7800eda14cbcSMatt Macy nvlist_free(spa->spa_config_splitting); 7801eda14cbcSMatt Macy spa->spa_config_splitting = NULL; 7802eda14cbcSMatt Macy (void) spa_vdev_exit(spa, NULL, txg, error); 7803eda14cbcSMatt Macy 7804eda14cbcSMatt Macy kmem_free(vml, children * sizeof (vdev_t *)); 7805eda14cbcSMatt Macy return (error); 7806eda14cbcSMatt Macy } 7807eda14cbcSMatt Macy 7808eda14cbcSMatt Macy /* 7809eda14cbcSMatt Macy * Find any device that's done replacing, or a vdev marked 'unspare' that's 7810eda14cbcSMatt Macy * currently spared, so we can detach it. 
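 *
 * For example, midway through "zpool replace old new" the relevant
 * subtree looks like this (oldest child first, newest last):
 *
 *	mirror
 *	    replacing
 *	        old-disk	(child 0)
 *	        new-disk	(child N-1)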
7811eda14cbcSMatt Macy */ 7812eda14cbcSMatt Macy static vdev_t * 7813eda14cbcSMatt Macy spa_vdev_resilver_done_hunt(vdev_t *vd) 7814eda14cbcSMatt Macy { 7815eda14cbcSMatt Macy vdev_t *newvd, *oldvd; 7816eda14cbcSMatt Macy 7817eda14cbcSMatt Macy for (int c = 0; c < vd->vdev_children; c++) { 7818eda14cbcSMatt Macy oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 7819eda14cbcSMatt Macy if (oldvd != NULL) 7820eda14cbcSMatt Macy return (oldvd); 7821eda14cbcSMatt Macy } 7822eda14cbcSMatt Macy 7823eda14cbcSMatt Macy /* 7824eda14cbcSMatt Macy * Check for a completed replacement. We always consider the first 7825eda14cbcSMatt Macy * vdev in the list to be the oldest vdev, and the last one to be 7826eda14cbcSMatt Macy * the newest (see spa_vdev_attach() for how that works). In 7827eda14cbcSMatt Macy * the case where the newest vdev is faulted, we will not automatically 7828eda14cbcSMatt Macy * remove it after a resilver completes. This is OK as it will require 7829eda14cbcSMatt Macy * user intervention to determine which disk the admin wishes to keep. 7830eda14cbcSMatt Macy */ 7831eda14cbcSMatt Macy if (vd->vdev_ops == &vdev_replacing_ops) { 7832eda14cbcSMatt Macy ASSERT(vd->vdev_children > 1); 7833eda14cbcSMatt Macy 7834eda14cbcSMatt Macy newvd = vd->vdev_child[vd->vdev_children - 1]; 7835eda14cbcSMatt Macy oldvd = vd->vdev_child[0]; 7836eda14cbcSMatt Macy 7837eda14cbcSMatt Macy if (vdev_dtl_empty(newvd, DTL_MISSING) && 7838eda14cbcSMatt Macy vdev_dtl_empty(newvd, DTL_OUTAGE) && 7839eda14cbcSMatt Macy !vdev_dtl_required(oldvd)) 7840eda14cbcSMatt Macy return (oldvd); 7841eda14cbcSMatt Macy } 7842eda14cbcSMatt Macy 7843eda14cbcSMatt Macy /* 7844eda14cbcSMatt Macy * Check for a completed resilver with the 'unspare' flag set. 7845eda14cbcSMatt Macy * Also potentially update faulted state. 7846eda14cbcSMatt Macy */ 7847eda14cbcSMatt Macy if (vd->vdev_ops == &vdev_spare_ops) { 7848eda14cbcSMatt Macy vdev_t *first = vd->vdev_child[0]; 7849eda14cbcSMatt Macy vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 7850eda14cbcSMatt Macy 7851eda14cbcSMatt Macy if (last->vdev_unspare) { 7852eda14cbcSMatt Macy oldvd = first; 7853eda14cbcSMatt Macy newvd = last; 7854eda14cbcSMatt Macy } else if (first->vdev_unspare) { 7855eda14cbcSMatt Macy oldvd = last; 7856eda14cbcSMatt Macy newvd = first; 7857eda14cbcSMatt Macy } else { 7858eda14cbcSMatt Macy oldvd = NULL; 7859eda14cbcSMatt Macy } 7860eda14cbcSMatt Macy 7861eda14cbcSMatt Macy if (oldvd != NULL && 7862eda14cbcSMatt Macy vdev_dtl_empty(newvd, DTL_MISSING) && 7863eda14cbcSMatt Macy vdev_dtl_empty(newvd, DTL_OUTAGE) && 7864eda14cbcSMatt Macy !vdev_dtl_required(oldvd)) 7865eda14cbcSMatt Macy return (oldvd); 7866eda14cbcSMatt Macy 7867eda14cbcSMatt Macy vdev_propagate_state(vd); 7868eda14cbcSMatt Macy 7869eda14cbcSMatt Macy /* 7870eda14cbcSMatt Macy * If there are more than two spares attached to a disk, 7871eda14cbcSMatt Macy * and those spares are not required, then we want to 7872eda14cbcSMatt Macy * attempt to free them up now so that they can be used 7873eda14cbcSMatt Macy * by other pools. Once we're back down to a single 7874eda14cbcSMatt Macy * disk+spare, we stop removing them. 
7875eda14cbcSMatt Macy 	 */
7876eda14cbcSMatt Macy 		if (vd->vdev_children > 2) {
7877eda14cbcSMatt Macy 			newvd = vd->vdev_child[1];
7878eda14cbcSMatt Macy 
7879eda14cbcSMatt Macy 			if (newvd->vdev_isspare && last->vdev_isspare &&
7880eda14cbcSMatt Macy 			    vdev_dtl_empty(last, DTL_MISSING) &&
7881eda14cbcSMatt Macy 			    vdev_dtl_empty(last, DTL_OUTAGE) &&
7882eda14cbcSMatt Macy 			    !vdev_dtl_required(newvd))
7883eda14cbcSMatt Macy 				return (newvd);
7884eda14cbcSMatt Macy 		}
7885eda14cbcSMatt Macy 	}
7886eda14cbcSMatt Macy 
7887eda14cbcSMatt Macy 	return (NULL);
7888eda14cbcSMatt Macy }
7889eda14cbcSMatt Macy 
7890eda14cbcSMatt Macy static void
7891eda14cbcSMatt Macy spa_vdev_resilver_done(spa_t *spa)
7892eda14cbcSMatt Macy {
7893eda14cbcSMatt Macy 	vdev_t *vd, *pvd, *ppvd;
7894eda14cbcSMatt Macy 	uint64_t guid, sguid, pguid, ppguid;
7895eda14cbcSMatt Macy 
7896eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7897eda14cbcSMatt Macy 
7898eda14cbcSMatt Macy 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
7899eda14cbcSMatt Macy 		pvd = vd->vdev_parent;
7900eda14cbcSMatt Macy 		ppvd = pvd->vdev_parent;
7901eda14cbcSMatt Macy 		guid = vd->vdev_guid;
7902eda14cbcSMatt Macy 		pguid = pvd->vdev_guid;
7903eda14cbcSMatt Macy 		ppguid = ppvd->vdev_guid;
7904eda14cbcSMatt Macy 		sguid = 0;
7905eda14cbcSMatt Macy 		/*
7906eda14cbcSMatt Macy 		 * If we have just finished replacing a hot spared device, then
7907eda14cbcSMatt Macy 		 * we need to detach the parent's first child (the original hot
7908eda14cbcSMatt Macy 		 * spare) as well.
7909eda14cbcSMatt Macy 		 */
7910eda14cbcSMatt Macy 		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
7911eda14cbcSMatt Macy 		    ppvd->vdev_children == 2) {
7912eda14cbcSMatt Macy 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
7913eda14cbcSMatt Macy 			sguid = ppvd->vdev_child[1]->vdev_guid;
7914eda14cbcSMatt Macy 		}
7915eda14cbcSMatt Macy 		ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
7916eda14cbcSMatt Macy 
7917eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
7918eda14cbcSMatt Macy 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
7919eda14cbcSMatt Macy 			return;
7920eda14cbcSMatt Macy 		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
7921eda14cbcSMatt Macy 			return;
7922eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7923eda14cbcSMatt Macy 	}
7924eda14cbcSMatt Macy 
7925eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
7926eda14cbcSMatt Macy 
7927eda14cbcSMatt Macy 	/*
7928eda14cbcSMatt Macy 	 * If a detach was not performed above, replace waiters will not have
7929eda14cbcSMatt Macy 	 * been notified, in which case we must do so now.
7930eda14cbcSMatt Macy 	 */
7931eda14cbcSMatt Macy 	spa_notify_waiters(spa);
7932eda14cbcSMatt Macy }
7933eda14cbcSMatt Macy 
7934eda14cbcSMatt Macy /*
7935eda14cbcSMatt Macy  * Update the stored path or FRU for this vdev.
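 *
 * The thin wrappers below select the variant; a hypothetical caller
 * (the guid and path are illustrative only):
 *
 *	error = spa_vdev_setpath(spa, guid, "/dev/disk/by-id/ata-NEW");
 *	error = spa_vdev_setfru(spa, guid, new_fru);
 *
 * Storing a value identical to the current one leaves sync at B_FALSE,
 * so no config update is triggered.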
7936eda14cbcSMatt Macy */ 7937eda14cbcSMatt Macy static int 7938eda14cbcSMatt Macy spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 7939eda14cbcSMatt Macy boolean_t ispath) 7940eda14cbcSMatt Macy { 7941eda14cbcSMatt Macy vdev_t *vd; 7942eda14cbcSMatt Macy boolean_t sync = B_FALSE; 7943eda14cbcSMatt Macy 7944eda14cbcSMatt Macy ASSERT(spa_writeable(spa)); 7945eda14cbcSMatt Macy 7946eda14cbcSMatt Macy spa_vdev_state_enter(spa, SCL_ALL); 7947eda14cbcSMatt Macy 7948eda14cbcSMatt Macy if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 7949eda14cbcSMatt Macy return (spa_vdev_state_exit(spa, NULL, ENOENT)); 7950eda14cbcSMatt Macy 7951eda14cbcSMatt Macy if (!vd->vdev_ops->vdev_op_leaf) 7952eda14cbcSMatt Macy return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 7953eda14cbcSMatt Macy 7954eda14cbcSMatt Macy if (ispath) { 7955eda14cbcSMatt Macy if (strcmp(value, vd->vdev_path) != 0) { 7956eda14cbcSMatt Macy spa_strfree(vd->vdev_path); 7957eda14cbcSMatt Macy vd->vdev_path = spa_strdup(value); 7958eda14cbcSMatt Macy sync = B_TRUE; 7959eda14cbcSMatt Macy } 7960eda14cbcSMatt Macy } else { 7961eda14cbcSMatt Macy if (vd->vdev_fru == NULL) { 7962eda14cbcSMatt Macy vd->vdev_fru = spa_strdup(value); 7963eda14cbcSMatt Macy sync = B_TRUE; 7964eda14cbcSMatt Macy } else if (strcmp(value, vd->vdev_fru) != 0) { 7965eda14cbcSMatt Macy spa_strfree(vd->vdev_fru); 7966eda14cbcSMatt Macy vd->vdev_fru = spa_strdup(value); 7967eda14cbcSMatt Macy sync = B_TRUE; 7968eda14cbcSMatt Macy } 7969eda14cbcSMatt Macy } 7970eda14cbcSMatt Macy 7971eda14cbcSMatt Macy return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0)); 7972eda14cbcSMatt Macy } 7973eda14cbcSMatt Macy 7974eda14cbcSMatt Macy int 7975eda14cbcSMatt Macy spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 7976eda14cbcSMatt Macy { 7977eda14cbcSMatt Macy return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 7978eda14cbcSMatt Macy } 7979eda14cbcSMatt Macy 7980eda14cbcSMatt Macy int 7981eda14cbcSMatt Macy spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 7982eda14cbcSMatt Macy { 7983eda14cbcSMatt Macy return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 7984eda14cbcSMatt Macy } 7985eda14cbcSMatt Macy 7986eda14cbcSMatt Macy /* 7987eda14cbcSMatt Macy * ========================================================================== 7988eda14cbcSMatt Macy * SPA Scanning 7989eda14cbcSMatt Macy * ========================================================================== 7990eda14cbcSMatt Macy */ 7991eda14cbcSMatt Macy int 7992eda14cbcSMatt Macy spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd) 7993eda14cbcSMatt Macy { 7994eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 7995eda14cbcSMatt Macy 7996eda14cbcSMatt Macy if (dsl_scan_resilvering(spa->spa_dsl_pool)) 7997eda14cbcSMatt Macy return (SET_ERROR(EBUSY)); 7998eda14cbcSMatt Macy 7999eda14cbcSMatt Macy return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd)); 8000eda14cbcSMatt Macy } 8001eda14cbcSMatt Macy 8002eda14cbcSMatt Macy int 8003eda14cbcSMatt Macy spa_scan_stop(spa_t *spa) 8004eda14cbcSMatt Macy { 8005eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8006eda14cbcSMatt Macy if (dsl_scan_resilvering(spa->spa_dsl_pool)) 8007eda14cbcSMatt Macy return (SET_ERROR(EBUSY)); 8008eda14cbcSMatt Macy return (dsl_scan_cancel(spa->spa_dsl_pool)); 8009eda14cbcSMatt Macy } 8010eda14cbcSMatt Macy 8011eda14cbcSMatt Macy int 8012eda14cbcSMatt Macy spa_scan(spa_t *spa, pool_scan_func_t func) 8013eda14cbcSMatt Macy { 8014eda14cbcSMatt 
Macy ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8015eda14cbcSMatt Macy 8016eda14cbcSMatt Macy if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 8017eda14cbcSMatt Macy return (SET_ERROR(ENOTSUP)); 8018eda14cbcSMatt Macy 8019eda14cbcSMatt Macy if (func == POOL_SCAN_RESILVER && 8020eda14cbcSMatt Macy !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) 8021eda14cbcSMatt Macy return (SET_ERROR(ENOTSUP)); 8022eda14cbcSMatt Macy 8023eda14cbcSMatt Macy /* 8024eda14cbcSMatt Macy * If a resilver was requested, but there is no DTL on a 8025eda14cbcSMatt Macy * writeable leaf device, we have nothing to do. 8026eda14cbcSMatt Macy */ 8027eda14cbcSMatt Macy if (func == POOL_SCAN_RESILVER && 8028eda14cbcSMatt Macy !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 8029eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 8030eda14cbcSMatt Macy return (0); 8031eda14cbcSMatt Macy } 8032eda14cbcSMatt Macy 8033eda14cbcSMatt Macy return (dsl_scan(spa->spa_dsl_pool, func)); 8034eda14cbcSMatt Macy } 8035eda14cbcSMatt Macy 8036eda14cbcSMatt Macy /* 8037eda14cbcSMatt Macy * ========================================================================== 8038eda14cbcSMatt Macy * SPA async task processing 8039eda14cbcSMatt Macy * ========================================================================== 8040eda14cbcSMatt Macy */ 8041eda14cbcSMatt Macy 8042eda14cbcSMatt Macy static void 8043eda14cbcSMatt Macy spa_async_remove(spa_t *spa, vdev_t *vd) 8044eda14cbcSMatt Macy { 8045eda14cbcSMatt Macy if (vd->vdev_remove_wanted) { 8046eda14cbcSMatt Macy vd->vdev_remove_wanted = B_FALSE; 8047eda14cbcSMatt Macy vd->vdev_delayed_close = B_FALSE; 8048eda14cbcSMatt Macy vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 8049eda14cbcSMatt Macy 8050eda14cbcSMatt Macy /* 8051eda14cbcSMatt Macy * We want to clear the stats, but we don't want to do a full 8052eda14cbcSMatt Macy * vdev_clear() as that will cause us to throw away 8053eda14cbcSMatt Macy * degraded/faulted state as well as attempt to reopen the 8054eda14cbcSMatt Macy * device, all of which is a waste. 8055eda14cbcSMatt Macy */ 8056eda14cbcSMatt Macy vd->vdev_stat.vs_read_errors = 0; 8057eda14cbcSMatt Macy vd->vdev_stat.vs_write_errors = 0; 8058eda14cbcSMatt Macy vd->vdev_stat.vs_checksum_errors = 0; 8059eda14cbcSMatt Macy 8060eda14cbcSMatt Macy vdev_state_dirty(vd->vdev_top); 80617877fdebSMatt Macy 80627877fdebSMatt Macy /* Tell userspace that the vdev is gone. 
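 * (The posted removal event is what consumers such as zed, the ZFS
 * event daemon, key off of.)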
*/ 80637877fdebSMatt Macy zfs_post_remove(spa, vd); 8064eda14cbcSMatt Macy } 8065eda14cbcSMatt Macy 8066eda14cbcSMatt Macy for (int c = 0; c < vd->vdev_children; c++) 8067eda14cbcSMatt Macy spa_async_remove(spa, vd->vdev_child[c]); 8068eda14cbcSMatt Macy } 8069eda14cbcSMatt Macy 8070eda14cbcSMatt Macy static void 8071eda14cbcSMatt Macy spa_async_probe(spa_t *spa, vdev_t *vd) 8072eda14cbcSMatt Macy { 8073eda14cbcSMatt Macy if (vd->vdev_probe_wanted) { 8074eda14cbcSMatt Macy vd->vdev_probe_wanted = B_FALSE; 8075eda14cbcSMatt Macy vdev_reopen(vd); /* vdev_open() does the actual probe */ 8076eda14cbcSMatt Macy } 8077eda14cbcSMatt Macy 8078eda14cbcSMatt Macy for (int c = 0; c < vd->vdev_children; c++) 8079eda14cbcSMatt Macy spa_async_probe(spa, vd->vdev_child[c]); 8080eda14cbcSMatt Macy } 8081eda14cbcSMatt Macy 8082eda14cbcSMatt Macy static void 8083eda14cbcSMatt Macy spa_async_autoexpand(spa_t *spa, vdev_t *vd) 8084eda14cbcSMatt Macy { 8085eda14cbcSMatt Macy if (!spa->spa_autoexpand) 8086eda14cbcSMatt Macy return; 8087eda14cbcSMatt Macy 8088eda14cbcSMatt Macy for (int c = 0; c < vd->vdev_children; c++) { 8089eda14cbcSMatt Macy vdev_t *cvd = vd->vdev_child[c]; 8090eda14cbcSMatt Macy spa_async_autoexpand(spa, cvd); 8091eda14cbcSMatt Macy } 8092eda14cbcSMatt Macy 8093eda14cbcSMatt Macy if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 8094eda14cbcSMatt Macy return; 8095eda14cbcSMatt Macy 8096eda14cbcSMatt Macy spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND); 8097eda14cbcSMatt Macy } 8098eda14cbcSMatt Macy 8099eda14cbcSMatt Macy static void 8100eda14cbcSMatt Macy spa_async_thread(void *arg) 8101eda14cbcSMatt Macy { 8102eda14cbcSMatt Macy spa_t *spa = (spa_t *)arg; 8103eda14cbcSMatt Macy dsl_pool_t *dp = spa->spa_dsl_pool; 8104eda14cbcSMatt Macy int tasks; 8105eda14cbcSMatt Macy 8106eda14cbcSMatt Macy ASSERT(spa->spa_sync_on); 8107eda14cbcSMatt Macy 8108eda14cbcSMatt Macy mutex_enter(&spa->spa_async_lock); 8109eda14cbcSMatt Macy tasks = spa->spa_async_tasks; 8110eda14cbcSMatt Macy spa->spa_async_tasks = 0; 8111eda14cbcSMatt Macy mutex_exit(&spa->spa_async_lock); 8112eda14cbcSMatt Macy 8113eda14cbcSMatt Macy /* 8114eda14cbcSMatt Macy * See if the config needs to be updated. 
8115eda14cbcSMatt Macy */ 8116eda14cbcSMatt Macy if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 8117eda14cbcSMatt Macy uint64_t old_space, new_space; 8118eda14cbcSMatt Macy 8119eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 8120eda14cbcSMatt Macy old_space = metaslab_class_get_space(spa_normal_class(spa)); 8121eda14cbcSMatt Macy old_space += metaslab_class_get_space(spa_special_class(spa)); 8122eda14cbcSMatt Macy old_space += metaslab_class_get_space(spa_dedup_class(spa)); 8123184c1b94SMartin Matuska old_space += metaslab_class_get_space( 8124184c1b94SMartin Matuska spa_embedded_log_class(spa)); 8125eda14cbcSMatt Macy 8126eda14cbcSMatt Macy spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 8127eda14cbcSMatt Macy 8128eda14cbcSMatt Macy new_space = metaslab_class_get_space(spa_normal_class(spa)); 8129eda14cbcSMatt Macy new_space += metaslab_class_get_space(spa_special_class(spa)); 8130eda14cbcSMatt Macy new_space += metaslab_class_get_space(spa_dedup_class(spa)); 8131184c1b94SMartin Matuska new_space += metaslab_class_get_space( 8132184c1b94SMartin Matuska spa_embedded_log_class(spa)); 8133eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 8134eda14cbcSMatt Macy 8135eda14cbcSMatt Macy /* 8136eda14cbcSMatt Macy * If the pool grew as a result of the config update, 8137eda14cbcSMatt Macy * then log an internal history event. 8138eda14cbcSMatt Macy */ 8139eda14cbcSMatt Macy if (new_space != old_space) { 8140eda14cbcSMatt Macy spa_history_log_internal(spa, "vdev online", NULL, 8141eda14cbcSMatt Macy "pool '%s' size: %llu(+%llu)", 8142eda14cbcSMatt Macy spa_name(spa), (u_longlong_t)new_space, 8143eda14cbcSMatt Macy (u_longlong_t)(new_space - old_space)); 8144eda14cbcSMatt Macy } 8145eda14cbcSMatt Macy } 8146eda14cbcSMatt Macy 8147eda14cbcSMatt Macy /* 8148eda14cbcSMatt Macy * See if any devices need to be marked REMOVED. 8149eda14cbcSMatt Macy */ 8150eda14cbcSMatt Macy if (tasks & SPA_ASYNC_REMOVE) { 8151eda14cbcSMatt Macy spa_vdev_state_enter(spa, SCL_NONE); 8152eda14cbcSMatt Macy spa_async_remove(spa, spa->spa_root_vdev); 8153eda14cbcSMatt Macy for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 8154eda14cbcSMatt Macy spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 8155eda14cbcSMatt Macy for (int i = 0; i < spa->spa_spares.sav_count; i++) 8156eda14cbcSMatt Macy spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 8157eda14cbcSMatt Macy (void) spa_vdev_state_exit(spa, NULL, 0); 8158eda14cbcSMatt Macy } 8159eda14cbcSMatt Macy 8160eda14cbcSMatt Macy if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 8161eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8162eda14cbcSMatt Macy spa_async_autoexpand(spa, spa->spa_root_vdev); 8163eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG, FTAG); 8164eda14cbcSMatt Macy } 8165eda14cbcSMatt Macy 8166eda14cbcSMatt Macy /* 8167eda14cbcSMatt Macy * See if any devices need to be probed. 8168eda14cbcSMatt Macy */ 8169eda14cbcSMatt Macy if (tasks & SPA_ASYNC_PROBE) { 8170eda14cbcSMatt Macy spa_vdev_state_enter(spa, SCL_NONE); 8171eda14cbcSMatt Macy spa_async_probe(spa, spa->spa_root_vdev); 8172eda14cbcSMatt Macy (void) spa_vdev_state_exit(spa, NULL, 0); 8173eda14cbcSMatt Macy } 8174eda14cbcSMatt Macy 8175eda14cbcSMatt Macy /* 8176eda14cbcSMatt Macy * If any devices are done replacing, detach them. 
8177eda14cbcSMatt Macy */ 81787877fdebSMatt Macy if (tasks & SPA_ASYNC_RESILVER_DONE || 81797877fdebSMatt Macy tasks & SPA_ASYNC_REBUILD_DONE) { 8180eda14cbcSMatt Macy spa_vdev_resilver_done(spa); 8181eda14cbcSMatt Macy } 8182eda14cbcSMatt Macy 8183eda14cbcSMatt Macy /* 8184eda14cbcSMatt Macy * Kick off a resilver. 8185eda14cbcSMatt Macy */ 8186eda14cbcSMatt Macy if (tasks & SPA_ASYNC_RESILVER && 8187eda14cbcSMatt Macy !vdev_rebuild_active(spa->spa_root_vdev) && 8188eda14cbcSMatt Macy (!dsl_scan_resilvering(dp) || 8189eda14cbcSMatt Macy !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))) 8190eda14cbcSMatt Macy dsl_scan_restart_resilver(dp, 0); 8191eda14cbcSMatt Macy 8192eda14cbcSMatt Macy if (tasks & SPA_ASYNC_INITIALIZE_RESTART) { 8193eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 8194eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8195eda14cbcSMatt Macy vdev_initialize_restart(spa->spa_root_vdev); 8196eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG, FTAG); 8197eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 8198eda14cbcSMatt Macy } 8199eda14cbcSMatt Macy 8200eda14cbcSMatt Macy if (tasks & SPA_ASYNC_TRIM_RESTART) { 8201eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 8202eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8203eda14cbcSMatt Macy vdev_trim_restart(spa->spa_root_vdev); 8204eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG, FTAG); 8205eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 8206eda14cbcSMatt Macy } 8207eda14cbcSMatt Macy 8208eda14cbcSMatt Macy if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) { 8209eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 8210eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8211eda14cbcSMatt Macy vdev_autotrim_restart(spa); 8212eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG, FTAG); 8213eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 8214eda14cbcSMatt Macy } 8215eda14cbcSMatt Macy 8216eda14cbcSMatt Macy /* 8217eda14cbcSMatt Macy * Kick off L2 cache whole device TRIM. 8218eda14cbcSMatt Macy */ 8219eda14cbcSMatt Macy if (tasks & SPA_ASYNC_L2CACHE_TRIM) { 8220eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 8221eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8222eda14cbcSMatt Macy vdev_trim_l2arc(spa); 8223eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG, FTAG); 8224eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 8225eda14cbcSMatt Macy } 8226eda14cbcSMatt Macy 8227eda14cbcSMatt Macy /* 8228eda14cbcSMatt Macy * Kick off L2 cache rebuilding. 8229eda14cbcSMatt Macy */ 8230eda14cbcSMatt Macy if (tasks & SPA_ASYNC_L2CACHE_REBUILD) { 8231eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 8232eda14cbcSMatt Macy spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER); 8233eda14cbcSMatt Macy l2arc_spa_rebuild_start(spa); 8234eda14cbcSMatt Macy spa_config_exit(spa, SCL_L2ARC, FTAG); 8235eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 8236eda14cbcSMatt Macy } 8237eda14cbcSMatt Macy 8238eda14cbcSMatt Macy /* 8239eda14cbcSMatt Macy * Let the world know that we're done. 
8240eda14cbcSMatt Macy */ 8241eda14cbcSMatt Macy mutex_enter(&spa->spa_async_lock); 8242eda14cbcSMatt Macy spa->spa_async_thread = NULL; 8243eda14cbcSMatt Macy cv_broadcast(&spa->spa_async_cv); 8244eda14cbcSMatt Macy mutex_exit(&spa->spa_async_lock); 8245eda14cbcSMatt Macy thread_exit(); 8246eda14cbcSMatt Macy } 8247eda14cbcSMatt Macy 8248eda14cbcSMatt Macy void 8249eda14cbcSMatt Macy spa_async_suspend(spa_t *spa) 8250eda14cbcSMatt Macy { 8251eda14cbcSMatt Macy mutex_enter(&spa->spa_async_lock); 8252eda14cbcSMatt Macy spa->spa_async_suspended++; 8253eda14cbcSMatt Macy while (spa->spa_async_thread != NULL) 8254eda14cbcSMatt Macy cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 8255eda14cbcSMatt Macy mutex_exit(&spa->spa_async_lock); 8256eda14cbcSMatt Macy 8257eda14cbcSMatt Macy spa_vdev_remove_suspend(spa); 8258eda14cbcSMatt Macy 8259eda14cbcSMatt Macy zthr_t *condense_thread = spa->spa_condense_zthr; 8260eda14cbcSMatt Macy if (condense_thread != NULL) 8261eda14cbcSMatt Macy zthr_cancel(condense_thread); 8262eda14cbcSMatt Macy 8263eda14cbcSMatt Macy zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 8264eda14cbcSMatt Macy if (discard_thread != NULL) 8265eda14cbcSMatt Macy zthr_cancel(discard_thread); 8266eda14cbcSMatt Macy 8267eda14cbcSMatt Macy zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr; 8268eda14cbcSMatt Macy if (ll_delete_thread != NULL) 8269eda14cbcSMatt Macy zthr_cancel(ll_delete_thread); 8270eda14cbcSMatt Macy 8271eda14cbcSMatt Macy zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 8272eda14cbcSMatt Macy if (ll_condense_thread != NULL) 8273eda14cbcSMatt Macy zthr_cancel(ll_condense_thread); 8274eda14cbcSMatt Macy } 8275eda14cbcSMatt Macy 8276eda14cbcSMatt Macy void 8277eda14cbcSMatt Macy spa_async_resume(spa_t *spa) 8278eda14cbcSMatt Macy { 8279eda14cbcSMatt Macy mutex_enter(&spa->spa_async_lock); 8280eda14cbcSMatt Macy ASSERT(spa->spa_async_suspended != 0); 8281eda14cbcSMatt Macy spa->spa_async_suspended--; 8282eda14cbcSMatt Macy mutex_exit(&spa->spa_async_lock); 8283eda14cbcSMatt Macy spa_restart_removal(spa); 8284eda14cbcSMatt Macy 8285eda14cbcSMatt Macy zthr_t *condense_thread = spa->spa_condense_zthr; 8286eda14cbcSMatt Macy if (condense_thread != NULL) 8287eda14cbcSMatt Macy zthr_resume(condense_thread); 8288eda14cbcSMatt Macy 8289eda14cbcSMatt Macy zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 8290eda14cbcSMatt Macy if (discard_thread != NULL) 8291eda14cbcSMatt Macy zthr_resume(discard_thread); 8292eda14cbcSMatt Macy 8293eda14cbcSMatt Macy zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr; 8294eda14cbcSMatt Macy if (ll_delete_thread != NULL) 8295eda14cbcSMatt Macy zthr_resume(ll_delete_thread); 8296eda14cbcSMatt Macy 8297eda14cbcSMatt Macy zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 8298eda14cbcSMatt Macy if (ll_condense_thread != NULL) 8299eda14cbcSMatt Macy zthr_resume(ll_condense_thread); 8300eda14cbcSMatt Macy } 8301eda14cbcSMatt Macy 8302eda14cbcSMatt Macy static boolean_t 8303eda14cbcSMatt Macy spa_async_tasks_pending(spa_t *spa) 8304eda14cbcSMatt Macy { 8305eda14cbcSMatt Macy uint_t non_config_tasks; 8306eda14cbcSMatt Macy uint_t config_task; 8307eda14cbcSMatt Macy boolean_t config_task_suspended; 8308eda14cbcSMatt Macy 8309eda14cbcSMatt Macy non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE; 8310eda14cbcSMatt Macy config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 8311eda14cbcSMatt Macy if (spa->spa_ccw_fail_time == 0) { 8312eda14cbcSMatt Macy config_task_suspended = 
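/* no config cache write has failed; never throttle the update */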
static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
	uint_t non_config_tasks;
	uint_t config_task;
	boolean_t config_task_suspended;

	non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
	config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
	if (spa->spa_ccw_fail_time == 0) {
		config_task_suspended = B_FALSE;
	} else {
		config_task_suspended =
		    (gethrtime() - spa->spa_ccw_fail_time) <
		    ((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
	}

	return (non_config_tasks || (config_task && !config_task_suspended));
}

static void
spa_async_dispatch(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	if (spa_async_tasks_pending(spa) &&
	    !spa->spa_async_suspended &&
	    spa->spa_async_thread == NULL)
		spa->spa_async_thread = thread_create(NULL, 0,
		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
	mutex_exit(&spa->spa_async_lock);
}

void
spa_async_request(spa_t *spa, int task)
{
	zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_tasks |= task;
	mutex_exit(&spa->spa_async_lock);
}

int
spa_async_tasks(spa_t *spa)
{
	return (spa->spa_async_tasks);
}

/*
 * ==========================================================================
 * SPA syncing routines
 * ==========================================================================
 */

static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	bpobj_t *bpo = arg;
	bpobj_enqueue(bpo, bp, bp_freed, tx);
	return (0);
}

int
bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
}

int
bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
}

static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	zio_t *pio = arg;

	zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
	    pio->io_flags));
	return (0);
}
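
/*
 * bpobj_iterate() variant of spa_free_sync_cb().  Entries in the
 * deferred bpobj are never marked bp_freed, hence the ASSERT.
 */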
static int
bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
    dmu_tx_t *tx)
{
	ASSERT(!bp_freed);
	return (spa_free_sync_cb(arg, bp, tx));
}

/*
 * Note: this simple function is not inlined to make it easier to dtrace the
 * amount of time spent syncing frees.
 */
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(spa, NULL, NULL, 0);
	bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
	VERIFY(zio_wait(zio) == 0);
}

/*
 * Note: this simple function is not inlined to make it easier to dtrace the
 * amount of time spent syncing deferred frees.
 */
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
	if (spa_sync_pass(spa) != 1)
		return;

	/*
	 * Note:
	 * If the log space map feature is active, we stop deferring
	 * frees to the next TXG and therefore running this function
	 * would be considered a no-op as spa_deferred_bpobj should
	 * not have any entries.
	 *
	 * That said we run this function anyway (instead of returning
	 * immediately) for the edge-case scenario where we just
	 * activated the log space map feature in this TXG but we have
	 * deferred frees from the previous TXG.
	 */
	zio_t *zio = zio_root(spa, NULL, NULL, 0);
	VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
	    bpobj_spa_free_sync_cb, zio, tx), ==, 0);
	VERIFY0(zio_wait(zio));
}

static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
	char *packed = NULL;
	size_t bufsize;
	size_t nvsize = 0;
	dmu_buf_t *db;

	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);

	/*
	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
	 * information.  This avoids the dmu_buf_will_dirty() path and
	 * saves us a pre-read to get data we don't actually care about.
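	 * (The packed nvlist is rounded up to SPA_CONFIG_BLOCKSIZE and
	 * zero-padded below, so only whole aligned blocks are written.)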
	 */
	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
	packed = vmem_alloc(bufsize, KM_SLEEP);

	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);
	bzero(packed + nvsize, bufsize - nvsize);

	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);

	vmem_free(packed, bufsize);

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = nvsize;
	dmu_buf_rele(db, FTAG);
}

static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
    const char *config, const char *entry)
{
	nvlist_t *nvroot;
	nvlist_t **list;
	int i;

	if (!sav->sav_sync)
		return;

	/*
	 * Update the MOS nvlist describing the list of available devices.
	 * spa_validate_aux() will have already made sure this nvlist is
	 * valid and the vdevs are labeled appropriately.
	 */
	if (sav->sav_object == 0) {
		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
		    &sav->sav_object, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	if (sav->sav_count == 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
	} else {
		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
		for (i = 0; i < sav->sav_count; i++)
			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
			    B_FALSE, VDEV_CONFIG_L2CACHE);
		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
		    sav->sav_count) == 0);
		for (i = 0; i < sav->sav_count; i++)
			nvlist_free(list[i]);
		kmem_free(list, sav->sav_count * sizeof (void *));
	}

	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
	nvlist_free(nvroot);

	sav->sav_sync = B_FALSE;
}

/*
 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
 * The all-vdev ZAP must be empty.
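 * (Walks the tree recursively from the given vdev; each vdev contributes
 * its top-level and leaf ZAP object numbers, if it has them.)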
 */
static void
spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;

	if (vd->vdev_top_zap != 0) {
		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
		    vd->vdev_top_zap, tx));
	}
	if (vd->vdev_leaf_zap != 0) {
		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
		    vd->vdev_leaf_zap, tx));
	}
	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		spa_avz_build(vd->vdev_child[i], avz, tx);
	}
}

static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
	nvlist_t *config;

	/*
	 * If the pool is being imported from a pre-per-vdev-ZAP version of
	 * ZFS, its config may not be dirty but we still need to build
	 * per-vdev ZAPs.  Similarly, if the pool is being assembled (e.g.
	 * after a split), we need to rebuild the AVZ although the config
	 * may not be dirty.
	 */
	if (list_is_empty(&spa->spa_config_dirty_list) &&
	    spa->spa_avz_action == AVZ_ACTION_NONE)
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
	    spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
	    spa->spa_all_vdev_zaps != 0);

	if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
		/* Make and build the new AVZ */
		uint64_t new_avz = zap_create(spa->spa_meta_objset,
		    DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
		spa_avz_build(spa->spa_root_vdev, new_avz, tx);

		/* Diff old AVZ with new one */
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, spa->spa_meta_objset,
		    spa->spa_all_vdev_zaps);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			uint64_t vdzap = za.za_first_integer;
			if (zap_lookup_int(spa->spa_meta_objset, new_avz,
			    vdzap) == ENOENT) {
				/*
				 * ZAP is listed in old AVZ but not in new
				 * one; destroy it
				 */
				VERIFY0(zap_destroy(spa->spa_meta_objset,
				    vdzap, tx));
			}
		}

		zap_cursor_fini(&zc);

		/* Destroy the old AVZ */
		VERIFY0(zap_destroy(spa->spa_meta_objset,
		    spa->spa_all_vdev_zaps, tx));

		/* Replace the old AVZ in the dir obj with the new one */
		VERIFY0(zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
		    sizeof (new_avz), 1, &new_avz, tx));

		spa->spa_all_vdev_zaps = new_avz;
	} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
		zap_cursor_t zc;
		zap_attribute_t za;

		/* Walk through the AVZ and destroy all listed ZAPs */
		for (zap_cursor_init(&zc, spa->spa_meta_objset,
		    spa->spa_all_vdev_zaps);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			uint64_t zap = za.za_first_integer;
			VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
		}

		zap_cursor_fini(&zc);

		/* Destroy and unlink the AVZ itself */
		VERIFY0(zap_destroy(spa->spa_meta_objset,
		    spa->spa_all_vdev_zaps, tx));
		VERIFY0(zap_remove(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
		spa->spa_all_vdev_zaps = 0;
	}

	if (spa->spa_all_vdev_zaps == 0) {
		spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
		    DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_VDEV_ZAP_MAP, tx);
	}
	spa->spa_avz_action = AVZ_ACTION_NONE;

	/* Create ZAPs for vdevs that don't have them. */
	vdev_construct_zaps(spa->spa_root_vdev, tx);

	config = spa_config_generate(spa, spa->spa_root_vdev,
	    dmu_tx_get_txg(tx), B_FALSE);

	/*
	 * If we're upgrading the spa version then make sure that
	 * the config object gets updated with the correct version.
	 */
	if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
		fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
		    spa->spa_uberblock.ub_version);

	spa_config_exit(spa, SCL_STATE, FTAG);

	nvlist_free(spa->spa_config_syncing);
	spa->spa_config_syncing = config;

	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}

static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
	uint64_t *versionp = arg;
	uint64_t version = *versionp;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	/*
	 * Setting the version is special cased when first creating the pool.
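	 * (At creation the version is written with the rest of the initial
	 * txg, so this sync task asserts it is not running in TXG_INITIAL.)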
	 */
	ASSERT(tx->tx_txg != TXG_INITIAL);

	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
	ASSERT(version >= spa_version(spa));

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);
	spa_history_log_internal(spa, "set", tx, "version=%lld",
	    (longlong_t)version);
}

/*
 * Set zpool properties.
 */
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
	nvlist_t *nvp = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	objset_t *mos = spa->spa_meta_objset;
	nvpair_t *elem = NULL;

	mutex_enter(&spa->spa_props_lock);

	while ((elem = nvlist_next_nvpair(nvp, elem))) {
		uint64_t intval;
		char *strval, *fname;
		zpool_prop_t prop;
		const char *propname;
		zprop_type_t proptype;
		spa_feature_t fid;

		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
		case ZPOOL_PROP_INVAL:
			/*
			 * We checked this earlier in spa_prop_validate().
			 */
			ASSERT(zpool_prop_feature(nvpair_name(elem)));

			fname = strchr(nvpair_name(elem), '@') + 1;
			VERIFY0(zfeature_lookup_name(fname, &fid));

			spa_feature_enable(spa, fid, tx);
			spa_history_log_internal(spa, "set", tx,
			    "%s=enabled", nvpair_name(elem));
			break;

		case ZPOOL_PROP_VERSION:
			intval = fnvpair_value_uint64(elem);
			/*
			 * The version is synced separately before other
			 * properties and should be correct by now.
			 */
			ASSERT3U(spa_version(spa), >=, intval);
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property.  It should
			 * have been set temporarily at creation or import
			 * time.
			 */
			ASSERT(spa->spa_root != NULL);
			break;

		case ZPOOL_PROP_READONLY:
		case ZPOOL_PROP_CACHEFILE:
			/*
			 * 'readonly' and 'cachefile' are also non-persistent
			 * properties.
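			 * Nothing is written to the MOS for them here.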
			 */
			break;
		case ZPOOL_PROP_COMMENT:
			strval = fnvpair_value_string(elem);
			if (spa->spa_comment != NULL)
				spa_strfree(spa->spa_comment);
			spa->spa_comment = spa_strdup(strval);
			/*
			 * We need to dirty the configuration on all the vdevs
			 * so that their labels get updated.  We also need to
			 * update the cache file to keep it in sync with the
			 * MOS version.  It's unnecessary to do this for pool
			 * creation since the vdev's configuration has already
			 * been dirtied.
			 */
			if (tx->tx_txg != TXG_INITIAL) {
				vdev_config_dirty(spa->spa_root_vdev);
				spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
			}
			spa_history_log_internal(spa, "set", tx,
			    "%s=%s", nvpair_name(elem), strval);
			break;
		case ZPOOL_PROP_COMPATIBILITY:
			strval = fnvpair_value_string(elem);
			if (spa->spa_compatibility != NULL)
				spa_strfree(spa->spa_compatibility);
			spa->spa_compatibility = spa_strdup(strval);
			/*
			 * Dirty the configuration on vdevs as above.
			 */
			if (tx->tx_txg != TXG_INITIAL) {
				vdev_config_dirty(spa->spa_root_vdev);
				spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
			}

			spa_history_log_internal(spa, "set", tx,
			    "%s=%s", nvpair_name(elem), strval);
			break;

		default:
			/*
			 * Set pool property values in the poolprops mos
			 * object.
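			 * (The object is created on first use as a ZAP
			 * linked from the pool directory.)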
			 */
			if (spa->spa_pool_props_object == 0) {
				spa->spa_pool_props_object =
				    zap_create_link(mos, DMU_OT_POOL_PROPS,
				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
				    tx);
			}

			/* normalize the property name */
			propname = zpool_prop_to_name(prop);
			proptype = zpool_prop_get_type(prop);

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				ASSERT(proptype == PROP_TYPE_STRING);
				strval = fnvpair_value_string(elem);
				VERIFY0(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    1, strlen(strval) + 1, strval, tx));
				spa_history_log_internal(spa, "set", tx,
				    "%s=%s", nvpair_name(elem), strval);
			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				intval = fnvpair_value_uint64(elem);

				if (proptype == PROP_TYPE_INDEX) {
					const char *unused;
					VERIFY0(zpool_prop_index_to_string(
					    prop, intval, &unused));
				}
				VERIFY0(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    8, 1, &intval, tx));
				spa_history_log_internal(spa, "set", tx,
				    "%s=%lld", nvpair_name(elem),
				    (longlong_t)intval);
			} else {
				ASSERT(0); /* not allowed */
			}

			switch (prop) {
			case ZPOOL_PROP_DELEGATION:
				spa->spa_delegation = intval;
				break;
			case ZPOOL_PROP_BOOTFS:
				spa->spa_bootfs = intval;
				break;
			case ZPOOL_PROP_FAILUREMODE:
				spa->spa_failmode = intval;
				break;
			case ZPOOL_PROP_AUTOTRIM:
				spa->spa_autotrim = intval;
				spa_async_request(spa,
				    SPA_ASYNC_AUTOTRIM_RESTART);
				break;
			case ZPOOL_PROP_AUTOEXPAND:
				spa->spa_autoexpand = intval;
				if (tx->tx_txg != TXG_INITIAL)
					spa_async_request(spa,
					    SPA_ASYNC_AUTOEXPAND);
				break;
			case ZPOOL_PROP_MULTIHOST:
				spa->spa_multihost = intval;
				break;
			default:
				break;
			}
		}
	}

	mutex_exit(&spa->spa_props_lock);
}

/*
 * Perform one-time upgrade on-disk changes.  spa_version() does not
 * reflect the new version this txg, so there must be no changes this
 * txg to anything that the upgrade code depends on after it executes.
 * Therefore this must be called after dsl_pool_sync() does the sync
 * tasks.
 */
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
	if (spa_sync_pass(spa) != 1)
		return;

	dsl_pool_t *dp = spa->spa_dsl_pool;
	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
		dsl_pool_create_origin(dp, tx);

		/* Keeping the origin open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
		dsl_pool_upgrade_clones(dp, tx);
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
		dsl_pool_upgrade_dir_clones(dp, tx);

		/* Keeping the freedir open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		spa_feature_create_zap_objects(spa, tx);
	}

	/*
	 * The LZ4_COMPRESS feature's behavior was changed to
	 * activate_on_enable when the ability to use lz4 compression for
	 * metadata was added.  Old pools that have this feature enabled
	 * must be upgraded to have it active.
	 */
	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		boolean_t lz4_en = spa_feature_is_enabled(spa,
		    SPA_FEATURE_LZ4_COMPRESS);
		boolean_t lz4_ac = spa_feature_is_active(spa,
		    SPA_FEATURE_LZ4_COMPRESS);

		if (lz4_en && !lz4_ac)
			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
	}

	/*
	 * If we haven't written the salt, do so now.  Note that the
	 * feature may not be activated yet, but that's fine since
	 * the presence of this ZAP entry is backwards compatible.
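	 * (Software that predates salted checksums simply ignores the
	 * entry.)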
	 */
	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
		VERIFY0(zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
		    sizeof (spa->spa_cksum_salt.zcs_bytes),
		    spa->spa_cksum_salt.zcs_bytes, tx));
	}

	rrw_exit(&dp->dp_config_rwlock, FTAG);
}

static void
vdev_indirect_state_sync_verify(vdev_t *vd)
{
	vdev_indirect_mapping_t *vim __maybe_unused =
	    vd->vdev_indirect_mapping;
	vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;

	if (vd->vdev_ops == &vdev_indirect_ops) {
		ASSERT(vim != NULL);
		ASSERT(vib != NULL);
	}

	uint64_t obsolete_sm_object = 0;
	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
	if (obsolete_sm_object != 0) {
		ASSERT(vd->vdev_obsolete_sm != NULL);
		ASSERT(vd->vdev_removing ||
		    vd->vdev_ops == &vdev_indirect_ops);
		ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
		ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
		ASSERT3U(obsolete_sm_object, ==,
		    space_map_object(vd->vdev_obsolete_sm));
		ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
		    space_map_allocated(vd->vdev_obsolete_sm));
	}
	ASSERT(vd->vdev_obsolete_segments != NULL);

	/*
	 * Since frees / remaps to an indirect vdev can only
	 * happen in syncing context, the obsolete segments
	 * tree must be empty when we start syncing.
	 */
	ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
}

/*
 * Set the top-level vdev's max queue depth.  Evaluate each top-level's
 * async write queue depth in case it changed.  The max queue depth will
 * not change in the middle of syncing out this txg.
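 * (Only the normal, special, and dedup metaslab classes participate in
 * the allocation throttle; metaslab groups in other classes are skipped
 * below.)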
 */
static void
spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
{
	ASSERT(spa_writeable(spa));

	vdev_t *rvd = spa->spa_root_vdev;
	uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
	    zfs_vdev_queue_depth_pct / 100;
	metaslab_class_t *normal = spa_normal_class(spa);
	metaslab_class_t *special = spa_special_class(spa);
	metaslab_class_t *dedup = spa_dedup_class(spa);

	uint64_t slots_per_allocator = 0;
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];

		metaslab_group_t *mg = tvd->vdev_mg;
		if (mg == NULL || !metaslab_group_initialized(mg))
			continue;

		metaslab_class_t *mc = mg->mg_class;
		if (mc != normal && mc != special && mc != dedup)
			continue;

		/*
		 * It is safe to do a lock-free check here because only async
		 * allocations look at mg_max_alloc_queue_depth, and async
		 * allocations all happen from spa_sync().
		 */
		for (int i = 0; i < mg->mg_allocators; i++) {
			ASSERT0(zfs_refcount_count(
			    &(mg->mg_allocator[i].mga_alloc_queue_depth)));
		}
		mg->mg_max_alloc_queue_depth = max_queue_depth;

		for (int i = 0; i < mg->mg_allocators; i++) {
			mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
			    zfs_vdev_def_queue_depth;
		}
		slots_per_allocator += zfs_vdev_def_queue_depth;
	}
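
	/*
	 * Each allocator in the normal, special, and dedup classes gets
	 * the same number of slots (slots_per_allocator, accumulated
	 * above); zio_dva_throttle_enabled controls whether the throttle
	 * applies at all.
	 */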
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
		    mca_alloc_slots));
		ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
		    mca_alloc_slots));
		ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
		    mca_alloc_slots));
		normal->mc_allocator[i].mca_alloc_max_slots =
		    slots_per_allocator;
		special->mc_allocator[i].mca_alloc_max_slots =
		    slots_per_allocator;
		dedup->mc_allocator[i].mca_alloc_max_slots =
		    slots_per_allocator;
	}
	normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
	special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
	dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
}

static void
spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
{
	ASSERT(spa_writeable(spa));

	vdev_t *rvd = spa->spa_root_vdev;
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		vdev_indirect_state_sync_verify(vd);

		if (vdev_indirect_should_condense(vd)) {
			spa_condense_indirect_start_sync(vd, tx);
			break;
		}
	}
}

static void
spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
{
	objset_t *mos = spa->spa_meta_objset;
	dsl_pool_t *dp = spa->spa_dsl_pool;
	uint64_t txg = tx->tx_txg;
	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];

	do {
		int pass = ++spa->spa_sync_pass;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		if (pass < zfs_sync_pass_deferred_free ||
		    spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
			/*
			 * If the log space map feature is active we don't
			 * care about deferred frees and the deferred bpobj
			 * as the log space map should effectively have the
			 * same results (i.e. appending only to one object).
			 */
			spa_sync_frees(spa, free_bpl, tx);
		} else {
			/*
			 * We cannot defer frees in pass 1, because
			 * we sync the deferred frees later in pass 1.
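			 * (Blocks enqueued on the deferred bpobj here are
			 * freed by spa_sync_deferred_frees() in pass 1 of
			 * a later txg.)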
			 */
			ASSERT3U(pass, >, 1);
			bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
			    &spa->spa_deferred_bpobj, tx);
		}

		ddt_sync(spa, txg);
		dsl_scan_sync(dp, tx);
		svr_sync(spa, tx);
		spa_sync_upgrades(spa, tx);

		spa_flush_metaslabs(spa, tx);

		vdev_t *vd = NULL;
		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
		    != NULL)
			vdev_sync(vd, txg);

		/*
		 * Note: We need to check if the MOS is dirty because we could
		 * have marked the MOS dirty without updating the uberblock
		 * (e.g. if we have sync tasks but no dirty user data).  We
		 * need to check the uberblock's rootbp because it is updated
		 * if we have synced out dirty data (though in this case the
		 * MOS will most likely also be dirty due to second order
		 * effects, we don't want to rely on that here).
		 */
		if (pass == 1 &&
		    spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
		    !dmu_objset_is_dirty(mos, txg)) {
			/*
			 * Nothing changed on the first pass, therefore this
			 * TXG is a no-op.  Avoid syncing deferred frees, so
			 * that we can keep this TXG as a no-op.
			 */
			ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
			ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
			ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
			ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
			break;
		}

		spa_sync_deferred_frees(spa, tx);
	} while (dmu_objset_is_dirty(mos, txg));
}

/*
 * Rewrite the vdev configuration (which includes the uberblock) to
 * commit the transaction group.
 *
 * If there are no dirty vdevs, we sync the uberblock to a few random
 * top-level vdevs that are known to be visible in the config cache
 * (see spa_vdev_add() for a complete description).  If there *are* dirty
 * vdevs, sync the uberblock to all vdevs.
 */
static void
spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t txg = tx->tx_txg;

	for (;;) {
		int error = 0;

		/*
		 * We hold SCL_STATE to prevent vdev open/close/etc.
		 * while we're attempting to write the vdev labels.
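		 * If the write fails we suspend the pool and retry
		 * forever, resuming only after zio_resume_wait() reports
		 * that the pool is no longer suspended.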
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

		if (list_is_empty(&spa->spa_config_dirty_list)) {
			vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
			int svdcount = 0;
			int children = rvd->vdev_children;
			int c0 = random_in_range(children);

			for (int c = 0; c < children; c++) {
				vdev_t *vd =
				    rvd->vdev_child[(c0 + c) % children];

				/* Stop when revisiting the first vdev */
				if (c > 0 && svd[0] == vd)
					break;

				if (vd->vdev_ms_array == 0 ||
				    vd->vdev_islog ||
				    !vdev_is_concrete(vd))
					continue;

				svd[svdcount++] = vd;
				if (svdcount == SPA_SYNC_MIN_VDEVS)
					break;
			}
			error = vdev_config_sync(svd, svdcount, txg);
		} else {
			error = vdev_config_sync(rvd->vdev_child,
			    rvd->vdev_children, txg);
		}

		if (error == 0)
			spa->spa_last_synced_guid = rvd->vdev_guid;

		spa_config_exit(spa, SCL_STATE, FTAG);

		if (error == 0)
			break;
		zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
		zio_resume_wait(spa);
	}
}

/*
 * Sync the specified transaction group.  New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
	vdev_t *vd = NULL;

	VERIFY(spa_writeable(spa));

	/*
	 * Wait for i/os issued in open context that need to complete
	 * before this txg syncs.
	 */
	(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
	spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL);

	/*
	 * Lock out configuration changes.
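	 * SCL_CONFIG is held as reader until the end of spa_sync(), so
	 * config writers (e.g. vdev add/remove) must wait for this txg
	 * to finish syncing.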
	 */
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_enter(&spa->spa_allocs[i].spaa_lock);
		VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
		mutex_exit(&spa->spa_allocs[i].spaa_lock);
	}

	/*
	 * If there are any pending vdev state changes, convert them
	 * into config changes that go out with this transaction group.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	while (list_head(&spa->spa_state_dirty_list) != NULL) {
		/*
		 * We need the write lock here because, for aux vdevs,
		 * calling vdev_config_dirty() modifies sav_config.
		 * This is ugly and will become unnecessary when we
		 * eliminate the aux vdev wart by integrating all vdevs
		 * into the root vdev tree.
		 */
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
			vdev_state_clean(vd);
			vdev_config_dirty(vd);
		}
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	dsl_pool_t *dp = spa->spa_dsl_pool;
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);

	spa->spa_sync_starttime = gethrtime();
	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    NSEC_TO_TICK(spa->spa_deadman_synctime));

	/*
	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
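	 * (A top-level vdev whose deflate ratio differs from
	 * SPA_MINBLOCKSIZE indicates raid-z, so the loop below bails on
	 * the first such child.)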
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
		vdev_t *rvd = spa->spa_root_vdev;

		int i;
		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY0(zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	spa_sync_adjust_vdev_max_queue_depth(spa);

	spa_sync_condense_indirect(spa, tx);

	spa_sync_iterate_to_convergence(spa, tx);

#ifdef ZFS_DEBUG
	if (!list_is_empty(&spa->spa_config_dirty_list)) {
		/*
		 * Make sure that the number of ZAPs for all the vdevs matches
		 * the number of ZAPs in the per-vdev ZAP list.  This only
		 * gets called if the config is dirty; otherwise there may be
		 * outstanding AVZ operations that weren't completed in
		 * spa_sync_config_object.
		 */
		uint64_t all_vdev_zap_entry_count;
		ASSERT0(zap_count(spa->spa_meta_objset,
		    spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
		ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
		    all_vdev_zap_entry_count);
	}
#endif

	if (spa->spa_vdev_removal != NULL) {
		ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
	}

	spa_sync_rewrite_vdev_config(spa, tx);
	dmu_tx_commit(tx);

	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
	spa->spa_deadman_tqid = 0;

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
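	 * (spa_config_set() takes ownership of the nvlist, which is why
	 * spa_config_syncing is simply cleared below rather than freed.)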
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	dsl_pool_sync_done(dp, txg);

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_enter(&spa->spa_allocs[i].spaa_lock);
		VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
		mutex_exit(&spa->spa_allocs[i].spaa_lock);
	}

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
	    != NULL)
		vdev_sync_done(vd, txg);

	metaslab_class_evict_old(spa->spa_normal_class, txg);
	metaslab_class_evict_old(spa->spa_log_class, txg);

	spa_sync_close_syncing_log_sm(spa);

	spa_update_dspace(spa);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));

	while (zfs_pause_spa_sync)
		delay(1);

	spa->spa_sync_pass = 0;

	/*
	 * Update the last synced uberblock here.  We want to do this at
	 * the end of spa_sync() so that consumers of spa_last_synced_txg()
	 * will be guaranteed that all the processing associated with
	 * that txg has been completed.
	 */
	spa->spa_ubsync = spa->spa_uberblock;
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	spa_handle_ignored_writes(spa);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during
 * the sync.
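 * (Suspended pools are skipped so we don't block forever on a pool that
 * cannot make progress.)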
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE ||
		    !spa_writeable(spa) || spa_suspended(spa))
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
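		 * The open reference taken below keeps the spa_t from
		 * disappearing while the namespace lock is dropped.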
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (aux) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}

		for (i = 0; i < spa->spa_spares.sav_count; i++) {
			vd = spa->spa_spares.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	ASSERT(spa_writeable(spa));

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't
	 * be possible.
	 */
	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
	ASSERT3U(version, >=, spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i],
		    ZPOOL_CONFIG_GUID, &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2: once as a spare and
 * once as a replacement.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

uint64_t
spa_total_metaslabs(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	uint64_t m = 0;
	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		if (!vdev_is_concrete(vd))
			continue;
		m += vd->vdev_ms_count;
	}
	return (m);
}

/*
 * Notify any waiting threads that some activity has switched from being
 * in-progress to not-in-progress so that the thread can wake up and
 * determine whether it is finished waiting.
9474eda14cbcSMatt Macy boolean_t
9475eda14cbcSMatt Macy spa_has_spare(spa_t *spa, uint64_t guid)
9476eda14cbcSMatt Macy {
9477eda14cbcSMatt Macy 	int i;
9478eda14cbcSMatt Macy 	uint64_t spareguid;
9479eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_spares;
9480eda14cbcSMatt Macy 
9481eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++)
9482eda14cbcSMatt Macy 		if (sav->sav_vdevs[i]->vdev_guid == guid)
9483eda14cbcSMatt Macy 			return (B_TRUE);
9484eda14cbcSMatt Macy 
9485eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_npending; i++) {
9486eda14cbcSMatt Macy 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
9487eda14cbcSMatt Macy 		    &spareguid) == 0 && spareguid == guid)
9488eda14cbcSMatt Macy 			return (B_TRUE);
9489eda14cbcSMatt Macy 	}
9490eda14cbcSMatt Macy 
9491eda14cbcSMatt Macy 	return (B_FALSE);
9492eda14cbcSMatt Macy }
9493eda14cbcSMatt Macy 
9494eda14cbcSMatt Macy /*
9495eda14cbcSMatt Macy  * Check if a pool has an active shared spare device.
9496eda14cbcSMatt Macy  * Note: reference count of an active spare is 2, as a spare and as a replacement.
9497eda14cbcSMatt Macy  */
9498eda14cbcSMatt Macy static boolean_t
9499eda14cbcSMatt Macy spa_has_active_shared_spare(spa_t *spa)
9500eda14cbcSMatt Macy {
9501eda14cbcSMatt Macy 	int i, refcnt;
9502eda14cbcSMatt Macy 	uint64_t pool;
9503eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_spares;
9504eda14cbcSMatt Macy 
9505eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++) {
9506eda14cbcSMatt Macy 		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
9507eda14cbcSMatt Macy 		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
9508eda14cbcSMatt Macy 		    refcnt > 2)
9509eda14cbcSMatt Macy 			return (B_TRUE);
9510eda14cbcSMatt Macy 	}
9511eda14cbcSMatt Macy 
9512eda14cbcSMatt Macy 	return (B_FALSE);
9513eda14cbcSMatt Macy }
9514eda14cbcSMatt Macy 
9515eda14cbcSMatt Macy uint64_t
9516eda14cbcSMatt Macy spa_total_metaslabs(spa_t *spa)
9517eda14cbcSMatt Macy {
9518eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
9519eda14cbcSMatt Macy 
9520eda14cbcSMatt Macy 	uint64_t m = 0;
9521eda14cbcSMatt Macy 	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
9522eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[c];
9523eda14cbcSMatt Macy 		if (!vdev_is_concrete(vd))
9524eda14cbcSMatt Macy 			continue;
9525eda14cbcSMatt Macy 		m += vd->vdev_ms_count;
9526eda14cbcSMatt Macy 	}
9527eda14cbcSMatt Macy 	return (m);
9528eda14cbcSMatt Macy }
9529eda14cbcSMatt Macy 
9530eda14cbcSMatt Macy /*
9531eda14cbcSMatt Macy  * Notify any waiting threads that some activity has switched from being in-
9532eda14cbcSMatt Macy  * progress to not-in-progress so that the thread can wake up and determine
9533eda14cbcSMatt Macy  * whether it is finished waiting.
9534eda14cbcSMatt Macy  */
9535eda14cbcSMatt Macy void
9536eda14cbcSMatt Macy spa_notify_waiters(spa_t *spa)
9537eda14cbcSMatt Macy {
9538eda14cbcSMatt Macy 	/*
9539eda14cbcSMatt Macy 	 * Acquiring spa_activities_lock here prevents the cv_broadcast from
9540eda14cbcSMatt Macy 	 * happening between the waiting thread's check and cv_wait.
9541eda14cbcSMatt Macy 	 */
9542eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
9543eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_activities_cv);
9544eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
9545eda14cbcSMatt Macy }
9546eda14cbcSMatt Macy 
9547eda14cbcSMatt Macy /*
9548eda14cbcSMatt Macy  * Notify any waiting threads that the pool is exporting, and then block until
9549eda14cbcSMatt Macy  * they are finished using the spa_t.
9550eda14cbcSMatt Macy  */
9551eda14cbcSMatt Macy void
9552eda14cbcSMatt Macy spa_wake_waiters(spa_t *spa)
9553eda14cbcSMatt Macy {
9554eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
9555eda14cbcSMatt Macy 	spa->spa_waiters_cancel = B_TRUE;
9556eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_activities_cv);
9557eda14cbcSMatt Macy 	while (spa->spa_waiters != 0)
9558eda14cbcSMatt Macy 		cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
9559eda14cbcSMatt Macy 	spa->spa_waiters_cancel = B_FALSE;
9560eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
9561eda14cbcSMatt Macy }
9562eda14cbcSMatt Macy 
9563eda14cbcSMatt Macy /* Whether the vdev or any of its descendants are being initialized/trimmed. */
9564eda14cbcSMatt Macy static boolean_t
9565eda14cbcSMatt Macy spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
9566eda14cbcSMatt Macy {
9567eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
9568eda14cbcSMatt Macy 
9569eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
9570eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
9571eda14cbcSMatt Macy 	ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
9572eda14cbcSMatt Macy 	    activity == ZPOOL_WAIT_TRIM);
9573eda14cbcSMatt Macy 
9574eda14cbcSMatt Macy 	kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
9575eda14cbcSMatt Macy 	    &vd->vdev_initialize_lock : &vd->vdev_trim_lock;
9576eda14cbcSMatt Macy 
9577eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
9578eda14cbcSMatt Macy 	mutex_enter(lock);
9579eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
9580eda14cbcSMatt Macy 
9581eda14cbcSMatt Macy 	boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
9582eda14cbcSMatt Macy 	    (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
9583eda14cbcSMatt Macy 	    (vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
9584eda14cbcSMatt Macy 	mutex_exit(lock);
9585eda14cbcSMatt Macy 
9586eda14cbcSMatt Macy 	if (in_progress)
9587eda14cbcSMatt Macy 		return (B_TRUE);
9588eda14cbcSMatt Macy 
9589eda14cbcSMatt Macy 	for (int i = 0; i < vd->vdev_children; i++) {
9590eda14cbcSMatt Macy 		if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
9591eda14cbcSMatt Macy 		    activity))
9592eda14cbcSMatt Macy 			return (B_TRUE);
9593eda14cbcSMatt Macy 	}
9594eda14cbcSMatt Macy 
9595eda14cbcSMatt Macy 	return (B_FALSE);
9596eda14cbcSMatt Macy }
9597eda14cbcSMatt Macy 
9598eda14cbcSMatt Macy /*
9599eda14cbcSMatt Macy  * If use_guid is true, this checks whether the vdev specified by guid is
9600eda14cbcSMatt Macy  * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
9601eda14cbcSMatt Macy  * is being initialized/trimmed. The caller must hold the config lock and
9602eda14cbcSMatt Macy  * spa_activities_lock.
9603eda14cbcSMatt Macy */ 9604eda14cbcSMatt Macy static int 9605eda14cbcSMatt Macy spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid, 9606eda14cbcSMatt Macy zpool_wait_activity_t activity, boolean_t *in_progress) 9607eda14cbcSMatt Macy { 9608eda14cbcSMatt Macy mutex_exit(&spa->spa_activities_lock); 9609eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 9610eda14cbcSMatt Macy mutex_enter(&spa->spa_activities_lock); 9611eda14cbcSMatt Macy 9612eda14cbcSMatt Macy vdev_t *vd; 9613eda14cbcSMatt Macy if (use_guid) { 9614eda14cbcSMatt Macy vd = spa_lookup_by_guid(spa, guid, B_FALSE); 9615eda14cbcSMatt Macy if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) { 9616eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9617eda14cbcSMatt Macy return (EINVAL); 9618eda14cbcSMatt Macy } 9619eda14cbcSMatt Macy } else { 9620eda14cbcSMatt Macy vd = spa->spa_root_vdev; 9621eda14cbcSMatt Macy } 9622eda14cbcSMatt Macy 9623eda14cbcSMatt Macy *in_progress = spa_vdev_activity_in_progress_impl(vd, activity); 9624eda14cbcSMatt Macy 9625eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9626eda14cbcSMatt Macy return (0); 9627eda14cbcSMatt Macy } 9628eda14cbcSMatt Macy 9629eda14cbcSMatt Macy /* 9630eda14cbcSMatt Macy * Locking for waiting threads 9631eda14cbcSMatt Macy * --------------------------- 9632eda14cbcSMatt Macy * 9633eda14cbcSMatt Macy * Waiting threads need a way to check whether a given activity is in progress, 9634eda14cbcSMatt Macy * and then, if it is, wait for it to complete. Each activity will have some 9635eda14cbcSMatt Macy * in-memory representation of the relevant on-disk state which can be used to 9636eda14cbcSMatt Macy * determine whether or not the activity is in progress. The in-memory state and 9637eda14cbcSMatt Macy * the locking used to protect it will be different for each activity, and may 9638eda14cbcSMatt Macy * not be suitable for use with a cvar (e.g., some state is protected by the 9639eda14cbcSMatt Macy * config lock). To allow waiting threads to wait without any races, another 9640eda14cbcSMatt Macy * lock, spa_activities_lock, is used. 9641eda14cbcSMatt Macy * 9642eda14cbcSMatt Macy * When the state is checked, both the activity-specific lock (if there is one) 9643eda14cbcSMatt Macy * and spa_activities_lock are held. In some cases, the activity-specific lock 9644eda14cbcSMatt Macy * is acquired explicitly (e.g. the config lock). In others, the locking is 9645eda14cbcSMatt Macy * internal to some check (e.g. bpobj_is_empty). After checking, the waiting 9646eda14cbcSMatt Macy * thread releases the activity-specific lock and, if the activity is in 9647eda14cbcSMatt Macy * progress, then cv_waits using spa_activities_lock. 9648eda14cbcSMatt Macy * 9649eda14cbcSMatt Macy * The waiting thread is woken when another thread, one completing some 9650eda14cbcSMatt Macy * activity, updates the state of the activity and then calls 9651eda14cbcSMatt Macy * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only 9652eda14cbcSMatt Macy * needs to hold its activity-specific lock when updating the state, and this 9653eda14cbcSMatt Macy * lock can (but doesn't have to) be dropped before calling spa_notify_waiters. 
9654eda14cbcSMatt Macy  *
9655eda14cbcSMatt Macy  * Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
9656eda14cbcSMatt Macy  * and because it is held when the waiting thread checks the state of the
9657eda14cbcSMatt Macy  * activity, it can never be the case that the completing thread both updates
9658eda14cbcSMatt Macy  * the activity state and cv_broadcasts in between the waiting thread's check
9659eda14cbcSMatt Macy  * and cv_wait. Thus, a waiting thread can never miss a wakeup.
9660eda14cbcSMatt Macy  *
9661eda14cbcSMatt Macy  * In order to prevent deadlock, when the waiting thread does its check, in some
9662eda14cbcSMatt Macy  * cases it will temporarily drop spa_activities_lock in order to acquire the
9663eda14cbcSMatt Macy  * activity-specific lock. The order in which spa_activities_lock and the
9664eda14cbcSMatt Macy  * activity-specific lock are acquired in the waiting thread is determined by
9665eda14cbcSMatt Macy  * the order in which they are acquired in the completing thread; if the
9666eda14cbcSMatt Macy  * completing thread calls spa_notify_waiters with the activity-specific lock
9667eda14cbcSMatt Macy  * held, then the waiting thread must also acquire the activity-specific lock
9668eda14cbcSMatt Macy  * first.
9669eda14cbcSMatt Macy  */
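
/*
 * Illustrative, never-compiled sketch of the protocol described above. It
 * uses POSIX threads in place of the kernel mutex/cv primitives, and a
 * simplified variant in which a single lock (standing in for
 * spa_activities_lock) protects both the activity state and the cv. All
 * names below are hypothetical and exist only to show why a waiter cannot
 * miss a wakeup.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t activities_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t activities_cv = PTHREAD_COND_INITIALIZER;
static int activity_in_progress;	/* protected by activities_lock */

static void
waiter(void)
{
	pthread_mutex_lock(&activities_lock);
	/*
	 * The check and the wait happen under the same lock that the
	 * completing thread holds while broadcasting, so the broadcast
	 * cannot slip in between them.
	 */
	while (activity_in_progress)
		pthread_cond_wait(&activities_cv, &activities_lock);
	pthread_mutex_unlock(&activities_lock);
}

static void
completer(void)
{
	pthread_mutex_lock(&activities_lock);
	activity_in_progress = 0;
	pthread_cond_broadcast(&activities_cv);	/* cf. spa_notify_waiters */
	pthread_mutex_unlock(&activities_lock);
}
#endif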
9671eda14cbcSMatt Macy static int
9672eda14cbcSMatt Macy spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
9673eda14cbcSMatt Macy     boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
9674eda14cbcSMatt Macy {
9675eda14cbcSMatt Macy 	int error = 0;
9676eda14cbcSMatt Macy 
9677eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
9678eda14cbcSMatt Macy 
9679eda14cbcSMatt Macy 	switch (activity) {
9680eda14cbcSMatt Macy 	case ZPOOL_WAIT_CKPT_DISCARD:
9681eda14cbcSMatt Macy 		*in_progress =
9682eda14cbcSMatt Macy 		    (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
9683eda14cbcSMatt Macy 		    zap_contains(spa_meta_objset(spa),
9684eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
9685eda14cbcSMatt Macy 		    ENOENT);
9686eda14cbcSMatt Macy 		break;
9687eda14cbcSMatt Macy 	case ZPOOL_WAIT_FREE:
9688eda14cbcSMatt Macy 		*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
9689eda14cbcSMatt Macy 		    !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
9690eda14cbcSMatt Macy 		    spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
9691eda14cbcSMatt Macy 		    spa_livelist_delete_check(spa));
9692eda14cbcSMatt Macy 		break;
9693eda14cbcSMatt Macy 	case ZPOOL_WAIT_INITIALIZE:
9694eda14cbcSMatt Macy 	case ZPOOL_WAIT_TRIM:
9695eda14cbcSMatt Macy 		error = spa_vdev_activity_in_progress(spa, use_tag, tag,
9696eda14cbcSMatt Macy 		    activity, in_progress);
9697eda14cbcSMatt Macy 		break;
9698eda14cbcSMatt Macy 	case ZPOOL_WAIT_REPLACE:
9699eda14cbcSMatt Macy 		mutex_exit(&spa->spa_activities_lock);
9700eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
9701eda14cbcSMatt Macy 		mutex_enter(&spa->spa_activities_lock);
9702eda14cbcSMatt Macy 
9703eda14cbcSMatt Macy 		*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
9704eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9705eda14cbcSMatt Macy 		break;
9706eda14cbcSMatt Macy 	case ZPOOL_WAIT_REMOVE:
9707eda14cbcSMatt Macy 		*in_progress = (spa->spa_removing_phys.sr_state ==
9708eda14cbcSMatt Macy 		    DSS_SCANNING);
9709eda14cbcSMatt Macy 		break;
9710eda14cbcSMatt Macy 	case ZPOOL_WAIT_RESILVER:
9711eda14cbcSMatt Macy 		if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
9712eda14cbcSMatt Macy 			break;
9713eda14cbcSMatt Macy 		/* fall through */
9714eda14cbcSMatt Macy 	case ZPOOL_WAIT_SCRUB:
9715eda14cbcSMatt Macy 	{
9716eda14cbcSMatt Macy 		boolean_t scanning, paused, is_scrub;
9717eda14cbcSMatt Macy 		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
9718eda14cbcSMatt Macy 
9719eda14cbcSMatt Macy 		is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
9720eda14cbcSMatt Macy 		scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
9721eda14cbcSMatt Macy 		paused = dsl_scan_is_paused_scrub(scn);
9722eda14cbcSMatt Macy 		*in_progress = (scanning && !paused &&
9723eda14cbcSMatt Macy 		    is_scrub == (activity == ZPOOL_WAIT_SCRUB));
9724eda14cbcSMatt Macy 		break;
9725eda14cbcSMatt Macy 	}
9726eda14cbcSMatt Macy 	default:
9727eda14cbcSMatt Macy 		panic("unrecognized value for activity %d", activity);
9728eda14cbcSMatt Macy 	}
9729eda14cbcSMatt Macy 
9730eda14cbcSMatt Macy 	return (error);
9731eda14cbcSMatt Macy }
9732eda14cbcSMatt Macy 
9733eda14cbcSMatt Macy static int
9734eda14cbcSMatt Macy spa_wait_common(const char *pool, zpool_wait_activity_t activity,
9735eda14cbcSMatt Macy     boolean_t use_tag, uint64_t tag, boolean_t *waited)
9736eda14cbcSMatt Macy {
9737eda14cbcSMatt Macy 	/*
9738eda14cbcSMatt Macy 	 * The tag is used to distinguish between instances of an activity.
9739eda14cbcSMatt Macy 	 * 'initialize' and 'trim' are the only activities that we use this for.
9740eda14cbcSMatt Macy 	 * The other activities can only have a single instance in progress in a
9741eda14cbcSMatt Macy 	 * pool at one time, making the tag unnecessary.
9742eda14cbcSMatt Macy 	 *
9743eda14cbcSMatt Macy 	 * There can be multiple devices being replaced at once, but since they
9744eda14cbcSMatt Macy 	 * all finish once resilvering finishes, we don't bother keeping track
9745eda14cbcSMatt Macy 	 * of them individually; we just wait for them all to finish.
9746eda14cbcSMatt Macy 	 */
9747eda14cbcSMatt Macy 	if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
9748eda14cbcSMatt Macy 	    activity != ZPOOL_WAIT_TRIM)
9749eda14cbcSMatt Macy 		return (EINVAL);
9750eda14cbcSMatt Macy 
9751eda14cbcSMatt Macy 	if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
9752eda14cbcSMatt Macy 		return (EINVAL);
9753eda14cbcSMatt Macy 
9754eda14cbcSMatt Macy 	spa_t *spa;
9755eda14cbcSMatt Macy 	int error = spa_open(pool, &spa, FTAG);
9756eda14cbcSMatt Macy 	if (error != 0)
9757eda14cbcSMatt Macy 		return (error);
9758eda14cbcSMatt Macy 
9759eda14cbcSMatt Macy 	/*
9760eda14cbcSMatt Macy 	 * Increment the spa's waiter count so that we can call spa_close and
9761eda14cbcSMatt Macy 	 * still ensure that the spa_t doesn't get freed before this thread is
9762eda14cbcSMatt Macy 	 * finished with it when the pool is exported. We want to call spa_close
9763eda14cbcSMatt Macy 	 * before we start waiting because otherwise the additional ref would
9764eda14cbcSMatt Macy 	 * prevent the pool from being exported or destroyed throughout the
9765eda14cbcSMatt Macy 	 * potentially long wait.
9766eda14cbcSMatt Macy 	 */
9767eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
9768eda14cbcSMatt Macy 	spa->spa_waiters++;
9769eda14cbcSMatt Macy 	spa_close(spa, FTAG);
9770eda14cbcSMatt Macy 
9771eda14cbcSMatt Macy 	*waited = B_FALSE;
9772eda14cbcSMatt Macy 	for (;;) {
9773eda14cbcSMatt Macy 		boolean_t in_progress;
9774eda14cbcSMatt Macy 		error = spa_activity_in_progress(spa, activity, use_tag, tag,
9775eda14cbcSMatt Macy 		    &in_progress);
9776eda14cbcSMatt Macy 
9777eda14cbcSMatt Macy 		if (error || !in_progress || spa->spa_waiters_cancel)
9778eda14cbcSMatt Macy 			break;
9779eda14cbcSMatt Macy 
9780eda14cbcSMatt Macy 		*waited = B_TRUE;
9781eda14cbcSMatt Macy 
9782eda14cbcSMatt Macy 		if (cv_wait_sig(&spa->spa_activities_cv,
9783eda14cbcSMatt Macy 		    &spa->spa_activities_lock) == 0) {
9784eda14cbcSMatt Macy 			error = EINTR;
9785eda14cbcSMatt Macy 			break;
9786eda14cbcSMatt Macy 		}
9787eda14cbcSMatt Macy 	}
9788eda14cbcSMatt Macy 
9789eda14cbcSMatt Macy 	spa->spa_waiters--;
9790eda14cbcSMatt Macy 	cv_signal(&spa->spa_waiters_cv);
9791eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
9792eda14cbcSMatt Macy 
9793eda14cbcSMatt Macy 	return (error);
9794eda14cbcSMatt Macy }
9795eda14cbcSMatt Macy 
9796eda14cbcSMatt Macy /*
9797eda14cbcSMatt Macy  * Wait for a particular instance of the specified activity to complete, where
9798eda14cbcSMatt Macy  * the instance is identified by 'tag'.
9799eda14cbcSMatt Macy  */
9800eda14cbcSMatt Macy int
9801eda14cbcSMatt Macy spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
9802eda14cbcSMatt Macy     boolean_t *waited)
9803eda14cbcSMatt Macy {
9804eda14cbcSMatt Macy 	return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
9805eda14cbcSMatt Macy }
9806eda14cbcSMatt Macy 
9807eda14cbcSMatt Macy /*
9808eda14cbcSMatt Macy  * Wait for all instances of the specified activity to complete.
9809eda14cbcSMatt Macy  */
9810eda14cbcSMatt Macy int
9811eda14cbcSMatt Macy spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
9812eda14cbcSMatt Macy {
9814eda14cbcSMatt Macy 	return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
9815eda14cbcSMatt Macy }
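
/*
 * Illustrative, never-compiled sketch of a caller. The in-tree consumer of
 * these interfaces is the 'zpool wait' ioctl path; the function name and
 * error handling below are hypothetical simplifications.
 */
#if 0
static int
example_wait_for_scrub(const char *pool)
{
	boolean_t waited;
	int error;

	/* Block until no scrub is running (or the pool goes away). */
	error = spa_wait(pool, ZPOOL_WAIT_SCRUB, &waited);
	if (error == 0 && waited)
		zfs_dbgmsg("scrub on %s finished while we waited", pool);
	return (error);
}
#endif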
9817eda14cbcSMatt Macy sysevent_t *
9818eda14cbcSMatt Macy spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
9819eda14cbcSMatt Macy {
9820eda14cbcSMatt Macy 	sysevent_t *ev = NULL;
9821eda14cbcSMatt Macy #ifdef _KERNEL
9822eda14cbcSMatt Macy 	nvlist_t *resource;
9823eda14cbcSMatt Macy 
9824eda14cbcSMatt Macy 	resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
9825eda14cbcSMatt Macy 	if (resource) {
9826eda14cbcSMatt Macy 		ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
9827eda14cbcSMatt Macy 		ev->resource = resource;
9828eda14cbcSMatt Macy 	}
9829eda14cbcSMatt Macy #endif
9830eda14cbcSMatt Macy 	return (ev);
9831eda14cbcSMatt Macy }
9832eda14cbcSMatt Macy 
9833eda14cbcSMatt Macy void
9834eda14cbcSMatt Macy spa_event_post(sysevent_t *ev)
9835eda14cbcSMatt Macy {
9836eda14cbcSMatt Macy #ifdef _KERNEL
9837eda14cbcSMatt Macy 	if (ev) {
9838eda14cbcSMatt Macy 		zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
9839eda14cbcSMatt Macy 		kmem_free(ev, sizeof (*ev));
9840eda14cbcSMatt Macy 	}
9841eda14cbcSMatt Macy #endif
9842eda14cbcSMatt Macy }
9843eda14cbcSMatt Macy 
9844eda14cbcSMatt Macy /*
9845eda14cbcSMatt Macy  * Post a zevent corresponding to the given sysevent. The 'name' must be one
9846eda14cbcSMatt Macy  * of the event definitions in sys/sysevent/eventdefs.h. The payload will be
9847eda14cbcSMatt Macy  * filled in from the spa and (optionally) the vdev. This doesn't do anything
9848eda14cbcSMatt Macy  * in the userland libzpool, as we don't want consumers to misinterpret ztest
9849eda14cbcSMatt Macy  * or zdb as real changes.
9850eda14cbcSMatt Macy  */
9851eda14cbcSMatt Macy void
9852eda14cbcSMatt Macy spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
9853eda14cbcSMatt Macy {
9854eda14cbcSMatt Macy 	spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
9855eda14cbcSMatt Macy }
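
/*
 * Illustrative, never-compiled sketch of a caller. The function name is
 * hypothetical; ESC_ZFS_VDEV_REMOVE is one of the event definitions in
 * sys/sysevent/eventdefs.h mentioned above, chosen only as an example.
 */
#if 0
static void
example_notify_vdev_remove(spa_t *spa, vdev_t *vd)
{
	/* Emit a zevent whose payload names the pool and affected vdev. */
	spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
}
#endif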
9857eda14cbcSMatt Macy /* state manipulation functions */
9858eda14cbcSMatt Macy EXPORT_SYMBOL(spa_open);
9859eda14cbcSMatt Macy EXPORT_SYMBOL(spa_open_rewind);
9860eda14cbcSMatt Macy EXPORT_SYMBOL(spa_get_stats);
9861eda14cbcSMatt Macy EXPORT_SYMBOL(spa_create);
9862eda14cbcSMatt Macy EXPORT_SYMBOL(spa_import);
9863eda14cbcSMatt Macy EXPORT_SYMBOL(spa_tryimport);
9864eda14cbcSMatt Macy EXPORT_SYMBOL(spa_destroy);
9865eda14cbcSMatt Macy EXPORT_SYMBOL(spa_export);
9866eda14cbcSMatt Macy EXPORT_SYMBOL(spa_reset);
9867eda14cbcSMatt Macy EXPORT_SYMBOL(spa_async_request);
9868eda14cbcSMatt Macy EXPORT_SYMBOL(spa_async_suspend);
9869eda14cbcSMatt Macy EXPORT_SYMBOL(spa_async_resume);
9870eda14cbcSMatt Macy EXPORT_SYMBOL(spa_inject_addref);
9871eda14cbcSMatt Macy EXPORT_SYMBOL(spa_inject_delref);
9872eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan_stat_init);
9873eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan_get_stats);
9874eda14cbcSMatt Macy 
9875eda14cbcSMatt Macy /* device manipulation */
9876eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_add);
9877eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_attach);
9878eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_detach);
9879eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_setpath);
9880eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_setfru);
9881eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_split_mirror);
9882eda14cbcSMatt Macy 
9883eda14cbcSMatt Macy /* spare state (which is global across all pools) */
9884eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_add);
9885eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_remove);
9886eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_exists);
9887eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_activate);
9888eda14cbcSMatt Macy 
9889eda14cbcSMatt Macy /* L2ARC state (which is global across all pools) */
9890eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_add);
9891eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_remove);
9892eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_exists);
9893eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_activate);
9894eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_drop);
9895eda14cbcSMatt Macy 
9896eda14cbcSMatt Macy /* scanning */
9897eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan);
9898eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan_stop);
9899eda14cbcSMatt Macy 
9900eda14cbcSMatt Macy /* spa syncing */
9901eda14cbcSMatt Macy EXPORT_SYMBOL(spa_sync); /* only for DMU use */
9902eda14cbcSMatt Macy EXPORT_SYMBOL(spa_sync_allpools);
9903eda14cbcSMatt Macy 
9904eda14cbcSMatt Macy /* properties */
9905eda14cbcSMatt Macy EXPORT_SYMBOL(spa_prop_set);
9906eda14cbcSMatt Macy EXPORT_SYMBOL(spa_prop_get);
9907eda14cbcSMatt Macy EXPORT_SYMBOL(spa_prop_clear_bootfs);
9908eda14cbcSMatt Macy 
9909eda14cbcSMatt Macy /* asynchronous event notification */
9910eda14cbcSMatt Macy EXPORT_SYMBOL(spa_event_notify);
9911eda14cbcSMatt Macy 
9912eda14cbcSMatt Macy /* BEGIN CSTYLED */
9913eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW,
991416038816SMartin Matuska 	"log2 fraction of arc that can be used by inflight I/Os when "
9915eda14cbcSMatt Macy 	"verifying pool during import");
9916eda14cbcSMatt Macy 
9917eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
9918eda14cbcSMatt Macy 	"Set to traverse metadata on pool import");
9919eda14cbcSMatt Macy 
9920eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
9921eda14cbcSMatt Macy 	"Set to traverse data on pool import");
9922eda14cbcSMatt Macy 
9923eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
9924eda14cbcSMatt Macy 	"Print vdev tree to zfs_dbgmsg during pool import");
9925eda14cbcSMatt Macy 
9926eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
9927eda14cbcSMatt Macy 	"Percentage of CPUs to run an IO worker thread");
9928eda14cbcSMatt Macy 
992916038816SMartin Matuska ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RD,
993016038816SMartin Matuska 	"Number of threads per IO worker taskqueue");
993116038816SMartin Matuska 
9932eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, ULONG, ZMOD_RW,
9933eda14cbcSMatt Macy 	"Allow importing pool with up to this number of missing top-level "
9934eda14cbcSMatt Macy 	"vdevs (in read-only mode)");
9935eda14cbcSMatt Macy 
9936eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT, ZMOD_RW,
9937eda14cbcSMatt Macy 	"Set the livelist condense zthr to pause");
9938eda14cbcSMatt Macy 
9939eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT, ZMOD_RW,
9940eda14cbcSMatt Macy 	"Set the livelist condense synctask to pause");
9941eda14cbcSMatt Macy 
9942eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel, INT, ZMOD_RW,
9943eda14cbcSMatt Macy 	"Whether livelist condensing was canceled in the synctask");
9944eda14cbcSMatt Macy 
9945eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel, INT, ZMOD_RW,
9946eda14cbcSMatt Macy 	"Whether livelist condensing was canceled in the zthr function");
9947eda14cbcSMatt Macy 
9948eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT, ZMOD_RW,
9949eda14cbcSMatt Macy 	"Whether extra ALLOC blkptrs were added to a livelist entry while it "
9950eda14cbcSMatt Macy 	"was being condensed");
9951eda14cbcSMatt Macy /* END CSTYLED */