/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>

#ifdef	_KERNEL
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
static int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,		/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,		/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,		/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per
 * taskq and the number of taskqs; when dispatching an event in this case,
 * the particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
id_t		zio_taskq_psrset_bind = PS_NONE;
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}
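
/*
 * Illustration only (not compiled): for each property, the helper above
 * nests a small nvlist under the property's name, so a single entry in the
 * caller's nvlist looks roughly like
 *
 *	"size" -> {
 *		ZPROP_SOURCE -> ZPROP_SRC_NONE,
 *		ZPROP_VALUE  -> 68719476736
 *	}
 *
 * The numeric value shown is a made-up example.
 */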

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools before this version, freedir will be
		 * NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
	}

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}
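
	/*
	 * Illustrative sketch (not compiled): the loop below follows the
	 * standard zap_cursor_t idiom for walking a ZAP object:
	 *
	 *	for (zap_cursor_init(&zc, os, obj);
	 *	    zap_cursor_retrieve(&zc, &za) == 0;
	 *	    zap_cursor_advance(&zc))
	 *		... examine za.za_name, za.za_integer_length, ...
	 *	zap_cursor_fini(&zc);
	 */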

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}
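
/*
 * For illustration only: a props nvlist handed to spa_prop_set() below
 * might carry entries such as
 *
 *	"comment"		-> "rack 12, shelf 3"	(string)
 *	"feature@async_destroy"	-> 0			(uint64, "enabled")
 *
 * Both names and values here are hypothetical examples; whatever is
 * actually passed in is checked by spa_prop_validate().
 */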

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t propval;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_hold(strval, FTAG, &os))
					break;

				/*
				 * Must be ZPL, and its property settings
				 * must be supported by GRUB (compression
				 * is not gzip, and large blocks are not used).
				 */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &propval)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(propval)) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				/*
				 * The kernel doesn't have an easy isprint()
				 * check. For this kernel check, we merely
				 * check ASCII apart from DEL. Fix this if
				 * there is an easy-to-use kernel isprint().
				 */
				if (*check >= 0x7f) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = E2BIG;
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}
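
/*
 * For illustration only, the cachefile rules enforced above accept or
 * reject values like these (paths are made-up examples):
 *
 *	""			accepted (use the default cache file)
 *	"none"			accepted (don't cache this pool's config)
 *	"/etc/zfs/alt.cache"	accepted (absolute path)
 *	"alt.cache"		EINVAL (not absolute)
 *	"/etc/zfs/"		EINVAL (no file name component)
 *	"/etc/zfs/."		EINVAL (degenerate final component)
 */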

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}
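
/*
 * Illustrative note: the check/sync pair above follows the usual
 * dsl_sync_task() contract -- the check function runs first and may veto
 * the operation (here, ENXIO if the vdev tree is unhealthy); only if it
 * succeeds does the sync function run in syncing context with the same
 * argument, as in the call made by spa_change_guid() below:
 *
 *	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
 *	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
 */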

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

	if (error == 0) {
		spa_config_sync(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}
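
/*
 * Note (illustrative): AVL comparators are expected to return exactly
 * -1, 0, or 1, which is why the raw comparison result above is clamped
 * to that range rather than returned directly.
 */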

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = zio_taskq_batch_pct;
		break;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	for (uint_t i = 0; i < count; i++) {
		taskq_t *tq;

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive.  Run it at slightly lower priority
			 * than the other taskqs.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
				pri--;

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
		}

		tqs->stqs_taskq[i] = tq;
	}
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT0(tqs->stqs_count);
		return;
	}

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}
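
/*
 * Illustration only: with the zio_taskqs table at the top of this file,
 * spa_taskqs_init(spa, ZIO_TYPE_READ, ZIO_TASKQ_INTR) sees ZTI_P(12, 8),
 * i.e. { ZTI_MODE_FIXED, 12, 8 }, and therefore creates eight taskqs of
 * twelve threads each, named by the snprintf() above roughly as
 *
 *	<zio_type_name[READ]>_intr_0 ... <zio_type_name[READ]>_intr_7
 *
 * (the exact name strings come from the zio_type_name[] table in zio.c).
 */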

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself.  In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}
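
/*
 * Usage sketch (hypothetical caller, not compiled): dispatching work onto
 * the read-issue taskqs with a caller-provided taskq_ent_t might look like
 *
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_READ, ZIO_TASKQ_ISSUE,
 *	    my_func, my_arg, TQ_SLEEP, &ent);
 *
 * my_func/my_arg are placeholders; the real caller in zio.c embeds the
 * taskq_ent_t in the zio itself so no allocation is needed at dispatch.
 */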

static void
spa_create_zio_taskqs(spa_t *spa)
{
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#ifdef _KERNEL
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}

	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;
	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
	    offsetof(objset_t, os_evicting_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
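
/*
 * Note (illustrative): the list_create()/avl_create() calls above use
 * offsetof() to embed the linkage node inside each vdev_t, objset_t, or
 * spa_error_entry_t, so objects can be linked onto these lists without
 * any per-node allocation.
 */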

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	spa_evicting_os_wait(spa);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_evicting_os_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
}
This 1173fa9e4066Sahrens * will create all the necessary vdevs in the appropriate layout, with each vdev 1174fa9e4066Sahrens * in the CLOSED state. This will prep the pool before open/creation/import. 1175fa9e4066Sahrens * All vdev validation is done by the vdev_alloc() routine. 1176fa9e4066Sahrens */ 117799653d4eSeschrock static int 117899653d4eSeschrock spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, 117999653d4eSeschrock uint_t id, int atype) 1180fa9e4066Sahrens { 1181fa9e4066Sahrens nvlist_t **child; 1182573ca77eSGeorge Wilson uint_t children; 118399653d4eSeschrock int error; 1184fa9e4066Sahrens 118599653d4eSeschrock if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) 118699653d4eSeschrock return (error); 1187fa9e4066Sahrens 118899653d4eSeschrock if ((*vdp)->vdev_ops->vdev_op_leaf) 118999653d4eSeschrock return (0); 1190fa9e4066Sahrens 1191e14bb325SJeff Bonwick error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1192e14bb325SJeff Bonwick &child, &children); 1193e14bb325SJeff Bonwick 1194e14bb325SJeff Bonwick if (error == ENOENT) 1195e14bb325SJeff Bonwick return (0); 1196e14bb325SJeff Bonwick 1197e14bb325SJeff Bonwick if (error) { 119899653d4eSeschrock vdev_free(*vdp); 119999653d4eSeschrock *vdp = NULL; 1200be6fd75aSMatthew Ahrens return (SET_ERROR(EINVAL)); 1201fa9e4066Sahrens } 1202fa9e4066Sahrens 1203573ca77eSGeorge Wilson for (int c = 0; c < children; c++) { 120499653d4eSeschrock vdev_t *vd; 120599653d4eSeschrock if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, 120699653d4eSeschrock atype)) != 0) { 120799653d4eSeschrock vdev_free(*vdp); 120899653d4eSeschrock *vdp = NULL; 120999653d4eSeschrock return (error); 1210fa9e4066Sahrens } 1211fa9e4066Sahrens } 1212fa9e4066Sahrens 121399653d4eSeschrock ASSERT(*vdp != NULL); 121499653d4eSeschrock 121599653d4eSeschrock return (0); 1216fa9e4066Sahrens } 1217fa9e4066Sahrens 1218fa9e4066Sahrens /* 1219fa9e4066Sahrens * Opposite of spa_load(). 1220fa9e4066Sahrens */ 1221fa9e4066Sahrens static void 1222fa9e4066Sahrens spa_unload(spa_t *spa) 1223fa9e4066Sahrens { 122499653d4eSeschrock int i; 122599653d4eSeschrock 1226e14bb325SJeff Bonwick ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1227e14bb325SJeff Bonwick 1228fa9e4066Sahrens /* 1229ea8dc4b6Seschrock * Stop async tasks. 1230ea8dc4b6Seschrock */ 1231ea8dc4b6Seschrock spa_async_suspend(spa); 1232ea8dc4b6Seschrock 1233ea8dc4b6Seschrock /* 1234fa9e4066Sahrens * Stop syncing. 1235fa9e4066Sahrens */ 1236fa9e4066Sahrens if (spa->spa_sync_on) { 1237fa9e4066Sahrens txg_sync_stop(spa->spa_dsl_pool); 1238fa9e4066Sahrens spa->spa_sync_on = B_FALSE; 1239fa9e4066Sahrens } 1240fa9e4066Sahrens 1241fa9e4066Sahrens /* 1242e14bb325SJeff Bonwick * Wait for any outstanding async I/O to complete. 1243fa9e4066Sahrens */ 124454d692b7SGeorge Wilson if (spa->spa_async_zio_root != NULL) { 12456f834bc1SMatthew Ahrens for (int i = 0; i < max_ncpus; i++) 12466f834bc1SMatthew Ahrens (void) zio_wait(spa->spa_async_zio_root[i]); 12476f834bc1SMatthew Ahrens kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *)); 124854d692b7SGeorge Wilson spa->spa_async_zio_root = NULL; 124954d692b7SGeorge Wilson } 1250fa9e4066Sahrens 1251cde58dbcSMatthew Ahrens bpobj_close(&spa->spa_deferred_bpobj); 1252cde58dbcSMatthew Ahrens 12530713e232SGeorge Wilson spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 12540713e232SGeorge Wilson 12550713e232SGeorge Wilson /* 12560713e232SGeorge Wilson * Close all vdevs. 
12570713e232SGeorge Wilson */ 12580713e232SGeorge Wilson if (spa->spa_root_vdev) 12590713e232SGeorge Wilson vdev_free(spa->spa_root_vdev); 12600713e232SGeorge Wilson ASSERT(spa->spa_root_vdev == NULL); 12610713e232SGeorge Wilson 1262fa9e4066Sahrens /* 1263fa9e4066Sahrens * Close the dsl pool. 1264fa9e4066Sahrens */ 1265fa9e4066Sahrens if (spa->spa_dsl_pool) { 1266fa9e4066Sahrens dsl_pool_close(spa->spa_dsl_pool); 1267fa9e4066Sahrens spa->spa_dsl_pool = NULL; 1268afee20e4SGeorge Wilson spa->spa_meta_objset = NULL; 1269fa9e4066Sahrens } 1270fa9e4066Sahrens 1271b24ab676SJeff Bonwick ddt_unload(spa); 1272b24ab676SJeff Bonwick 12738ad4d6ddSJeff Bonwick 12748ad4d6ddSJeff Bonwick /* 12758ad4d6ddSJeff Bonwick * Drop and purge level 2 cache 12768ad4d6ddSJeff Bonwick */ 12778ad4d6ddSJeff Bonwick spa_l2cache_drop(spa); 12788ad4d6ddSJeff Bonwick 1279fa94a07fSbrendan for (i = 0; i < spa->spa_spares.sav_count; i++) 1280fa94a07fSbrendan vdev_free(spa->spa_spares.sav_vdevs[i]); 1281fa94a07fSbrendan if (spa->spa_spares.sav_vdevs) { 1282fa94a07fSbrendan kmem_free(spa->spa_spares.sav_vdevs, 1283fa94a07fSbrendan spa->spa_spares.sav_count * sizeof (void *)); 1284fa94a07fSbrendan spa->spa_spares.sav_vdevs = NULL; 128599653d4eSeschrock } 1286fa94a07fSbrendan if (spa->spa_spares.sav_config) { 1287fa94a07fSbrendan nvlist_free(spa->spa_spares.sav_config); 1288fa94a07fSbrendan spa->spa_spares.sav_config = NULL; 1289fa94a07fSbrendan } 12902ce8af81SEric Schrock spa->spa_spares.sav_count = 0; 1291fa94a07fSbrendan 1292cd0837ccSGeorge Wilson for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 1293cd0837ccSGeorge Wilson vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]); 1294fa94a07fSbrendan vdev_free(spa->spa_l2cache.sav_vdevs[i]); 1295cd0837ccSGeorge Wilson } 1296fa94a07fSbrendan if (spa->spa_l2cache.sav_vdevs) { 1297fa94a07fSbrendan kmem_free(spa->spa_l2cache.sav_vdevs, 1298fa94a07fSbrendan spa->spa_l2cache.sav_count * sizeof (void *)); 1299fa94a07fSbrendan spa->spa_l2cache.sav_vdevs = NULL; 1300fa94a07fSbrendan } 1301fa94a07fSbrendan if (spa->spa_l2cache.sav_config) { 1302fa94a07fSbrendan nvlist_free(spa->spa_l2cache.sav_config); 1303fa94a07fSbrendan spa->spa_l2cache.sav_config = NULL; 130499653d4eSeschrock } 13052ce8af81SEric Schrock spa->spa_l2cache.sav_count = 0; 130699653d4eSeschrock 1307ea8dc4b6Seschrock spa->spa_async_suspended = 0; 13088ad4d6ddSJeff Bonwick 13098704186eSDan McDonald if (spa->spa_comment != NULL) { 13108704186eSDan McDonald spa_strfree(spa->spa_comment); 13118704186eSDan McDonald spa->spa_comment = NULL; 13128704186eSDan McDonald } 13138704186eSDan McDonald 13148ad4d6ddSJeff Bonwick spa_config_exit(spa, SCL_ALL, FTAG); 1315fa9e4066Sahrens } 1316fa9e4066Sahrens 1317fa9e4066Sahrens /* 131899653d4eSeschrock * Load (or re-load) the current list of vdevs describing the active spares for 131999653d4eSeschrock * this pool. When this is called, we have some form of basic information in 1320fa94a07fSbrendan * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and 1321fa94a07fSbrendan * then re-generate a more complete list including status information. 
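 * The regenerated list is stored back into 'spa_spares.sav_config'
 * under ZPOOL_CONFIG_SPARES at the bottom of this function.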
132299653d4eSeschrock */ 132399653d4eSeschrock static void 132499653d4eSeschrock spa_load_spares(spa_t *spa) 132599653d4eSeschrock { 132699653d4eSeschrock nvlist_t **spares; 132799653d4eSeschrock uint_t nspares; 132899653d4eSeschrock int i; 132939c23413Seschrock vdev_t *vd, *tvd; 133099653d4eSeschrock 1331e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1332e14bb325SJeff Bonwick 133399653d4eSeschrock /* 133499653d4eSeschrock * First, close and free any existing spare vdevs. 133599653d4eSeschrock */ 1336fa94a07fSbrendan for (i = 0; i < spa->spa_spares.sav_count; i++) { 1337fa94a07fSbrendan vd = spa->spa_spares.sav_vdevs[i]; 133839c23413Seschrock 133939c23413Seschrock /* Undo the call to spa_activate() below */ 1340c5904d13Seschrock if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 1341c5904d13Seschrock B_FALSE)) != NULL && tvd->vdev_isspare) 134239c23413Seschrock spa_spare_remove(tvd); 134339c23413Seschrock vdev_close(vd); 134439c23413Seschrock vdev_free(vd); 134599653d4eSeschrock } 134639c23413Seschrock 1347fa94a07fSbrendan if (spa->spa_spares.sav_vdevs) 1348fa94a07fSbrendan kmem_free(spa->spa_spares.sav_vdevs, 1349fa94a07fSbrendan spa->spa_spares.sav_count * sizeof (void *)); 135099653d4eSeschrock 1351fa94a07fSbrendan if (spa->spa_spares.sav_config == NULL) 135299653d4eSeschrock nspares = 0; 135399653d4eSeschrock else 1354fa94a07fSbrendan VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 135599653d4eSeschrock ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 135699653d4eSeschrock 1357fa94a07fSbrendan spa->spa_spares.sav_count = (int)nspares; 1358fa94a07fSbrendan spa->spa_spares.sav_vdevs = NULL; 135999653d4eSeschrock 136099653d4eSeschrock if (nspares == 0) 136199653d4eSeschrock return; 136299653d4eSeschrock 136399653d4eSeschrock /* 136499653d4eSeschrock * Construct the array of vdevs, opening them to get status in the 136539c23413Seschrock * process. For each spare, there are potentially two different vdev_t 136639c23413Seschrock * structures associated with it: one in the list of spares (used only 136739c23413Seschrock * for basic validation purposes) and one in the active vdev 136839c23413Seschrock * configuration (if it's spared in). During this phase we open and 136939c23413Seschrock * validate each vdev on the spare list. If the vdev also exists in the 137039c23413Seschrock * active configuration, then we also mark this vdev as an active spare. 137199653d4eSeschrock */ 1372fa94a07fSbrendan spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *), 1373fa94a07fSbrendan KM_SLEEP); 1374fa94a07fSbrendan for (i = 0; i < spa->spa_spares.sav_count; i++) { 137599653d4eSeschrock VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0, 137699653d4eSeschrock VDEV_ALLOC_SPARE) == 0); 137799653d4eSeschrock ASSERT(vd != NULL); 137899653d4eSeschrock 1379fa94a07fSbrendan spa->spa_spares.sav_vdevs[i] = vd; 138099653d4eSeschrock 1381c5904d13Seschrock if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 1382c5904d13Seschrock B_FALSE)) != NULL) { 138339c23413Seschrock if (!tvd->vdev_isspare) 138439c23413Seschrock spa_spare_add(tvd); 138539c23413Seschrock 138639c23413Seschrock /* 138739c23413Seschrock * We only mark the spare active if we were successfully 138839c23413Seschrock * able to load the vdev. Otherwise, importing a pool 138939c23413Seschrock * with a bad active spare would result in strange 139039c23413Seschrock * behavior, because multiple pools would think the spare 139139c23413Seschrock * is actively in use. 
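 * (Hot spares may be shared between pools, which is how more than one
 * configuration can legitimately reference the same device.)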
139239c23413Seschrock * 139339c23413Seschrock * There is a vulnerability here to an equally bizarre 139439c23413Seschrock * circumstance, where a dead active spare is later 139539c23413Seschrock * brought back to life (onlined or otherwise). Given 139639c23413Seschrock * the rarity of this scenario, and the extra complexity 139739c23413Seschrock * it adds, we ignore the possibility. 139839c23413Seschrock */ 139939c23413Seschrock if (!vdev_is_dead(tvd)) 140039c23413Seschrock spa_spare_activate(tvd); 140139c23413Seschrock } 140239c23413Seschrock 1403e14bb325SJeff Bonwick vd->vdev_top = vd; 14046809eb4eSEric Schrock vd->vdev_aux = &spa->spa_spares; 1405e14bb325SJeff Bonwick 140699653d4eSeschrock if (vdev_open(vd) != 0) 140799653d4eSeschrock continue; 140899653d4eSeschrock 1409fa94a07fSbrendan if (vdev_validate_aux(vd) == 0) 1410fa94a07fSbrendan spa_spare_add(vd); 141199653d4eSeschrock } 141299653d4eSeschrock 141399653d4eSeschrock /* 141499653d4eSeschrock * Recompute the stashed list of spares, with status information 141599653d4eSeschrock * this time. 141699653d4eSeschrock */ 1417fa94a07fSbrendan VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 141899653d4eSeschrock DATA_TYPE_NVLIST_ARRAY) == 0); 141999653d4eSeschrock 1420fa94a07fSbrendan spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), 1421fa94a07fSbrendan KM_SLEEP); 1422fa94a07fSbrendan for (i = 0; i < spa->spa_spares.sav_count; i++) 1423fa94a07fSbrendan spares[i] = vdev_config_generate(spa, 14243f9d6ad7SLin Ling spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE); 1425fa94a07fSbrendan VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 1426fa94a07fSbrendan ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0); 1427fa94a07fSbrendan for (i = 0; i < spa->spa_spares.sav_count; i++) 142899653d4eSeschrock nvlist_free(spares[i]); 1429fa94a07fSbrendan kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); 1430fa94a07fSbrendan } 1431fa94a07fSbrendan 1432fa94a07fSbrendan /* 1433fa94a07fSbrendan * Load (or re-load) the current list of vdevs describing the active l2cache for 1434fa94a07fSbrendan * this pool. When this is called, we have some form of basic information in 1435fa94a07fSbrendan * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and 1436fa94a07fSbrendan * then re-generate a more complete list including status information. 1437fa94a07fSbrendan * Devices which are already active have their details maintained, and are 1438fa94a07fSbrendan * not re-opened. 
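 * Devices that have disappeared from the config are removed from the
 * L2ARC and freed in the purge loop below.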
1439fa94a07fSbrendan */ 1440fa94a07fSbrendan static void 1441fa94a07fSbrendan spa_load_l2cache(spa_t *spa) 1442fa94a07fSbrendan { 1443fa94a07fSbrendan nvlist_t **l2cache; 1444fa94a07fSbrendan uint_t nl2cache; 1445fa94a07fSbrendan int i, j, oldnvdevs; 1446573ca77eSGeorge Wilson uint64_t guid; 1447fa94a07fSbrendan vdev_t *vd, **oldvdevs, **newvdevs; 1448fa94a07fSbrendan spa_aux_vdev_t *sav = &spa->spa_l2cache; 1449fa94a07fSbrendan 1450e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1451e14bb325SJeff Bonwick 1452fa94a07fSbrendan if (sav->sav_config != NULL) { 1453fa94a07fSbrendan VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 1454fa94a07fSbrendan ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 1455fa94a07fSbrendan newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP); 1456fa94a07fSbrendan } else { 1457fa94a07fSbrendan nl2cache = 0; 1458d5285caeSGeorge Wilson newvdevs = NULL; 1459fa94a07fSbrendan } 1460fa94a07fSbrendan 1461fa94a07fSbrendan oldvdevs = sav->sav_vdevs; 1462fa94a07fSbrendan oldnvdevs = sav->sav_count; 1463fa94a07fSbrendan sav->sav_vdevs = NULL; 1464fa94a07fSbrendan sav->sav_count = 0; 1465fa94a07fSbrendan 1466fa94a07fSbrendan /* 1467fa94a07fSbrendan * Process new nvlist of vdevs. 1468fa94a07fSbrendan */ 1469fa94a07fSbrendan for (i = 0; i < nl2cache; i++) { 1470fa94a07fSbrendan VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID, 1471fa94a07fSbrendan &guid) == 0); 1472fa94a07fSbrendan 1473fa94a07fSbrendan newvdevs[i] = NULL; 1474fa94a07fSbrendan for (j = 0; j < oldnvdevs; j++) { 1475fa94a07fSbrendan vd = oldvdevs[j]; 1476fa94a07fSbrendan if (vd != NULL && guid == vd->vdev_guid) { 1477fa94a07fSbrendan /* 1478fa94a07fSbrendan * Retain previous vdev for add/remove ops. 1479fa94a07fSbrendan */ 1480fa94a07fSbrendan newvdevs[i] = vd; 1481fa94a07fSbrendan oldvdevs[j] = NULL; 1482fa94a07fSbrendan break; 1483fa94a07fSbrendan } 1484fa94a07fSbrendan } 1485fa94a07fSbrendan 1486fa94a07fSbrendan if (newvdevs[i] == NULL) { 1487fa94a07fSbrendan /* 1488fa94a07fSbrendan * Create new vdev 1489fa94a07fSbrendan */ 1490fa94a07fSbrendan VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 1491fa94a07fSbrendan VDEV_ALLOC_L2CACHE) == 0); 1492fa94a07fSbrendan ASSERT(vd != NULL); 1493fa94a07fSbrendan newvdevs[i] = vd; 1494fa94a07fSbrendan 1495fa94a07fSbrendan /* 1496fa94a07fSbrendan * Commit this vdev as an l2cache device, 1497fa94a07fSbrendan * even if it fails to open. 
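 * (This way the device still appears in the pool configuration,
 * where it can be inspected and removed by the administrator.)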
1498fa94a07fSbrendan */ 1499fa94a07fSbrendan spa_l2cache_add(vd); 1500fa94a07fSbrendan 1501c5904d13Seschrock vd->vdev_top = vd; 1502c5904d13Seschrock vd->vdev_aux = sav; 1503c5904d13Seschrock 1504c5904d13Seschrock spa_l2cache_activate(vd); 1505c5904d13Seschrock 1506fa94a07fSbrendan if (vdev_open(vd) != 0) 1507fa94a07fSbrendan continue; 1508fa94a07fSbrendan 1509fa94a07fSbrendan (void) vdev_validate_aux(vd); 1510fa94a07fSbrendan 1511*5f992543SArne Jansen if (!vdev_is_dead(vd)) 1512*5f992543SArne Jansen l2arc_add_vdev(spa, vd); 1513fa94a07fSbrendan } 1514fa94a07fSbrendan } 1515fa94a07fSbrendan 1516fa94a07fSbrendan /* 1517fa94a07fSbrendan * Purge vdevs that were dropped 1518fa94a07fSbrendan */ 1519fa94a07fSbrendan for (i = 0; i < oldnvdevs; i++) { 1520fa94a07fSbrendan uint64_t pool; 1521fa94a07fSbrendan 1522fa94a07fSbrendan vd = oldvdevs[i]; 1523fa94a07fSbrendan if (vd != NULL) { 1524cd0837ccSGeorge Wilson ASSERT(vd->vdev_isl2cache); 1525cd0837ccSGeorge Wilson 15268ad4d6ddSJeff Bonwick if (spa_l2cache_exists(vd->vdev_guid, &pool) && 15278ad4d6ddSJeff Bonwick pool != 0ULL && l2arc_vdev_present(vd)) 1528fa94a07fSbrendan l2arc_remove_vdev(vd); 1529cd0837ccSGeorge Wilson vdev_clear_stats(vd); 1530cd0837ccSGeorge Wilson vdev_free(vd); 1531fa94a07fSbrendan } 1532fa94a07fSbrendan } 1533fa94a07fSbrendan 1534fa94a07fSbrendan if (oldvdevs) 1535fa94a07fSbrendan kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 1536fa94a07fSbrendan 1537fa94a07fSbrendan if (sav->sav_config == NULL) 1538fa94a07fSbrendan goto out; 1539fa94a07fSbrendan 1540fa94a07fSbrendan sav->sav_vdevs = newvdevs; 1541fa94a07fSbrendan sav->sav_count = (int)nl2cache; 1542fa94a07fSbrendan 1543fa94a07fSbrendan /* 1544fa94a07fSbrendan * Recompute the stashed list of l2cache devices, with status 1545fa94a07fSbrendan * information this time. 
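 * (This mirrors the spare-list regeneration above: each entry is
 * rebuilt with vdev_config_generate() and stored back under
 * ZPOOL_CONFIG_L2CACHE.)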
1546fa94a07fSbrendan */ 1547fa94a07fSbrendan VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 1548fa94a07fSbrendan DATA_TYPE_NVLIST_ARRAY) == 0); 1549fa94a07fSbrendan 1550fa94a07fSbrendan l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 1551fa94a07fSbrendan for (i = 0; i < sav->sav_count; i++) 1552fa94a07fSbrendan l2cache[i] = vdev_config_generate(spa, 15533f9d6ad7SLin Ling sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE); 1554fa94a07fSbrendan VERIFY(nvlist_add_nvlist_array(sav->sav_config, 1555fa94a07fSbrendan ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0); 1556fa94a07fSbrendan out: 1557fa94a07fSbrendan for (i = 0; i < sav->sav_count; i++) 1558fa94a07fSbrendan nvlist_free(l2cache[i]); 1559fa94a07fSbrendan if (sav->sav_count) 1560fa94a07fSbrendan kmem_free(l2cache, sav->sav_count * sizeof (void *)); 156199653d4eSeschrock } 156299653d4eSeschrock 156399653d4eSeschrock static int 156499653d4eSeschrock load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 156599653d4eSeschrock { 156699653d4eSeschrock dmu_buf_t *db; 156799653d4eSeschrock char *packed = NULL; 156899653d4eSeschrock size_t nvsize = 0; 156999653d4eSeschrock int error; 157099653d4eSeschrock *value = NULL; 157199653d4eSeschrock 1572a45f1c3cSBrian Behlendorf error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); 1573a45f1c3cSBrian Behlendorf if (error != 0) 1574a45f1c3cSBrian Behlendorf return (error); 1575a45f1c3cSBrian Behlendorf 157699653d4eSeschrock nvsize = *(uint64_t *)db->db_data; 157799653d4eSeschrock dmu_buf_rele(db, FTAG); 157899653d4eSeschrock 157999653d4eSeschrock packed = kmem_alloc(nvsize, KM_SLEEP); 15807bfdf011SNeil Perrin error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, 15817bfdf011SNeil Perrin DMU_READ_PREFETCH); 158299653d4eSeschrock if (error == 0) 158399653d4eSeschrock error = nvlist_unpack(packed, nvsize, value, 0); 158499653d4eSeschrock kmem_free(packed, nvsize); 158599653d4eSeschrock 158699653d4eSeschrock return (error); 158799653d4eSeschrock } 158899653d4eSeschrock 158999653d4eSeschrock /* 15903d7072f8Seschrock * Checks to see if the given vdev could not be opened, in which case we post a 15913d7072f8Seschrock * sysevent to notify the autoreplace code that the device has been removed. 
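 * The check recurses over the entire subtree, but only leaf vdevs that
 * are dead and are not holes generate the sysevent.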
15923d7072f8Seschrock */ 15933d7072f8Seschrock static void 15943d7072f8Seschrock spa_check_removed(vdev_t *vd) 15953d7072f8Seschrock { 1596573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) 15973d7072f8Seschrock spa_check_removed(vd->vdev_child[c]); 15983d7072f8Seschrock 1599efb4a871SYuri Pankov if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) && 1600efb4a871SYuri Pankov !vd->vdev_ishole) { 16013d7072f8Seschrock zfs_post_autoreplace(vd->vdev_spa, vd); 16023d7072f8Seschrock spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK); 16033d7072f8Seschrock } 16043d7072f8Seschrock } 16053d7072f8Seschrock 16063d7072f8Seschrock /* 16074b964adaSGeorge Wilson * Validate the current config against the MOS config 1608e6ca193dSGeorge Wilson */ 16094b964adaSGeorge Wilson static boolean_t 16104b964adaSGeorge Wilson spa_config_valid(spa_t *spa, nvlist_t *config) 1611e6ca193dSGeorge Wilson { 16124b964adaSGeorge Wilson vdev_t *mrvd, *rvd = spa->spa_root_vdev; 16134b964adaSGeorge Wilson nvlist_t *nv; 16144b964adaSGeorge Wilson 16154b964adaSGeorge Wilson VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0); 16164b964adaSGeorge Wilson 16174b964adaSGeorge Wilson spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 16184b964adaSGeorge Wilson VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0); 16194b964adaSGeorge Wilson 16204b964adaSGeorge Wilson ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children); 1621e6ca193dSGeorge Wilson 162288ecc943SGeorge Wilson /* 16234b964adaSGeorge Wilson * If we're doing a normal import, then build up any additional 16244b964adaSGeorge Wilson * diagnostic information about missing devices in this config. 16254b964adaSGeorge Wilson * We'll pass this up to the user for further processing. 162688ecc943SGeorge Wilson */ 16274b964adaSGeorge Wilson if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) { 16284b964adaSGeorge Wilson nvlist_t **child, *nv; 16294b964adaSGeorge Wilson uint64_t idx = 0; 16304b964adaSGeorge Wilson 16314b964adaSGeorge Wilson child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **), 16324b964adaSGeorge Wilson KM_SLEEP); 16334b964adaSGeorge Wilson VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1634e6ca193dSGeorge Wilson 163588ecc943SGeorge Wilson for (int c = 0; c < rvd->vdev_children; c++) { 16364b964adaSGeorge Wilson vdev_t *tvd = rvd->vdev_child[c]; 16374b964adaSGeorge Wilson vdev_t *mtvd = mrvd->vdev_child[c]; 16384b964adaSGeorge Wilson 16394b964adaSGeorge Wilson if (tvd->vdev_ops == &vdev_missing_ops && 16404b964adaSGeorge Wilson mtvd->vdev_ops != &vdev_missing_ops && 16414b964adaSGeorge Wilson mtvd->vdev_islog) 16424b964adaSGeorge Wilson child[idx++] = vdev_config_generate(spa, mtvd, 16434b964adaSGeorge Wilson B_FALSE, 0); 1644e6ca193dSGeorge Wilson } 16454b964adaSGeorge Wilson 16464b964adaSGeorge Wilson if (idx) { 16474b964adaSGeorge Wilson VERIFY(nvlist_add_nvlist_array(nv, 16484b964adaSGeorge Wilson ZPOOL_CONFIG_CHILDREN, child, idx) == 0); 16494b964adaSGeorge Wilson VERIFY(nvlist_add_nvlist(spa->spa_load_info, 16504b964adaSGeorge Wilson ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0); 16514b964adaSGeorge Wilson 16524b964adaSGeorge Wilson for (int i = 0; i < idx; i++) 16534b964adaSGeorge Wilson nvlist_free(child[i]); 16544b964adaSGeorge Wilson } 16554b964adaSGeorge Wilson nvlist_free(nv); 16564b964adaSGeorge Wilson kmem_free(child, rvd->vdev_children * sizeof (char **)); 16574b964adaSGeorge Wilson } 16584b964adaSGeorge Wilson 16594b964adaSGeorge Wilson /* 16604b964adaSGeorge Wilson * Compare the root 
vdev tree with the information we have 16614b964adaSGeorge Wilson * from the MOS config (mrvd). Check each top-level vdev 16624b964adaSGeorge Wilson * with the corresponding MOS config top-level (mtvd). 16634b964adaSGeorge Wilson */ 16644b964adaSGeorge Wilson for (int c = 0; c < rvd->vdev_children; c++) { 16654b964adaSGeorge Wilson vdev_t *tvd = rvd->vdev_child[c]; 16664b964adaSGeorge Wilson vdev_t *mtvd = mrvd->vdev_child[c]; 16674b964adaSGeorge Wilson 16684b964adaSGeorge Wilson /* 16694b964adaSGeorge Wilson * Resolve any "missing" vdevs in the current configuration. 16704b964adaSGeorge Wilson * If we find that the MOS config has more accurate information 16714b964adaSGeorge Wilson * about the top-level vdev then use that vdev instead. 16724b964adaSGeorge Wilson */ 16734b964adaSGeorge Wilson if (tvd->vdev_ops == &vdev_missing_ops && 16744b964adaSGeorge Wilson mtvd->vdev_ops != &vdev_missing_ops) { 16754b964adaSGeorge Wilson 16764b964adaSGeorge Wilson if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) 16774b964adaSGeorge Wilson continue; 16784b964adaSGeorge Wilson 16794b964adaSGeorge Wilson /* 16804b964adaSGeorge Wilson * Device specific actions. 16814b964adaSGeorge Wilson */ 16824b964adaSGeorge Wilson if (mtvd->vdev_islog) { 16834b964adaSGeorge Wilson spa_set_log_state(spa, SPA_LOG_CLEAR); 16844b964adaSGeorge Wilson } else { 16854b964adaSGeorge Wilson /* 16864b964adaSGeorge Wilson * XXX - once we have 'readonly' pool 16874b964adaSGeorge Wilson * support we should be able to handle 16884b964adaSGeorge Wilson * missing data devices by transitioning 16894b964adaSGeorge Wilson * the pool to readonly. 16904b964adaSGeorge Wilson */ 16914b964adaSGeorge Wilson continue; 16924b964adaSGeorge Wilson } 16934b964adaSGeorge Wilson 16944b964adaSGeorge Wilson /* 16954b964adaSGeorge Wilson * Swap the missing vdev with the data we were 16964b964adaSGeorge Wilson * able to obtain from the MOS config. 16974b964adaSGeorge Wilson */ 16984b964adaSGeorge Wilson vdev_remove_child(rvd, tvd); 16994b964adaSGeorge Wilson vdev_remove_child(mrvd, mtvd); 17004b964adaSGeorge Wilson 17014b964adaSGeorge Wilson vdev_add_child(rvd, mtvd); 17024b964adaSGeorge Wilson vdev_add_child(mrvd, tvd); 17034b964adaSGeorge Wilson 170488ecc943SGeorge Wilson spa_config_exit(spa, SCL_ALL, FTAG); 17054b964adaSGeorge Wilson vdev_load(mtvd); 17064b964adaSGeorge Wilson spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 17074b964adaSGeorge Wilson 17084b964adaSGeorge Wilson vdev_reopen(rvd); 17094b964adaSGeorge Wilson } else if (mtvd->vdev_islog) { 17104b964adaSGeorge Wilson /* 17114b964adaSGeorge Wilson * Load the slog device's state from the MOS config 17124b964adaSGeorge Wilson * since it's possible that the label does not 17134b964adaSGeorge Wilson * contain the most up-to-date information. 17144b964adaSGeorge Wilson */ 17154b964adaSGeorge Wilson vdev_load_log_state(tvd, mtvd); 17164b964adaSGeorge Wilson vdev_reopen(tvd); 17174b964adaSGeorge Wilson } 17184b964adaSGeorge Wilson } 17194b964adaSGeorge Wilson vdev_free(mrvd); 17204b964adaSGeorge Wilson spa_config_exit(spa, SCL_ALL, FTAG); 17214b964adaSGeorge Wilson 17224b964adaSGeorge Wilson /* 17234b964adaSGeorge Wilson * Ensure we were able to validate the config. 
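 * The guid sum of the (possibly repaired) vdev tree must match the
 * sum recorded in the uberblock.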
17244b964adaSGeorge Wilson */ 17254b964adaSGeorge Wilson return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum); 1726e6ca193dSGeorge Wilson } 1727e6ca193dSGeorge Wilson 1728e6ca193dSGeorge Wilson /* 1729b87f3af3Sperrin * Check for missing log devices 1730b87f3af3Sperrin */ 17313b2aab18SMatthew Ahrens static boolean_t 1732b87f3af3Sperrin spa_check_logs(spa_t *spa) 1733b87f3af3Sperrin { 17343b2aab18SMatthew Ahrens boolean_t rv = B_FALSE; 173512380e1eSArne Jansen dsl_pool_t *dp = spa_get_dsl(spa); 17363b2aab18SMatthew Ahrens 1737b87f3af3Sperrin switch (spa->spa_log_state) { 1738b87f3af3Sperrin case SPA_LOG_MISSING: 1739b87f3af3Sperrin /* need to recheck in case slog has been restored */ 1740b87f3af3Sperrin case SPA_LOG_UNKNOWN: 174112380e1eSArne Jansen rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 174212380e1eSArne Jansen zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0); 17433b2aab18SMatthew Ahrens if (rv) 17441195e687SMark J Musante spa_set_log_state(spa, SPA_LOG_MISSING); 1745b87f3af3Sperrin break; 1746b87f3af3Sperrin } 17473b2aab18SMatthew Ahrens return (rv); 1748b87f3af3Sperrin } 1749b87f3af3Sperrin 17501195e687SMark J Musante static boolean_t 17511195e687SMark J Musante spa_passivate_log(spa_t *spa) 17521195e687SMark J Musante { 17531195e687SMark J Musante vdev_t *rvd = spa->spa_root_vdev; 17541195e687SMark J Musante boolean_t slog_found = B_FALSE; 17551195e687SMark J Musante 17561195e687SMark J Musante ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 17571195e687SMark J Musante 17581195e687SMark J Musante if (!spa_has_slogs(spa)) 17591195e687SMark J Musante return (B_FALSE); 17601195e687SMark J Musante 17611195e687SMark J Musante for (int c = 0; c < rvd->vdev_children; c++) { 17621195e687SMark J Musante vdev_t *tvd = rvd->vdev_child[c]; 17631195e687SMark J Musante metaslab_group_t *mg = tvd->vdev_mg; 17641195e687SMark J Musante 17651195e687SMark J Musante if (tvd->vdev_islog) { 17661195e687SMark J Musante metaslab_group_passivate(mg); 17671195e687SMark J Musante slog_found = B_TRUE; 17681195e687SMark J Musante } 17691195e687SMark J Musante } 17701195e687SMark J Musante 17711195e687SMark J Musante return (slog_found); 17721195e687SMark J Musante } 17731195e687SMark J Musante 17741195e687SMark J Musante static void 17751195e687SMark J Musante spa_activate_log(spa_t *spa) 17761195e687SMark J Musante { 17771195e687SMark J Musante vdev_t *rvd = spa->spa_root_vdev; 17781195e687SMark J Musante 17791195e687SMark J Musante ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 17801195e687SMark J Musante 17811195e687SMark J Musante for (int c = 0; c < rvd->vdev_children; c++) { 17821195e687SMark J Musante vdev_t *tvd = rvd->vdev_child[c]; 17831195e687SMark J Musante metaslab_group_t *mg = tvd->vdev_mg; 17841195e687SMark J Musante 17851195e687SMark J Musante if (tvd->vdev_islog) 17861195e687SMark J Musante metaslab_group_activate(mg); 17871195e687SMark J Musante } 17881195e687SMark J Musante } 17891195e687SMark J Musante 17901195e687SMark J Musante int 17911195e687SMark J Musante spa_offline_log(spa_t *spa) 17921195e687SMark J Musante { 17933b2aab18SMatthew Ahrens int error; 17941195e687SMark J Musante 17953b2aab18SMatthew Ahrens error = dmu_objset_find(spa_name(spa), zil_vdev_offline, 17963b2aab18SMatthew Ahrens NULL, DS_FIND_CHILDREN); 17973b2aab18SMatthew Ahrens if (error == 0) { 17981195e687SMark J Musante /* 17991195e687SMark J Musante * We successfully offlined the log device; sync out the 18001195e687SMark J Musante * current txg so that the "stubby" block can be removed 
18011195e687SMark J Musante * by zil_sync(). 18021195e687SMark J Musante */ 18031195e687SMark J Musante txg_wait_synced(spa->spa_dsl_pool, 0); 18041195e687SMark J Musante } 18051195e687SMark J Musante return (error); 18061195e687SMark J Musante } 18071195e687SMark J Musante 1808b693757aSEric Schrock static void 1809b693757aSEric Schrock spa_aux_check_removed(spa_aux_vdev_t *sav) 1810b693757aSEric Schrock { 1811b24ab676SJeff Bonwick for (int i = 0; i < sav->sav_count; i++) 1812b693757aSEric Schrock spa_check_removed(sav->sav_vdevs[i]); 1813b693757aSEric Schrock } 1814b693757aSEric Schrock 1815b24ab676SJeff Bonwick void 1816b24ab676SJeff Bonwick spa_claim_notify(zio_t *zio) 1817b24ab676SJeff Bonwick { 1818b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 1819b24ab676SJeff Bonwick 1820b24ab676SJeff Bonwick if (zio->io_error) 1821b24ab676SJeff Bonwick return; 1822b24ab676SJeff Bonwick 1823b24ab676SJeff Bonwick mutex_enter(&spa->spa_props_lock); /* any mutex will do */ 1824b24ab676SJeff Bonwick if (spa->spa_claim_max_txg < zio->io_bp->blk_birth) 1825b24ab676SJeff Bonwick spa->spa_claim_max_txg = zio->io_bp->blk_birth; 1826b24ab676SJeff Bonwick mutex_exit(&spa->spa_props_lock); 1827b24ab676SJeff Bonwick } 1828b24ab676SJeff Bonwick 1829468c413aSTim Haley typedef struct spa_load_error { 1830c8ee1847SVictor Latushkin uint64_t sle_meta_count; 1831468c413aSTim Haley uint64_t sle_data_count; 1832468c413aSTim Haley } spa_load_error_t; 1833468c413aSTim Haley 1834468c413aSTim Haley static void 1835468c413aSTim Haley spa_load_verify_done(zio_t *zio) 1836468c413aSTim Haley { 1837468c413aSTim Haley blkptr_t *bp = zio->io_bp; 1838468c413aSTim Haley spa_load_error_t *sle = zio->io_private; 1839468c413aSTim Haley dmu_object_type_t type = BP_GET_TYPE(bp); 1840468c413aSTim Haley int error = zio->io_error; 1841e42d2059SMatthew Ahrens spa_t *spa = zio->io_spa; 1842468c413aSTim Haley 1843468c413aSTim Haley if (error) { 1844ad135b5dSChristopher Siden if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && 1845468c413aSTim Haley type != DMU_OT_INTENT_LOG) 18461a5e258fSJosef 'Jeff' Sipek atomic_inc_64(&sle->sle_meta_count); 1847468c413aSTim Haley else 18481a5e258fSJosef 'Jeff' Sipek atomic_inc_64(&sle->sle_data_count); 1849468c413aSTim Haley } 1850468c413aSTim Haley zio_data_buf_free(zio->io_data, zio->io_size); 1851e42d2059SMatthew Ahrens 1852e42d2059SMatthew Ahrens mutex_enter(&spa->spa_scrub_lock); 1853e42d2059SMatthew Ahrens spa->spa_scrub_inflight--; 1854e42d2059SMatthew Ahrens cv_broadcast(&spa->spa_scrub_io_cv); 1855e42d2059SMatthew Ahrens mutex_exit(&spa->spa_scrub_lock); 1856468c413aSTim Haley } 1857468c413aSTim Haley 1858e42d2059SMatthew Ahrens /* 1859e42d2059SMatthew Ahrens * Maximum number of concurrent scrub i/os to create while verifying 1860e42d2059SMatthew Ahrens * a pool during import. 
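 * (spa_load_verify_cb() blocks on spa_scrub_io_cv once this limit is
 * reached; spa_load_verify_done() signals it as I/Os complete.)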
1861e42d2059SMatthew Ahrens */ 1862e42d2059SMatthew Ahrens int spa_load_verify_maxinflight = 10000; 1863e42d2059SMatthew Ahrens boolean_t spa_load_verify_metadata = B_TRUE; 1864e42d2059SMatthew Ahrens boolean_t spa_load_verify_data = B_TRUE; 1865e42d2059SMatthew Ahrens 1866468c413aSTim Haley /*ARGSUSED*/ 1867468c413aSTim Haley static int 1868b24ab676SJeff Bonwick spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 18697802d7bfSMatthew Ahrens const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 1870468c413aSTim Haley { 1871a2cdcdd2SPaul Dagnelie if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 1872e42d2059SMatthew Ahrens return (0); 1873e42d2059SMatthew Ahrens /* 1874e42d2059SMatthew Ahrens * Note: normally this routine will not be called if 1875e42d2059SMatthew Ahrens * spa_load_verify_metadata is not set. However, it may be useful 1876e42d2059SMatthew Ahrens * to manually set the flag after the traversal has begun. 1877e42d2059SMatthew Ahrens */ 1878e42d2059SMatthew Ahrens if (!spa_load_verify_metadata) 1879e42d2059SMatthew Ahrens return (0); 1880e42d2059SMatthew Ahrens if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data) 1881e42d2059SMatthew Ahrens return (0); 1882e42d2059SMatthew Ahrens 1883468c413aSTim Haley zio_t *rio = arg; 1884468c413aSTim Haley size_t size = BP_GET_PSIZE(bp); 1885468c413aSTim Haley void *data = zio_data_buf_alloc(size); 1886468c413aSTim Haley 1887e42d2059SMatthew Ahrens mutex_enter(&spa->spa_scrub_lock); 1888e42d2059SMatthew Ahrens while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight) 1889e42d2059SMatthew Ahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 1890e42d2059SMatthew Ahrens spa->spa_scrub_inflight++; 1891e42d2059SMatthew Ahrens mutex_exit(&spa->spa_scrub_lock); 1892e42d2059SMatthew Ahrens 1893468c413aSTim Haley zio_nowait(zio_read(rio, spa, bp, data, size, 1894468c413aSTim Haley spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB, 1895468c413aSTim Haley ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL | 1896468c413aSTim Haley ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb)); 1897468c413aSTim Haley return (0); 1898468c413aSTim Haley } 1899468c413aSTim Haley 1900468c413aSTim Haley static int 1901468c413aSTim Haley spa_load_verify(spa_t *spa) 1902468c413aSTim Haley { 1903468c413aSTim Haley zio_t *rio; 1904468c413aSTim Haley spa_load_error_t sle = { 0 }; 1905468c413aSTim Haley zpool_rewind_policy_t policy; 1906468c413aSTim Haley boolean_t verify_ok = B_FALSE; 1907e42d2059SMatthew Ahrens int error = 0; 1908468c413aSTim Haley 1909c8ee1847SVictor Latushkin zpool_get_rewind_policy(spa->spa_config, &policy); 1910c8ee1847SVictor Latushkin 1911c8ee1847SVictor Latushkin if (policy.zrp_request & ZPOOL_NEVER_REWIND) 1912c8ee1847SVictor Latushkin return (0); 1913c8ee1847SVictor Latushkin 1914468c413aSTim Haley rio = zio_root(spa, NULL, &sle, 1915468c413aSTim Haley ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 1916468c413aSTim Haley 1917e42d2059SMatthew Ahrens if (spa_load_verify_metadata) { 1918bbfd46c4SJeff Bonwick error = traverse_pool(spa, spa->spa_verify_min_txg, 1919e42d2059SMatthew Ahrens TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, 1920e42d2059SMatthew Ahrens spa_load_verify_cb, rio); 1921e42d2059SMatthew Ahrens } 1922468c413aSTim Haley 1923468c413aSTim Haley (void) zio_wait(rio); 1924468c413aSTim Haley 1925c8ee1847SVictor Latushkin spa->spa_load_meta_errors = sle.sle_meta_count; 1926468c413aSTim Haley spa->spa_load_data_errors = sle.sle_data_count; 1927468c413aSTim Haley 1928c8ee1847SVictor Latushkin if (!error && 
sle.sle_meta_count <= policy.zrp_maxmeta && 1929468c413aSTim Haley sle.sle_data_count <= policy.zrp_maxdata) { 19304b964adaSGeorge Wilson int64_t loss = 0; 19314b964adaSGeorge Wilson 1932468c413aSTim Haley verify_ok = B_TRUE; 1933468c413aSTim Haley spa->spa_load_txg = spa->spa_uberblock.ub_txg; 1934468c413aSTim Haley spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; 19354b964adaSGeorge Wilson 19364b964adaSGeorge Wilson loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; 19374b964adaSGeorge Wilson VERIFY(nvlist_add_uint64(spa->spa_load_info, 19384b964adaSGeorge Wilson ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0); 19394b964adaSGeorge Wilson VERIFY(nvlist_add_int64(spa->spa_load_info, 19404b964adaSGeorge Wilson ZPOOL_CONFIG_REWIND_TIME, loss) == 0); 19414b964adaSGeorge Wilson VERIFY(nvlist_add_uint64(spa->spa_load_info, 19424b964adaSGeorge Wilson ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0); 1943a33cae98STim Haley } else { 1944a33cae98STim Haley spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; 1945468c413aSTim Haley } 1946468c413aSTim Haley 1947468c413aSTim Haley if (error) { 1948468c413aSTim Haley if (error != ENXIO && error != EIO) 1949be6fd75aSMatthew Ahrens error = SET_ERROR(EIO); 1950468c413aSTim Haley return (error); 1951468c413aSTim Haley } 1952468c413aSTim Haley 1953468c413aSTim Haley return (verify_ok ? 0 : EIO); 1954468c413aSTim Haley } 1955468c413aSTim Haley 1956b87f3af3Sperrin /* 19571195e687SMark J Musante * Find a value in the pool props object. 19581195e687SMark J Musante */ 19591195e687SMark J Musante static void 19601195e687SMark J Musante spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 19611195e687SMark J Musante { 19621195e687SMark J Musante (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 19631195e687SMark J Musante zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 19641195e687SMark J Musante } 19651195e687SMark J Musante 19661195e687SMark J Musante /* 19671195e687SMark J Musante * Find a value in the pool directory object. 19681195e687SMark J Musante */ 19691195e687SMark J Musante static int 19701195e687SMark J Musante spa_dir_prop(spa_t *spa, const char *name, uint64_t *val) 19711195e687SMark J Musante { 19721195e687SMark J Musante return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 19731195e687SMark J Musante name, sizeof (uint64_t), 1, val)); 19741195e687SMark J Musante } 19751195e687SMark J Musante 19761195e687SMark J Musante static int 19771195e687SMark J Musante spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 19781195e687SMark J Musante { 19791195e687SMark J Musante vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 19801195e687SMark J Musante return (err); 19811195e687SMark J Musante } 19821195e687SMark J Musante 19831195e687SMark J Musante /* 19841195e687SMark J Musante * Fix up config after a partly-completed split. This is done with the 19851195e687SMark J Musante * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 19861195e687SMark J Musante * pool have that entry in their config, but only the splitting one contains 19871195e687SMark J Musante * a list of all the guids of the vdevs that are being split off. 19881195e687SMark J Musante * 19891195e687SMark J Musante * This function determines what to do with that list: either rejoin 19901195e687SMark J Musante * all the disks to the pool, or complete the splitting process. 
To attempt 19911195e687SMark J Musante * the rejoin, each disk that is offlined is marked online again, and 19921195e687SMark J Musante * we do a reopen() call. If the vdev label for every disk that was 19931195e687SMark J Musante * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 19941195e687SMark J Musante * then we call vdev_split() on each disk, and complete the split. 19951195e687SMark J Musante * 1996d41c4376SMark J Musante * Otherwise we leave the config alone, with all the vdevs in place in 1997d41c4376SMark J Musante * the original pool. 19981195e687SMark J Musante */ 19991195e687SMark J Musante static void 20001195e687SMark J Musante spa_try_repair(spa_t *spa, nvlist_t *config) 20011195e687SMark J Musante { 20021195e687SMark J Musante uint_t extracted; 20031195e687SMark J Musante uint64_t *glist; 20041195e687SMark J Musante uint_t i, gcount; 20051195e687SMark J Musante nvlist_t *nvl; 20061195e687SMark J Musante vdev_t **vd; 20071195e687SMark J Musante boolean_t attempt_reopen; 20081195e687SMark J Musante 20091195e687SMark J Musante if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 20101195e687SMark J Musante return; 20111195e687SMark J Musante 20121195e687SMark J Musante /* check that the config is complete */ 20131195e687SMark J Musante if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 20141195e687SMark J Musante &glist, &gcount) != 0) 20151195e687SMark J Musante return; 20161195e687SMark J Musante 20171195e687SMark J Musante vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 20181195e687SMark J Musante 20191195e687SMark J Musante /* attempt to online all the vdevs & validate */ 20201195e687SMark J Musante attempt_reopen = B_TRUE; 20211195e687SMark J Musante for (i = 0; i < gcount; i++) { 20221195e687SMark J Musante if (glist[i] == 0) /* vdev is hole */ 20231195e687SMark J Musante continue; 20241195e687SMark J Musante 20251195e687SMark J Musante vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 20261195e687SMark J Musante if (vd[i] == NULL) { 20271195e687SMark J Musante /* 20281195e687SMark J Musante * Don't bother attempting to reopen the disks; 20291195e687SMark J Musante * just do the split. 20301195e687SMark J Musante */ 20311195e687SMark J Musante attempt_reopen = B_FALSE; 20321195e687SMark J Musante } else { 20331195e687SMark J Musante /* attempt to re-online it */ 20341195e687SMark J Musante vd[i]->vdev_offline = B_FALSE; 20351195e687SMark J Musante } 20361195e687SMark J Musante } 20371195e687SMark J Musante 20381195e687SMark J Musante if (attempt_reopen) { 20391195e687SMark J Musante vdev_reopen(spa->spa_root_vdev); 20401195e687SMark J Musante 20411195e687SMark J Musante /* check each device to see what state it's in */ 20421195e687SMark J Musante for (extracted = 0, i = 0; i < gcount; i++) { 20431195e687SMark J Musante if (vd[i] != NULL && 20441195e687SMark J Musante vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 20451195e687SMark J Musante break; 20461195e687SMark J Musante ++extracted; 20471195e687SMark J Musante } 20481195e687SMark J Musante } 20491195e687SMark J Musante 20501195e687SMark J Musante /* 20511195e687SMark J Musante * If every disk has been moved to the new pool, or if we never 20521195e687SMark J Musante * even attempted to look at them, then we split them off for 20531195e687SMark J Musante * good. 
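 * (Each disk is handed to vdev_split() and the root vdev is then
 * reopened to pick up the resulting configuration.)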
20541195e687SMark J Musante */ 20551195e687SMark J Musante if (!attempt_reopen || gcount == extracted) { 20561195e687SMark J Musante for (i = 0; i < gcount; i++) 20571195e687SMark J Musante if (vd[i] != NULL) 20581195e687SMark J Musante vdev_split(vd[i]); 20591195e687SMark J Musante vdev_reopen(spa->spa_root_vdev); 20601195e687SMark J Musante } 20611195e687SMark J Musante 20621195e687SMark J Musante kmem_free(vd, gcount * sizeof (vdev_t *)); 20631195e687SMark J Musante } 20641195e687SMark J Musante 20651195e687SMark J Musante static int 20661195e687SMark J Musante spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type, 20671195e687SMark J Musante boolean_t mosconfig) 20681195e687SMark J Musante { 20691195e687SMark J Musante nvlist_t *config = spa->spa_config; 20701195e687SMark J Musante char *ereport = FM_EREPORT_ZFS_POOL; 20718704186eSDan McDonald char *comment; 20721195e687SMark J Musante int error; 20731195e687SMark J Musante uint64_t pool_guid; 20741195e687SMark J Musante nvlist_t *nvl; 20751195e687SMark J Musante 20761195e687SMark J Musante if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) 2077be6fd75aSMatthew Ahrens return (SET_ERROR(EINVAL)); 20781195e687SMark J Musante 20798704186eSDan McDonald ASSERT(spa->spa_comment == NULL); 20808704186eSDan McDonald if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 20818704186eSDan McDonald spa->spa_comment = spa_strdup(comment); 20828704186eSDan McDonald 20831195e687SMark J Musante /* 20841195e687SMark J Musante * Versioning wasn't explicitly added to the label until later, so if 20851195e687SMark J Musante * it's not present treat it as the initial version. 20861195e687SMark J Musante */ 20871195e687SMark J Musante if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 20881195e687SMark J Musante &spa->spa_ubsync.ub_version) != 0) 20891195e687SMark J Musante spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 20901195e687SMark J Musante 20911195e687SMark J Musante (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 20921195e687SMark J Musante &spa->spa_config_txg); 20931195e687SMark J Musante 20941195e687SMark J Musante if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 20951195e687SMark J Musante spa_guid_exists(pool_guid, 0)) { 2096be6fd75aSMatthew Ahrens error = SET_ERROR(EEXIST); 20971195e687SMark J Musante } else { 2098e9103aaeSGarrett D'Amore spa->spa_config_guid = pool_guid; 20991195e687SMark J Musante 21001195e687SMark J Musante if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, 21011195e687SMark J Musante &nvl) == 0) { 21021195e687SMark J Musante VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting, 21031195e687SMark J Musante KM_SLEEP) == 0); 21041195e687SMark J Musante } 21051195e687SMark J Musante 2106ad135b5dSChristopher Siden nvlist_free(spa->spa_load_info); 2107ad135b5dSChristopher Siden spa->spa_load_info = fnvlist_alloc(); 2108ad135b5dSChristopher Siden 210911027bc7STim Haley gethrestime(&spa->spa_loaded_ts); 21101195e687SMark J Musante error = spa_load_impl(spa, pool_guid, config, state, type, 21111195e687SMark J Musante mosconfig, &ereport); 21121195e687SMark J Musante } 21131195e687SMark J Musante 2114bc9014e6SJustin Gibbs /* 2115bc9014e6SJustin Gibbs * Don't count references from objsets that are already closed 2116bc9014e6SJustin Gibbs * and are making their way through the eviction process. 
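 * spa_evicting_os_wait() blocks until those objsets are gone, so the
 * reference count sampled below is a stable baseline for spa_minref.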
2117bc9014e6SJustin Gibbs */ 2118bc9014e6SJustin Gibbs spa_evicting_os_wait(spa); 21191195e687SMark J Musante spa->spa_minref = refcount_count(&spa->spa_refcount); 212011027bc7STim Haley if (error) { 212111027bc7STim Haley if (error != EEXIST) { 212211027bc7STim Haley spa->spa_loaded_ts.tv_sec = 0; 212311027bc7STim Haley spa->spa_loaded_ts.tv_nsec = 0; 212411027bc7STim Haley } 212511027bc7STim Haley if (error != EBADF) { 21261195e687SMark J Musante zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); 212711027bc7STim Haley } 212811027bc7STim Haley } 21291195e687SMark J Musante spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 21301195e687SMark J Musante spa->spa_ena = 0; 21311195e687SMark J Musante 21321195e687SMark J Musante return (error); 21331195e687SMark J Musante } 21341195e687SMark J Musante 21351195e687SMark J Musante /* 2136fa9e4066Sahrens * Load an existing storage pool, using the pool's builtin spa_config as a 2137ea8dc4b6Seschrock * source of configuration information. 2138fa9e4066Sahrens */ 2139fa9e4066Sahrens static int 21401195e687SMark J Musante spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, 21411195e687SMark J Musante spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig, 21421195e687SMark J Musante char **ereport) 2143fa9e4066Sahrens { 2144fa9e4066Sahrens int error = 0; 2145871a9500SMark J Musante nvlist_t *nvroot = NULL; 2146ad135b5dSChristopher Siden nvlist_t *label; 2147fa9e4066Sahrens vdev_t *rvd; 2148fa9e4066Sahrens uberblock_t *ub = &spa->spa_uberblock; 21494b964adaSGeorge Wilson uint64_t children, config_cache_txg = spa->spa_config_txg; 21508ad4d6ddSJeff Bonwick int orig_mode = spa->spa_mode; 21511195e687SMark J Musante int parse; 2152cde58dbcSMatthew Ahrens uint64_t obj; 2153ad135b5dSChristopher Siden boolean_t missing_feat_write = B_FALSE; 2154fa9e4066Sahrens 21558ad4d6ddSJeff Bonwick /* 21568ad4d6ddSJeff Bonwick * If this is an untrusted config, access the pool in read-only mode. 21578ad4d6ddSJeff Bonwick * This prevents things like resilvering recently removed devices. 21588ad4d6ddSJeff Bonwick */ 21598ad4d6ddSJeff Bonwick if (!mosconfig) 21608ad4d6ddSJeff Bonwick spa->spa_mode = FREAD; 21618ad4d6ddSJeff Bonwick 2162e14bb325SJeff Bonwick ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2163e14bb325SJeff Bonwick 2164ea8dc4b6Seschrock spa->spa_load_state = state; 21650373e76bSbonwick 21661195e687SMark J Musante if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot)) 2167be6fd75aSMatthew Ahrens return (SET_ERROR(EINVAL)); 2168fa9e4066Sahrens 21691195e687SMark J Musante parse = (type == SPA_IMPORT_EXISTING ? 21701195e687SMark J Musante VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 2171b5989ec7Seschrock 2172fa9e4066Sahrens /* 217354d692b7SGeorge Wilson * Create "The Godfather" zio to hold all async IOs 217454d692b7SGeorge Wilson */ 21756f834bc1SMatthew Ahrens spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 21766f834bc1SMatthew Ahrens KM_SLEEP); 21776f834bc1SMatthew Ahrens for (int i = 0; i < max_ncpus; i++) { 21786f834bc1SMatthew Ahrens spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 21796f834bc1SMatthew Ahrens ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 21806f834bc1SMatthew Ahrens ZIO_FLAG_GODFATHER); 21816f834bc1SMatthew Ahrens } 218254d692b7SGeorge Wilson 218354d692b7SGeorge Wilson /* 218499653d4eSeschrock * Parse the configuration into a vdev tree. 
We explicitly set the 218599653d4eSeschrock * value that will be returned by spa_version() since parsing the 218699653d4eSeschrock * configuration requires knowing the version number. 2187fa9e4066Sahrens */ 2188e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 21891195e687SMark J Musante error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse); 2190e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALL, FTAG); 2191fa9e4066Sahrens 219299653d4eSeschrock if (error != 0) 21931195e687SMark J Musante return (error); 2194fa9e4066Sahrens 21950e34b6a7Sbonwick ASSERT(spa->spa_root_vdev == rvd); 219681cd5c55SMatthew Ahrens ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 219781cd5c55SMatthew Ahrens ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT); 21981195e687SMark J Musante 21991195e687SMark J Musante if (type != SPA_IMPORT_ASSEMBLE) { 2200fa9e4066Sahrens ASSERT(spa_guid(spa) == pool_guid); 22011195e687SMark J Musante } 2202fa9e4066Sahrens 2203fa9e4066Sahrens /* 2204fa9e4066Sahrens * Try to open all vdevs, loading each label in the process. 2205fa9e4066Sahrens */ 2206e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 22070bf246f5Smc142369 error = vdev_open(rvd); 2208e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALL, FTAG); 22090bf246f5Smc142369 if (error != 0) 22101195e687SMark J Musante return (error); 2211fa9e4066Sahrens 2212fa9e4066Sahrens /* 221377e3a39cSMark J Musante * We need to validate the vdev labels against the configuration that 221477e3a39cSMark J Musante * we have in hand, which is dependent on the setting of mosconfig. If 221577e3a39cSMark J Musante * mosconfig is true then we're validating the vdev labels based on 221677e3a39cSMark J Musante * that config. Otherwise, we're validating against the cached config 221777e3a39cSMark J Musante * (zpool.cache) that was read when we loaded the zfs module, and then 221877e3a39cSMark J Musante * later we will recursively call spa_load() and validate against 221977e3a39cSMark J Musante * the vdev config. 22201195e687SMark J Musante * 22211195e687SMark J Musante * If we're assembling a new pool that's been split off from an 22221195e687SMark J Musante * existing pool, the labels haven't yet been updated so we skip 22231195e687SMark J Musante * validation for now. 2224560e6e96Seschrock */ 22251195e687SMark J Musante if (type != SPA_IMPORT_ASSEMBLE) { 2226e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2227d7f601efSGeorge Wilson error = vdev_validate(rvd, mosconfig); 2228e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALL, FTAG); 2229560e6e96Seschrock 22301195e687SMark J Musante if (error != 0) 22311195e687SMark J Musante return (error); 22321195e687SMark J Musante 22331195e687SMark J Musante if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2234be6fd75aSMatthew Ahrens return (SET_ERROR(ENXIO)); 2235560e6e96Seschrock } 2236560e6e96Seschrock 2237560e6e96Seschrock /* 2238fa9e4066Sahrens * Find the best uberblock. 2239fa9e4066Sahrens */ 2240ad135b5dSChristopher Siden vdev_uberblock_load(rvd, ub, &label); 2241fa9e4066Sahrens 2242fa9e4066Sahrens /* 2243fa9e4066Sahrens * If we weren't able to find a single valid uberblock, return failure. 
2244fa9e4066Sahrens */ 2245ad135b5dSChristopher Siden if (ub->ub_txg == 0) { 2246ad135b5dSChristopher Siden nvlist_free(label); 22471195e687SMark J Musante return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 2248ad135b5dSChristopher Siden } 2249ea8dc4b6Seschrock 2250ea8dc4b6Seschrock /* 2251ad135b5dSChristopher Siden * If the pool has an unsupported version, we can't open it. 2252ea8dc4b6Seschrock */ 2253ad135b5dSChristopher Siden if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 2254ad135b5dSChristopher Siden nvlist_free(label); 22551195e687SMark J Musante return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 2256ad135b5dSChristopher Siden } 2257ad135b5dSChristopher Siden 2258ad135b5dSChristopher Siden if (ub->ub_version >= SPA_VERSION_FEATURES) { 2259ad135b5dSChristopher Siden nvlist_t *features; 2260ad135b5dSChristopher Siden 2261ad135b5dSChristopher Siden /* 2262ad135b5dSChristopher Siden * If we weren't able to find what's necessary for reading the 2263ad135b5dSChristopher Siden * MOS in the label, return failure. 2264ad135b5dSChristopher Siden */ 2265ad135b5dSChristopher Siden if (label == NULL || nvlist_lookup_nvlist(label, 2266ad135b5dSChristopher Siden ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) { 2267ad135b5dSChristopher Siden nvlist_free(label); 2268ad135b5dSChristopher Siden return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2269ad135b5dSChristopher Siden ENXIO)); 2270ad135b5dSChristopher Siden } 2271ad135b5dSChristopher Siden 2272ad135b5dSChristopher Siden /* 2273ad135b5dSChristopher Siden * Update our in-core representation with the definitive values 2274ad135b5dSChristopher Siden * from the label. 2275ad135b5dSChristopher Siden */ 2276ad135b5dSChristopher Siden nvlist_free(spa->spa_label_features); 2277ad135b5dSChristopher Siden VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 2278ad135b5dSChristopher Siden } 2279ad135b5dSChristopher Siden 2280ad135b5dSChristopher Siden nvlist_free(label); 2281ad135b5dSChristopher Siden 2282ad135b5dSChristopher Siden /* 2283ad135b5dSChristopher Siden * Look through entries in the label nvlist's features_for_read. If 2284ad135b5dSChristopher Siden * there is a feature listed there which we don't understand, then we 2285ad135b5dSChristopher Siden * cannot open the pool. 
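 * Any such features are collected under ZPOOL_CONFIG_UNSUP_FEAT in
 * spa_load_info so the caller can report exactly what is missing.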
2286ad135b5dSChristopher Siden */ 2287ad135b5dSChristopher Siden if (ub->ub_version >= SPA_VERSION_FEATURES) { 2288ad135b5dSChristopher Siden nvlist_t *unsup_feat; 2289ad135b5dSChristopher Siden 2290ad135b5dSChristopher Siden VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 2291ad135b5dSChristopher Siden 0); 2292ad135b5dSChristopher Siden 2293ad135b5dSChristopher Siden for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 2294ad135b5dSChristopher Siden NULL); nvp != NULL; 2295ad135b5dSChristopher Siden nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 2296ad135b5dSChristopher Siden if (!zfeature_is_supported(nvpair_name(nvp))) { 2297ad135b5dSChristopher Siden VERIFY(nvlist_add_string(unsup_feat, 2298ad135b5dSChristopher Siden nvpair_name(nvp), "") == 0); 2299ad135b5dSChristopher Siden } 2300ad135b5dSChristopher Siden } 2301ad135b5dSChristopher Siden 2302ad135b5dSChristopher Siden if (!nvlist_empty(unsup_feat)) { 2303ad135b5dSChristopher Siden VERIFY(nvlist_add_nvlist(spa->spa_load_info, 2304ad135b5dSChristopher Siden ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 2305ad135b5dSChristopher Siden nvlist_free(unsup_feat); 2306ad135b5dSChristopher Siden return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2307ad135b5dSChristopher Siden ENOTSUP)); 2308ad135b5dSChristopher Siden } 2309ad135b5dSChristopher Siden 2310ad135b5dSChristopher Siden nvlist_free(unsup_feat); 2311ad135b5dSChristopher Siden } 2312fa9e4066Sahrens 2313fa9e4066Sahrens /* 2314fa9e4066Sahrens * If the vdev guid sum doesn't match the uberblock, we have an 23154b964adaSGeorge Wilson * incomplete configuration. We first check to see if the pool 23164b964adaSGeorge Wilson * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN). 23174b964adaSGeorge Wilson * If it is, defer the vdev_guid_sum check till later so we 23184b964adaSGeorge Wilson * can handle missing vdevs. 2319fa9e4066Sahrens */ 23204b964adaSGeorge Wilson if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN, 23214b964adaSGeorge Wilson &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE && 23221195e687SMark J Musante rvd->vdev_guid_sum != ub->ub_guid_sum) 23231195e687SMark J Musante return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 23241195e687SMark J Musante 23251195e687SMark J Musante if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 23261195e687SMark J Musante spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 23271195e687SMark J Musante spa_try_repair(spa, config); 23281195e687SMark J Musante spa_config_exit(spa, SCL_ALL, FTAG); 23291195e687SMark J Musante nvlist_free(spa->spa_config_splitting); 23301195e687SMark J Musante spa->spa_config_splitting = NULL; 2331fa9e4066Sahrens } 2332fa9e4066Sahrens 2333fa9e4066Sahrens /* 2334fa9e4066Sahrens * Initialize internal SPA structures. 2335fa9e4066Sahrens */ 2336fa9e4066Sahrens spa->spa_state = POOL_STATE_ACTIVE; 2337fa9e4066Sahrens spa->spa_ubsync = spa->spa_uberblock; 2338468c413aSTim Haley spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 2339c8ee1847SVictor Latushkin TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 2340468c413aSTim Haley spa->spa_first_txg = spa->spa_last_ubsync_txg ? 
    /*
     * If the vdev guid sum doesn't match the uberblock, we have an
     * incomplete configuration.  We first check to see if the pool
     * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
     * If it is, defer the vdev_guid_sum check until later so we
     * can handle missing vdevs.
     */
    if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
        &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
        rvd->vdev_guid_sum != ub->ub_guid_sum)
        return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));

    if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
        spa_try_repair(spa, config);
        spa_config_exit(spa, SCL_ALL, FTAG);
        nvlist_free(spa->spa_config_splitting);
        spa->spa_config_splitting = NULL;
    }

    /*
     * Initialize internal SPA structures.
     */
    spa->spa_state = POOL_STATE_ACTIVE;
    spa->spa_ubsync = spa->spa_uberblock;
    spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
        TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
    spa->spa_first_txg = spa->spa_last_ubsync_txg ?
        spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
    spa->spa_claim_max_txg = spa->spa_first_txg;
    spa->spa_prev_software_version = ub->ub_software_version;

    error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
    if (error)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
    spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

    if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

    if (spa_version(spa) >= SPA_VERSION_FEATURES) {
        boolean_t missing_feat_read = B_FALSE;
        nvlist_t *unsup_feat, *enabled_feat;

        if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
            &spa->spa_feat_for_read_obj) != 0) {
            return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
        }

        if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
            &spa->spa_feat_for_write_obj) != 0) {
            return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
        }

        if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
            &spa->spa_feat_desc_obj) != 0) {
            return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
        }

        enabled_feat = fnvlist_alloc();
        unsup_feat = fnvlist_alloc();

        if (!spa_features_check(spa, B_FALSE,
            unsup_feat, enabled_feat))
            missing_feat_read = B_TRUE;

        if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
            if (!spa_features_check(spa, B_TRUE,
                unsup_feat, enabled_feat)) {
                missing_feat_write = B_TRUE;
            }
        }

        fnvlist_add_nvlist(spa->spa_load_info,
            ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);

        if (!nvlist_empty(unsup_feat)) {
            fnvlist_add_nvlist(spa->spa_load_info,
                ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
        }

        fnvlist_free(enabled_feat);
        fnvlist_free(unsup_feat);

        if (!missing_feat_read) {
            fnvlist_add_boolean(spa->spa_load_info,
                ZPOOL_CONFIG_CAN_RDONLY);
        }
        /*
         * If the state is SPA_LOAD_TRYIMPORT, our objective is
         * twofold: to determine whether the pool is available for
         * import in read-write mode and (if it is not) whether the
         * pool is available for import in read-only mode.  If the pool
         * is available for import in read-write mode, it is displayed
         * as available in userland; if it is not available for import
         * in read-only mode, it is displayed as unavailable in
         * userland.  If the pool is available for import in read-only
         * mode but not read-write mode, it is displayed as unavailable
         * in userland with a special note that the pool is actually
         * available for open in read-only mode.
         *
         * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
         * missing a feature for write, we must first determine whether
         * the pool can be opened read-only before returning to
         * userland in order to know whether to display the
         * above-mentioned note.
         */
        if (missing_feat_read || (missing_feat_write &&
            spa_writeable(spa))) {
            return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
                ENOTSUP));
        }
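        /*
         * Worked summary of the checks above (editorial aid; the
         * inputs are missing_feat_read, missing_feat_write, and
         * whether this open is writeable):
         *
         *    read feats    write feats    writeable  ->  result
         *    missing       -              -              ENOTSUP here
         *    present       missing        yes            ENOTSUP here
         *    present       missing        no             proceed
         *    present       present        either         proceed
         *
         * ZPOOL_CONFIG_CAN_RDONLY was already recorded whenever the
         * read features were all present.  In the "proceed with a
         * missing write feature" case (which includes TRYIMPORT, since
         * tryimport opens read-only), the load continues so that the
         * read-only verdict reached later in this function can be
         * reported as well.
         */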
        /*
         * Load refcounts for ZFS features from disk into an in-memory
         * cache during SPA initialization.
         */
        for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
            uint64_t refcount;

            error = feature_get_refcount_from_disk(spa,
                &spa_feature_table[i], &refcount);
            if (error == 0) {
                spa->spa_feat_refcount_cache[i] = refcount;
            } else if (error == ENOTSUP) {
                spa->spa_feat_refcount_cache[i] =
                    SPA_FEATURE_DISABLED;
            } else {
                return (spa_vdev_err(rvd,
                    VDEV_AUX_CORRUPT_DATA, EIO));
            }
        }
    }

    if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
        if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
            &spa->spa_feat_enabled_txg_obj) != 0)
            return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
    }

    spa->spa_is_initializing = B_TRUE;
    error = dsl_pool_open(spa->spa_dsl_pool);
    spa->spa_is_initializing = B_FALSE;
    if (error != 0)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

    if (!mosconfig) {
        uint64_t hostid;
        nvlist_t *policy = NULL, *nvconfig;

        if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
            return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

        if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
            ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
            char *hostname;
            unsigned long myhostid = 0;

            VERIFY(nvlist_lookup_string(nvconfig,
                ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

#ifdef _KERNEL
            myhostid = zone_get_hostid(NULL);
#else   /* _KERNEL */
            /*
             * We're emulating the system's hostid in userland, so
             * we can't use zone_get_hostid().
             */
            (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
#endif  /* _KERNEL */
            if (hostid != 0 && myhostid != 0 &&
                hostid != myhostid) {
                nvlist_free(nvconfig);
                cmn_err(CE_WARN, "pool '%s' could not be "
                    "loaded as it was last accessed by "
                    "another system (host: %s hostid: 0x%lx). "
Clulow "See: http://illumos.org/msg/ZFS-8000-EY", 2491e14bb325SJeff Bonwick spa_name(spa), hostname, 249295173954Sek110237 (unsigned long)hostid); 2493be6fd75aSMatthew Ahrens return (SET_ERROR(EBADF)); 249495173954Sek110237 } 249595173954Sek110237 } 2496c8ee1847SVictor Latushkin if (nvlist_lookup_nvlist(spa->spa_config, 2497c8ee1847SVictor Latushkin ZPOOL_REWIND_POLICY, &policy) == 0) 2498c8ee1847SVictor Latushkin VERIFY(nvlist_add_nvlist(nvconfig, 2499c8ee1847SVictor Latushkin ZPOOL_REWIND_POLICY, policy) == 0); 250095173954Sek110237 250188ecc943SGeorge Wilson spa_config_set(spa, nvconfig); 2502fa9e4066Sahrens spa_unload(spa); 2503fa9e4066Sahrens spa_deactivate(spa); 25048ad4d6ddSJeff Bonwick spa_activate(spa, orig_mode); 2505fa9e4066Sahrens 25061195e687SMark J Musante return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE)); 2507fa9e4066Sahrens } 2508fa9e4066Sahrens 250945818ee1SMatthew Ahrens /* Grab the secret checksum salt from the MOS. */ 251045818ee1SMatthew Ahrens error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 251145818ee1SMatthew Ahrens DMU_POOL_CHECKSUM_SALT, 1, 251245818ee1SMatthew Ahrens sizeof (spa->spa_cksum_salt.zcs_bytes), 251345818ee1SMatthew Ahrens spa->spa_cksum_salt.zcs_bytes); 251445818ee1SMatthew Ahrens if (error == ENOENT) { 251545818ee1SMatthew Ahrens /* Generate a new salt for subsequent use */ 251645818ee1SMatthew Ahrens (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 251745818ee1SMatthew Ahrens sizeof (spa->spa_cksum_salt.zcs_bytes)); 251845818ee1SMatthew Ahrens } else if (error != 0) { 251945818ee1SMatthew Ahrens return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 252045818ee1SMatthew Ahrens } 252145818ee1SMatthew Ahrens 2522cde58dbcSMatthew Ahrens if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0) 2523cde58dbcSMatthew Ahrens return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2524cde58dbcSMatthew Ahrens error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 2525cde58dbcSMatthew Ahrens if (error != 0) 25261195e687SMark J Musante return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2527fa9e4066Sahrens 2528fa9e4066Sahrens /* 252999653d4eSeschrock * Load the bit that tells us to use the new accounting function 253099653d4eSeschrock * (raid-z deflation). If we have an older pool, this will not 253199653d4eSeschrock * be present. 253299653d4eSeschrock */ 25331195e687SMark J Musante error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate); 25341195e687SMark J Musante if (error != 0 && error != ENOENT) 25351195e687SMark J Musante return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 253699653d4eSeschrock 25373f9d6ad7SLin Ling error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 25383f9d6ad7SLin Ling &spa->spa_creation_version); 25393f9d6ad7SLin Ling if (error != 0 && error != ENOENT) 25403f9d6ad7SLin Ling return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 25413f9d6ad7SLin Ling 254299653d4eSeschrock /* 2543ea8dc4b6Seschrock * Load the persistent error log. If we have an older pool, this will 2544ea8dc4b6Seschrock * not be present. 
    /*
     * Load the bit that tells us to use the new accounting function
     * (raid-z deflation).  If we have an older pool, this will not
     * be present.
     */
    error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
    if (error != 0 && error != ENOENT)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

    error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
        &spa->spa_creation_version);
    if (error != 0 && error != ENOENT)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

    /*
     * Load the persistent error log.  If we have an older pool, this
     * will not be present.
     */
    error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
    if (error != 0 && error != ENOENT)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

    error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
        &spa->spa_errlog_scrub);
    if (error != 0 && error != ENOENT)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

    /*
     * Load the history object.  If we have an older pool, this
     * will not be present.
     */
    error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
    if (error != 0 && error != ENOENT)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

    /*
     * If we're assembling the pool from the split-off vdevs of
     * an existing pool, we don't want to attach the spares & cache
     * devices.
     */

    /*
     * Load any hot spares for this pool.
     */
    error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
    if (error != 0 && error != ENOENT)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
    if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
        ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
        if (load_nvlist(spa, spa->spa_spares.sav_object,
            &spa->spa_spares.sav_config) != 0)
            return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
        spa_load_spares(spa);
        spa_config_exit(spa, SCL_ALL, FTAG);
    } else if (error == 0) {
        spa->spa_spares.sav_sync = B_TRUE;
    }
    /*
     * Load any level 2 ARC devices for this pool.
     */
    error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
        &spa->spa_l2cache.sav_object);
    if (error != 0 && error != ENOENT)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
    if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
        ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
        if (load_nvlist(spa, spa->spa_l2cache.sav_object,
            &spa->spa_l2cache.sav_config) != 0)
            return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
        spa_load_l2cache(spa);
        spa_config_exit(spa, SCL_ALL, FTAG);
    } else if (error == 0) {
        spa->spa_l2cache.sav_sync = B_TRUE;
    }

    spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

    error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
    if (error && error != ENOENT)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

    if (error == 0) {
        uint64_t autoreplace;

        spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
        spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
        spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
        spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
        spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
        spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
            &spa->spa_dedup_ditto);

        spa->spa_autoreplace = (autoreplace != 0);
    }

    /*
     * If the 'autoreplace' property is set, then post a resource notifying
     * the ZFS DE that it should not issue any faults for unopenable
     * devices.  We also iterate over the vdevs, and post a sysevent for any
     * unopenable vdevs so that the normal autoreplace handler can take
     * over.
     */
    if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
        spa_check_removed(spa->spa_root_vdev);
        /*
         * For the import case, this is done in spa_import(), because
         * at this point we're using the spare definitions from
         * the MOS config, not necessarily from the userland config.
         */
        if (state != SPA_LOAD_IMPORT) {
            spa_aux_check_removed(&spa->spa_spares);
            spa_aux_check_removed(&spa->spa_l2cache);
        }
    }
    /*
     * Load the vdev state for all toplevel vdevs.
     */
    vdev_load(rvd);

    /*
     * Propagate the leaf DTLs we just loaded all the way up the tree.
     */
    spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
    vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
    spa_config_exit(spa, SCL_ALL, FTAG);

    /*
     * Load the DDTs (dedup tables).
     */
    error = ddt_load(spa);
    if (error != 0)
        return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

    spa_update_dspace(spa);

    /*
     * Validate the config, using the MOS config to fill in any
     * information which might be missing.  If we fail to validate
     * the config then declare the pool unfit for use.  If we're
     * assembling a pool from a split, the log is not transferred
     * over.
     */
    if (type != SPA_IMPORT_ASSEMBLE) {
        nvlist_t *nvconfig;

        if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
            return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

        if (!spa_config_valid(spa, nvconfig)) {
            nvlist_free(nvconfig);
            return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
                ENXIO));
        }
        nvlist_free(nvconfig);

        /*
         * Now that we've validated the config, check the state of the
         * root vdev.  If it can't be opened, it indicates one or
         * more toplevel vdevs are faulted.
         */
        if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
            return (SET_ERROR(ENXIO));

        if (spa_writeable(spa) && spa_check_logs(spa)) {
            *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
            return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
        }
    }

    if (missing_feat_write) {
        ASSERT(state == SPA_LOAD_TRYIMPORT);

        /*
         * At this point, we know that we can open the pool in
         * read-only mode but not read-write mode.  We now have enough
         * information and can return to userland.
         */
        return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
    }
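    /*
     * Aside on the guid-sum checks performed above (both the early
     * ub_guid_sum comparison and spa_config_valid()): the uberblock's
     * ub_guid_sum is the arithmetic sum, modulo 2^64, of the guids of
     * every vdev in the pool, so a missing device is detected by simple
     * addition.  Hypothetical three-vdev example:
     *
     *    root guid 0x10, disk guids 0x21 and 0x33
     *    ub_guid_sum = 0x10 + 0x21 + 0x33 = 0x64
     *
     * If only the root and one disk are found, the recomputed sum
     * cannot match and the load fails with VDEV_AUX_BAD_GUID_SUM.
     */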
    /*
     * We've successfully opened the pool, verify that we're ready
     * to start pushing transactions.
     */
    if (state != SPA_LOAD_TRYIMPORT) {
        if (error = spa_load_verify(spa))
            return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
                error));
    }

    if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
        spa->spa_load_max_txg == UINT64_MAX)) {
        dmu_tx_t *tx;
        int need_update = B_FALSE;
        dsl_pool_t *dp = spa_get_dsl(spa);

        ASSERT(state != SPA_LOAD_TRYIMPORT);

        /*
         * Claim log blocks that haven't been committed yet.
         * This must all happen in a single txg.
         * Note: spa_claim_max_txg is updated by spa_claim_notify(),
         * invoked from zil_claim_log_block()'s i/o done callback.
         * Price of rollback is that we abandon the log.
         */
        spa->spa_claiming = B_TRUE;

        tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
        (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
            zil_claim, tx, DS_FIND_CHILDREN);
        dmu_tx_commit(tx);

        spa->spa_claiming = B_FALSE;

        spa_set_log_state(spa, SPA_LOG_GOOD);
        spa->spa_sync_on = B_TRUE;
        txg_sync_start(spa->spa_dsl_pool);

        /*
         * Wait for all claims to sync.  We sync up to the highest
         * claimed log block birth time so that claimed log blocks
         * don't appear to be from the future.  spa_claim_max_txg
         * will have been set for us by either zil_check_log_chain()
         * (invoked from spa_check_logs()) or zil_claim() above.
         */
        txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);

        /*
         * If the config cache is stale, or we have uninitialized
         * metaslabs (see spa_vdev_add()), then update the config.
         *
         * If this is a verbatim import, trust the current
         * in-core spa_config and update the disk labels.
         */
        if (config_cache_txg != spa->spa_config_txg ||
            state == SPA_LOAD_IMPORT ||
            state == SPA_LOAD_RECOVER ||
            (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
            need_update = B_TRUE;

        for (int c = 0; c < rvd->vdev_children; c++)
            if (rvd->vdev_child[c]->vdev_ms_array == 0)
                need_update = B_TRUE;

        /*
         * Update the config cache asynchronously in case we're the
         * root pool, in which case the config cache isn't writable yet.
         */
        if (need_update)
            spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

        /*
         * Check all DTLs to see if anything needs resilvering.
         */
        if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
            vdev_resilver_needed(rvd, NULL, NULL))
            spa_async_request(spa, SPA_ASYNC_RESILVER);

        /*
         * Log the fact that we booted up (so that we can detect if
         * we rebooted in the middle of an operation).
         */
        spa_history_log_version(spa, "open");

        /*
         * Delete any inconsistent datasets.
         */
        (void) dmu_objset_find(spa_name(spa),
            dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);

        /*
         * Clean up any stale temporary dataset userrefs.
         */
        dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
    }

    return (0);
}

static int
spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
{
    int mode = spa->spa_mode;

    spa_unload(spa);
    spa_deactivate(spa);

    spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;

    spa_activate(spa, mode);
    spa_async_suspend(spa);

    return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
}
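/*
 * Illustrative trace of the retry mechanism (hypothetical txg numbers):
 * suppose the newest uberblock is at txg 100 and spa_load() keeps failing.
 * Each spa_load_retry() call caps spa_load_max_txg one txg below the
 * uberblock that was just tried, so successive attempts load from txgs
 * 99, 98, 97, ... until either a load succeeds or spa_load_best() below
 * runs out of the rewind window it computes.
 */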
/*
 * If spa_load() fails this function will try loading prior txg's.  If
 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
 * will be rewound to that txg.  If 'state' is not SPA_LOAD_RECOVER this
 * function will not rewind the pool and will return the same error as
 * spa_load().
 */
static int
spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
    uint64_t max_request, int rewind_flags)
{
    nvlist_t *loadinfo = NULL;
    nvlist_t *config = NULL;
    int load_error, rewind_error;
    uint64_t safe_rewind_txg;
    uint64_t min_txg;

    if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
        spa->spa_load_max_txg = spa->spa_load_txg;
        spa_set_log_state(spa, SPA_LOG_CLEAR);
    } else {
        spa->spa_load_max_txg = max_request;
        if (max_request != UINT64_MAX)
            spa->spa_extreme_rewind = B_TRUE;
    }

    load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
        mosconfig);
    if (load_error == 0)
        return (0);

    if (spa->spa_root_vdev != NULL)
        config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

    spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
    spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;

    if (rewind_flags & ZPOOL_NEVER_REWIND) {
        nvlist_free(config);
        return (load_error);
    }

    if (state == SPA_LOAD_RECOVER) {
        /* Price of rolling back is discarding txgs, including log */
        spa_set_log_state(spa, SPA_LOG_CLEAR);
    } else {
        /*
         * If we aren't rolling back save the load info from our first
         * import attempt so that we can restore it after attempting
         * to rewind.
         */
        loadinfo = spa->spa_load_info;
        spa->spa_load_info = fnvlist_alloc();
    }

    spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
    safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
    min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
        TXG_INITIAL : safe_rewind_txg;

    /*
     * Continue as long as we're finding errors, we're still within
     * the acceptable rewind range, and we're still finding uberblocks
     */
    while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
        spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
        if (spa->spa_load_max_txg < safe_rewind_txg)
            spa->spa_extreme_rewind = B_TRUE;
        rewind_error = spa_load_retry(spa, state, mosconfig);
    }

    spa->spa_extreme_rewind = B_FALSE;
    spa->spa_load_max_txg = UINT64_MAX;

    if (config && (rewind_error || state != SPA_LOAD_RECOVER))
        spa_config_set(spa, config);

    if (state == SPA_LOAD_RECOVER) {
        ASSERT3P(loadinfo, ==, NULL);
        return (rewind_error);
    } else {
        /* Store the rewind info as part of the initial load info */
        fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
            spa->spa_load_info);

        /* Restore the initial load info */
        fnvlist_free(spa->spa_load_info);
        spa->spa_load_info = loadinfo;

        return (load_error);
    }
}
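/*
 * Worked example of the rewind window above (hypothetical numbers): with
 * the newest synced uberblock at txg 1000 and TXG_DEFER_SIZE at its usual
 * value of 2, safe_rewind_txg = 1000 - 2 = 998, so a normal rewind only
 * considers txgs 998..1000 -- frees from older txgs may already have been
 * reclaimed.  ZPOOL_EXTREME_REWIND widens min_txg all the way to
 * TXG_INITIAL and lets the loop walk back one uberblock at a time, at the
 * cost of the much more expensive verification spa_load() performs when
 * spa_extreme_rewind is set.
 */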
/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache.  For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
    nvlist_t **config)
{
    spa_t *spa;
    spa_load_state_t state = SPA_LOAD_OPEN;
    int error;
    int locked = B_FALSE;

    *spapp = NULL;

    /*
     * As disgusting as this is, we need to support recursive calls to this
     * function because dsl_dir_open() is called during spa_load(), and ends
     * up calling spa_open() again.  The real fix is to figure out how to
     * avoid dsl_dir_open() calling this in the first place.
     */
    if (mutex_owner(&spa_namespace_lock) != curthread) {
        mutex_enter(&spa_namespace_lock);
        locked = B_TRUE;
    }

    if ((spa = spa_lookup(pool)) == NULL) {
        if (locked)
            mutex_exit(&spa_namespace_lock);
        return (SET_ERROR(ENOENT));
    }

    if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
        zpool_rewind_policy_t policy;

        zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
            &policy);
        if (policy.zrp_request & ZPOOL_DO_REWIND)
            state = SPA_LOAD_RECOVER;

        spa_activate(spa, spa_mode_global);

        if (state != SPA_LOAD_RECOVER)
            spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;

        error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
            policy.zrp_request);

        if (error == EBADF) {
            /*
             * If vdev_validate() returns failure (indicated by
             * EBADF), it means that one of the vdevs reports that
             * the pool has been exported or destroyed.  If this is
             * the case, the config cache is out of sync and we
             * should remove the pool from the namespace.
             */
            spa_unload(spa);
            spa_deactivate(spa);
            spa_config_sync(spa, B_TRUE, B_TRUE);
            spa_remove(spa);
            if (locked)
                mutex_exit(&spa_namespace_lock);
            return (SET_ERROR(ENOENT));
        }

        if (error) {
            /*
             * We can't open the pool, but we still have useful
             * information: the state of each vdev after the
             * attempted vdev_open().  Return this to the user.
             */
            if (config != NULL && spa->spa_config) {
                VERIFY(nvlist_dup(spa->spa_config, config,
                    KM_SLEEP) == 0);
                VERIFY(nvlist_add_nvlist(*config,
                    ZPOOL_CONFIG_LOAD_INFO,
                    spa->spa_load_info) == 0);
            }
            spa_unload(spa);
            spa_deactivate(spa);
            spa->spa_last_open_failed = error;
            if (locked)
                mutex_exit(&spa_namespace_lock);
            *spapp = NULL;
            return (error);
        }
    }

    spa_open_ref(spa, tag);

    if (config != NULL)
        *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

    /*
     * If we've recovered the pool, pass back any information we
     * gathered while doing the load.
     */
    if (state == SPA_LOAD_RECOVER) {
        VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
            spa->spa_load_info) == 0);
    }

    if (locked) {
        spa->spa_last_open_failed = 0;
        spa->spa_last_ubsync_txg = 0;
        spa->spa_load_txg = 0;
        mutex_exit(&spa_namespace_lock);
    }

    *spapp = spa;

    return (0);
}
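/*
 * Call-chain sketch of the recursion handled above (illustrative):
 *
 *    spa_open()
 *      -> mutex_enter(&spa_namespace_lock), locked = B_TRUE
 *      -> spa_load() ... dsl_dir_open()
 *           -> spa_open()    (re-entered on the same thread)
 *                mutex_owner() == curthread, so locked stays B_FALSE
 *                and the inner call neither blocks on nor re-drops
 *                the namespace lock.
 */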

int
spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
    nvlist_t **config)
{
    return (spa_open_common(name, spapp, tag, policy, config));
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
    return (spa_open_common(name, spapp, tag, NULL, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
    spa_t *spa;

    mutex_enter(&spa_namespace_lock);
    if ((spa = spa_lookup(name)) == NULL) {
        mutex_exit(&spa_namespace_lock);
        return (NULL);
    }
    spa->spa_inject_ref++;
    mutex_exit(&spa_namespace_lock);

    return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
    mutex_enter(&spa_namespace_lock);
    spa->spa_inject_ref--;
    mutex_exit(&spa_namespace_lock);
}
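/*
 * Typical consumer pattern for the interfaces above (illustrative
 * sketch; the pool name and do_work() are hypothetical).  Both
 * spa_open()/spa_close() and spa_inject_addref()/spa_inject_delref()
 * must be strictly paired:
 *
 *    spa_t *spa;
 *    int err;
 *
 *    if ((err = spa_open("tank", &spa, FTAG)) != 0)
 *        return (err);
 *    err = do_work(spa);
 *    spa_close(spa, FTAG);
 *    return (err);
 */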

/*
 * Add spares device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
    nvlist_t **spares;
    uint_t i, nspares;
    nvlist_t *nvroot;
    uint64_t guid;
    vdev_stat_t *vs;
    uint_t vsc;
    uint64_t pool;

    ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

    if (spa->spa_spares.sav_count == 0)
        return;

    VERIFY(nvlist_lookup_nvlist(config,
        ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
    VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
        ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
    if (nspares != 0) {
        VERIFY(nvlist_add_nvlist_array(nvroot,
            ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
        VERIFY(nvlist_lookup_nvlist_array(nvroot,
            ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

        /*
         * Go through and find any spares which have since been
         * repurposed as an active spare.  If this is the case, update
         * their status appropriately.
         */
        for (i = 0; i < nspares; i++) {
            VERIFY(nvlist_lookup_uint64(spares[i],
                ZPOOL_CONFIG_GUID, &guid) == 0);
            if (spa_spare_exists(guid, &pool, NULL) &&
                pool != 0ULL) {
                VERIFY(nvlist_lookup_uint64_array(
                    spares[i], ZPOOL_CONFIG_VDEV_STATS,
                    (uint64_t **)&vs, &vsc) == 0);
                vs->vs_state = VDEV_STATE_CANT_OPEN;
                vs->vs_aux = VDEV_AUX_SPARED;
            }
        }
    }
}

/*
 * Add l2cache device information to the nvlist, including vdev stats.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
    nvlist_t **l2cache;
    uint_t i, j, nl2cache;
    nvlist_t *nvroot;
    uint64_t guid;
    vdev_t *vd;
    vdev_stat_t *vs;
    uint_t vsc;

    ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

    if (spa->spa_l2cache.sav_count == 0)
        return;

    VERIFY(nvlist_lookup_nvlist(config,
        ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
    VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
        ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
    if (nl2cache != 0) {
        VERIFY(nvlist_add_nvlist_array(nvroot,
            ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
        VERIFY(nvlist_lookup_nvlist_array(nvroot,
            ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

        /*
         * Update level 2 cache device stats.
         */

        for (i = 0; i < nl2cache; i++) {
            VERIFY(nvlist_lookup_uint64(l2cache[i],
                ZPOOL_CONFIG_GUID, &guid) == 0);

            vd = NULL;
            for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
                if (guid ==
                    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
                    vd = spa->spa_l2cache.sav_vdevs[j];
                    break;
                }
            }
            ASSERT(vd != NULL);

            VERIFY(nvlist_lookup_uint64_array(l2cache[i],
                ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
                == 0);
            vdev_get_stats(vd, vs);
        }
    }
}
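/*
 * The function below walks ZAP objects with the standard cursor idiom.
 * Minimal sketch of that pattern (illustrative; "os" and "obj" are a
 * hypothetical objset/object pair):
 *
 *    zap_cursor_t zc;
 *    zap_attribute_t za;
 *
 *    for (zap_cursor_init(&zc, os, obj);
 *        zap_cursor_retrieve(&zc, &za) == 0;
 *        zap_cursor_advance(&zc))
 *        ... consume za.za_name / za.za_first_integer ...
 *    zap_cursor_fini(&zc);
 */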
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
    nvlist_t *features;
    zap_cursor_t zc;
    zap_attribute_t za;

    ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
    VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);

    if (spa->spa_feat_for_read_obj != 0) {
        for (zap_cursor_init(&zc, spa->spa_meta_objset,
            spa->spa_feat_for_read_obj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
            ASSERT(za.za_integer_length == sizeof (uint64_t) &&
                za.za_num_integers == 1);
            VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
                za.za_first_integer));
        }
        zap_cursor_fini(&zc);
    }

    if (spa->spa_feat_for_write_obj != 0) {
        for (zap_cursor_init(&zc, spa->spa_meta_objset,
            spa->spa_feat_for_write_obj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
            ASSERT(za.za_integer_length == sizeof (uint64_t) &&
                za.za_num_integers == 1);
            VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
                za.za_first_integer));
        }
        zap_cursor_fini(&zc);
    }

    VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
        features) == 0);
    nvlist_free(features);
}

int
spa_get_stats(const char *name, nvlist_t **config,
    char *altroot, size_t buflen)
{
    int error;
    spa_t *spa;

    *config = NULL;
    error = spa_open_common(name, &spa, FTAG, NULL, config);

    if (spa != NULL) {
        /*
         * This still leaves a window of inconsistency where the spares
         * or l2cache devices could change and the config would be
         * self-inconsistent.
         */
        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

        if (*config != NULL) {
            uint64_t loadtimes[2];

            loadtimes[0] = spa->spa_loaded_ts.tv_sec;
            loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
            VERIFY(nvlist_add_uint64_array(*config,
                ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);

            VERIFY(nvlist_add_uint64(*config,
                ZPOOL_CONFIG_ERRCOUNT,
                spa_get_errlog_size(spa)) == 0);

            if (spa_suspended(spa))
                VERIFY(nvlist_add_uint64(*config,
                    ZPOOL_CONFIG_SUSPENDED,
                    spa->spa_failmode) == 0);

            spa_add_spares(spa, *config);
            spa_add_l2cache(spa, *config);
            spa_add_feature_stats(spa, *config);
        }
    }

    /*
     * We want to get the alternate root even for faulted pools, so we cheat
     * and call spa_lookup() directly.
     */
    if (altroot) {
        if (spa == NULL) {
            mutex_enter(&spa_namespace_lock);
            spa = spa_lookup(name);
            if (spa)
                spa_altroot(spa, altroot, buflen);
            else
                altroot[0] = '\0';
            spa = NULL;
            mutex_exit(&spa_namespace_lock);
        } else {
            spa_altroot(spa, altroot, buflen);
        }
    }

    if (spa != NULL) {
        spa_config_exit(spa, SCL_CONFIG, FTAG);
        spa_close(spa, FTAG);
    }

    return (error);
}
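/*
 * Usage sketch for spa_get_stats() (illustrative; the pool name is
 * hypothetical).  Note that a config may be returned even when the call
 * itself fails, so both the error and the nvlist must be checked:
 *
 *    nvlist_t *config;
 *    char altroot[MAXPATHLEN];
 *    int err;
 *
 *    err = spa_get_stats("tank", &config, altroot, sizeof (altroot));
 *    if (config != NULL) {
 *        ... inspect ZPOOL_CONFIG_ERRCOUNT, feature stats, etc. ...
 *        nvlist_free(config);
 *    }
 */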

/*
 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each of which describes a valid leaf vdev.  If this is
 * an import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
    nvlist_t **dev;
    uint_t i, ndev;
    vdev_t *vd;
    int error;

    ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

    /*
     * It's acceptable to have no devs specified.
     */
    if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
        return (0);

    if (ndev == 0)
        return (SET_ERROR(EINVAL));

    /*
     * Make sure the pool is formatted with a version that supports this
     * device type.
     */
    if (spa_version(spa) < version)
        return (SET_ERROR(ENOTSUP));

    /*
     * Set the pending device list so we correctly handle device in-use
     * checking.
     */
    sav->sav_pending = dev;
    sav->sav_npending = ndev;

    for (i = 0; i < ndev; i++) {
        if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
            mode)) != 0)
            goto out;

        if (!vd->vdev_ops->vdev_op_leaf) {
            vdev_free(vd);
            error = SET_ERROR(EINVAL);
            goto out;
        }

        /*
         * The L2ARC currently only supports disk devices in
         * kernel context.  For user-level testing, we allow it.
         */
#ifdef _KERNEL
        if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
            strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
            error = SET_ERROR(ENOTBLK);
            vdev_free(vd);
            goto out;
        }
#endif
        vd->vdev_top = vd;

        if ((error = vdev_open(vd)) == 0 &&
            (error = vdev_label_init(vd, crtxg, label)) == 0) {
            VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
                vd->vdev_guid) == 0);
        }

        vdev_free(vd);

        if (error &&
            (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
            goto out;
        else
            error = 0;
    }

out:
    sav->sav_pending = NULL;
    sav->sav_npending = 0;
    return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
    int error;

    ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

    if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
        &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
        VDEV_LABEL_SPARE)) != 0) {
        return (error);
    }

    return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
        &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
        VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
    int i;

    if (sav->sav_config != NULL) {
        nvlist_t **olddevs;
        uint_t oldndevs;
        nvlist_t **newdevs;

        /*
         * Generate new dev list by concatenating with the
         * current dev list.
         */
        VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
            &olddevs, &oldndevs) == 0);

        newdevs = kmem_alloc(sizeof (void *) *
            (ndevs + oldndevs), KM_SLEEP);
        for (i = 0; i < oldndevs; i++)
            VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
                KM_SLEEP) == 0);
        for (i = 0; i < ndevs; i++)
            VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
                KM_SLEEP) == 0);

        VERIFY(nvlist_remove(sav->sav_config, config,
            DATA_TYPE_NVLIST_ARRAY) == 0);

        VERIFY(nvlist_add_nvlist_array(sav->sav_config,
            config, newdevs, ndevs + oldndevs) == 0);
        for (i = 0; i < oldndevs + ndevs; i++)
            nvlist_free(newdevs[i]);
        kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
    } else {
        /*
         * Generate a new dev list.
         */
        VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
            KM_SLEEP) == 0);
        VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
            devs, ndevs) == 0);
    }
}

/*
 * Stop and drop level 2 ARC devices
 */
void
spa_l2cache_drop(spa_t *spa)
{
    vdev_t *vd;
    int i;
    spa_aux_vdev_t *sav = &spa->spa_l2cache;

    for (i = 0; i < sav->sav_count; i++) {
        uint64_t pool;

        vd = sav->sav_vdevs[i];
        ASSERT(vd != NULL);

        if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
            pool != 0ULL && l2arc_vdev_present(vd))
            l2arc_remove_vdev(vd);
    }
}
3501fa9e4066Sahrens */ 3502990b4856Slling (void) nvlist_lookup_string(props, 3503990b4856Slling zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3504468c413aSTim Haley spa = spa_add(pool, NULL, altroot); 35058ad4d6ddSJeff Bonwick spa_activate(spa, spa_mode_global); 3506fa9e4066Sahrens 3507990b4856Slling if (props && (error = spa_prop_validate(spa, props))) { 3508990b4856Slling spa_deactivate(spa); 3509990b4856Slling spa_remove(spa); 3510c5904d13Seschrock mutex_exit(&spa_namespace_lock); 3511990b4856Slling return (error); 3512990b4856Slling } 3513990b4856Slling 3514ad135b5dSChristopher Siden has_features = B_FALSE; 3515ad135b5dSChristopher Siden for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 3516ad135b5dSChristopher Siden elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 3517ad135b5dSChristopher Siden if (zpool_prop_feature(nvpair_name(elem))) 3518ad135b5dSChristopher Siden has_features = B_TRUE; 3519ad135b5dSChristopher Siden } 3520ad135b5dSChristopher Siden 3521ad135b5dSChristopher Siden if (has_features || nvlist_lookup_uint64(props, 3522ad135b5dSChristopher Siden zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 3523990b4856Slling version = SPA_VERSION; 3524ad135b5dSChristopher Siden } 3525ad135b5dSChristopher Siden ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 3526b24ab676SJeff Bonwick 3527b24ab676SJeff Bonwick spa->spa_first_txg = txg; 3528b24ab676SJeff Bonwick spa->spa_uberblock.ub_txg = txg - 1; 3529990b4856Slling spa->spa_uberblock.ub_version = version; 3530fa9e4066Sahrens spa->spa_ubsync = spa->spa_uberblock; 3531fa9e4066Sahrens 35320373e76bSbonwick /* 353354d692b7SGeorge Wilson * Create "The Godfather" zio to hold all async IOs 353454d692b7SGeorge Wilson */ 35356f834bc1SMatthew Ahrens spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 35366f834bc1SMatthew Ahrens KM_SLEEP); 35376f834bc1SMatthew Ahrens for (int i = 0; i < max_ncpus; i++) { 35386f834bc1SMatthew Ahrens spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 35396f834bc1SMatthew Ahrens ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 35406f834bc1SMatthew Ahrens ZIO_FLAG_GODFATHER); 35416f834bc1SMatthew Ahrens } 354254d692b7SGeorge Wilson 354354d692b7SGeorge Wilson /* 35440373e76bSbonwick * Create the root vdev. 
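	 *
	 * spa_config_parse() either fails outright or hands back a fully
	 * constructed root vdev; the ASSERTs below rely on that contract.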
35450373e76bSbonwick */ 3546e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 35470373e76bSbonwick 354899653d4eSeschrock error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 35490373e76bSbonwick 355099653d4eSeschrock ASSERT(error != 0 || rvd != NULL); 355199653d4eSeschrock ASSERT(error != 0 || spa->spa_root_vdev == rvd); 35520373e76bSbonwick 3553b7b97454Sperrin if (error == 0 && !zfs_allocatable_devs(nvroot)) 3554be6fd75aSMatthew Ahrens error = SET_ERROR(EINVAL); 355599653d4eSeschrock 355699653d4eSeschrock if (error == 0 && 355799653d4eSeschrock (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 3558fa94a07fSbrendan (error = spa_validate_aux(spa, nvroot, txg, 355999653d4eSeschrock VDEV_ALLOC_ADD)) == 0) { 3560573ca77eSGeorge Wilson for (int c = 0; c < rvd->vdev_children; c++) { 3561573ca77eSGeorge Wilson vdev_metaslab_set_size(rvd->vdev_child[c]); 3562573ca77eSGeorge Wilson vdev_expand(rvd->vdev_child[c], txg); 3563573ca77eSGeorge Wilson } 35640373e76bSbonwick } 35650373e76bSbonwick 3566e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALL, FTAG); 3567fa9e4066Sahrens 356899653d4eSeschrock if (error != 0) { 3569fa9e4066Sahrens spa_unload(spa); 3570fa9e4066Sahrens spa_deactivate(spa); 3571fa9e4066Sahrens spa_remove(spa); 3572fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 3573fa9e4066Sahrens return (error); 3574fa9e4066Sahrens } 3575fa9e4066Sahrens 357699653d4eSeschrock /* 357799653d4eSeschrock * Get the list of spares, if specified. 357899653d4eSeschrock */ 357999653d4eSeschrock if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 358099653d4eSeschrock &spares, &nspares) == 0) { 3581fa94a07fSbrendan VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 358299653d4eSeschrock KM_SLEEP) == 0); 3583fa94a07fSbrendan VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 358499653d4eSeschrock ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3585e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 358699653d4eSeschrock spa_load_spares(spa); 3587e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALL, FTAG); 3588fa94a07fSbrendan spa->spa_spares.sav_sync = B_TRUE; 3589fa94a07fSbrendan } 3590fa94a07fSbrendan 3591fa94a07fSbrendan /* 3592fa94a07fSbrendan * Get the list of level 2 cache devices, if specified. 3593fa94a07fSbrendan */ 3594fa94a07fSbrendan if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 3595fa94a07fSbrendan &l2cache, &nl2cache) == 0) { 3596fa94a07fSbrendan VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 3597fa94a07fSbrendan NV_UNIQUE_NAME, KM_SLEEP) == 0); 3598fa94a07fSbrendan VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 3599fa94a07fSbrendan ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3600e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3601fa94a07fSbrendan spa_load_l2cache(spa); 3602e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALL, FTAG); 3603fa94a07fSbrendan spa->spa_l2cache.sav_sync = B_TRUE; 360499653d4eSeschrock } 360599653d4eSeschrock 3606ad135b5dSChristopher Siden spa->spa_is_initializing = B_TRUE; 36070a48a24eStimh spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 3608fa9e4066Sahrens spa->spa_meta_objset = dp->dp_meta_objset; 3609ad135b5dSChristopher Siden spa->spa_is_initializing = B_FALSE; 3610fa9e4066Sahrens 3611485bbbf5SGeorge Wilson /* 3612485bbbf5SGeorge Wilson * Create DDTs (dedup tables). 
3613485bbbf5SGeorge Wilson */ 3614485bbbf5SGeorge Wilson ddt_create(spa); 3615485bbbf5SGeorge Wilson 3616485bbbf5SGeorge Wilson spa_update_dspace(spa); 3617485bbbf5SGeorge Wilson 3618fa9e4066Sahrens tx = dmu_tx_create_assigned(dp, txg); 3619fa9e4066Sahrens 3620fa9e4066Sahrens /* 3621fa9e4066Sahrens * Create the pool config object. 3622fa9e4066Sahrens */ 3623fa9e4066Sahrens spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 3624f7991ba4STim Haley DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 3625fa9e4066Sahrens DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 3626fa9e4066Sahrens 3627ea8dc4b6Seschrock if (zap_add(spa->spa_meta_objset, 3628fa9e4066Sahrens DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 3629ea8dc4b6Seschrock sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 3630ea8dc4b6Seschrock cmn_err(CE_PANIC, "failed to add pool config"); 3631ea8dc4b6Seschrock } 3632fa9e4066Sahrens 3633ad135b5dSChristopher Siden if (spa_version(spa) >= SPA_VERSION_FEATURES) 3634ad135b5dSChristopher Siden spa_feature_create_zap_objects(spa, tx); 3635ad135b5dSChristopher Siden 36363f9d6ad7SLin Ling if (zap_add(spa->spa_meta_objset, 36373f9d6ad7SLin Ling DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 36383f9d6ad7SLin Ling sizeof (uint64_t), 1, &version, tx) != 0) { 36393f9d6ad7SLin Ling cmn_err(CE_PANIC, "failed to add pool version"); 36403f9d6ad7SLin Ling } 36413f9d6ad7SLin Ling 3642990b4856Slling /* Newly created pools with the right version are always deflated. */ 3643990b4856Slling if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 364499653d4eSeschrock spa->spa_deflate = TRUE; 364599653d4eSeschrock if (zap_add(spa->spa_meta_objset, 364699653d4eSeschrock DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 364799653d4eSeschrock sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 364899653d4eSeschrock cmn_err(CE_PANIC, "failed to add deflate"); 364999653d4eSeschrock } 3650990b4856Slling } 365199653d4eSeschrock 3652fa9e4066Sahrens /* 3653cde58dbcSMatthew Ahrens * Create the deferred-free bpobj. Turn off compression 3654fa9e4066Sahrens * because sync-to-convergence takes longer if the blocksize 3655fa9e4066Sahrens * keeps changing. 3656fa9e4066Sahrens */ 3657cde58dbcSMatthew Ahrens obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 3658cde58dbcSMatthew Ahrens dmu_object_set_compress(spa->spa_meta_objset, obj, 3659cde58dbcSMatthew Ahrens ZIO_COMPRESS_OFF, tx); 3660ea8dc4b6Seschrock if (zap_add(spa->spa_meta_objset, 3661cde58dbcSMatthew Ahrens DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 3662cde58dbcSMatthew Ahrens sizeof (uint64_t), 1, &obj, tx) != 0) { 3663cde58dbcSMatthew Ahrens cmn_err(CE_PANIC, "failed to add bpobj"); 3664ea8dc4b6Seschrock } 3665b420f3adSRichard Lowe VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 3666cde58dbcSMatthew Ahrens spa->spa_meta_objset, obj)); 3667fa9e4066Sahrens 366806eeb2adSek110237 /* 366906eeb2adSek110237 * Create the pool's history object. 367006eeb2adSek110237 */ 3671990b4856Slling if (version >= SPA_VERSION_ZPOOL_HISTORY) 367206eeb2adSek110237 spa_history_create_obj(spa, tx); 367306eeb2adSek110237 3674990b4856Slling /* 367545818ee1SMatthew Ahrens * Generate some random noise for salted checksums to operate on. 367645818ee1SMatthew Ahrens */ 367745818ee1SMatthew Ahrens (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 367845818ee1SMatthew Ahrens sizeof (spa->spa_cksum_salt.zcs_bytes)); 367945818ee1SMatthew Ahrens 368045818ee1SMatthew Ahrens /* 3681990b4856Slling * Set pool properties. 
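	 *
	 * These start out as the zpool_prop_default_numeric() defaults;
	 * any user-supplied values are layered on top below via
	 * spa_configfile_set() and spa_sync_props() before the creation
	 * tx commits.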
3682990b4856Slling 	 */
3683990b4856Slling 	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3684990b4856Slling 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
36850a4e9518Sgw25295 	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
3686573ca77eSGeorge Wilson 	spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
3687b24ab676SJeff Bonwick 
3688379c004dSEric Schrock 	if (props != NULL) {
3689379c004dSEric Schrock 		spa_configfile_set(spa, props, B_FALSE);
36903b2aab18SMatthew Ahrens 		spa_sync_props(props, tx);
3691379c004dSEric Schrock 	}
3692990b4856Slling 
3693fa9e4066Sahrens 	dmu_tx_commit(tx);
3694fa9e4066Sahrens 
3695fa9e4066Sahrens 	spa->spa_sync_on = B_TRUE;
3696fa9e4066Sahrens 	txg_sync_start(spa->spa_dsl_pool);
3697fa9e4066Sahrens 
3698fa9e4066Sahrens 	/*
3699fa9e4066Sahrens 	 * We explicitly wait for the first transaction to complete so that our
3700fa9e4066Sahrens 	 * bean counters are appropriately updated.
3701fa9e4066Sahrens 	 */
3702fa9e4066Sahrens 	txg_wait_synced(spa->spa_dsl_pool, txg);
3703fa9e4066Sahrens 
3704c5904d13Seschrock 	spa_config_sync(spa, B_FALSE, B_TRUE);
370514372834SHans Rosenfeld 	spa_event_notify(spa, NULL, ESC_ZFS_POOL_CREATE);
3706fa9e4066Sahrens 
37074445fffbSMatthew Ahrens 	spa_history_log_version(spa, "create");
3708228975ccSek110237 
3709bc9014e6SJustin Gibbs 	/*
3710bc9014e6SJustin Gibbs 	 * Don't count references from objsets that are already closed
3711bc9014e6SJustin Gibbs 	 * and are making their way through the eviction process.
3712bc9014e6SJustin Gibbs 	 */
3713bc9014e6SJustin Gibbs 	spa_evicting_os_wait(spa);
3714088f3894Sahrens 	spa->spa_minref = refcount_count(&spa->spa_refcount);
3715088f3894Sahrens 
3716daaa36a7SGeorge Wilson 	mutex_exit(&spa_namespace_lock);
3717daaa36a7SGeorge Wilson 
3718fa9e4066Sahrens 	return (0);
3719fa9e4066Sahrens }
3720fa9e4066Sahrens 
3721e7cbe64fSgw25295 #ifdef _KERNEL
3722e7cbe64fSgw25295 /*
372321ecdf64SLin Ling  * Get the root pool information from the root disk, then import the root pool
372421ecdf64SLin Ling  * at system boot time.
3725e7cbe64fSgw25295  */
372621ecdf64SLin Ling extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
372721ecdf64SLin Ling 
372821ecdf64SLin Ling static nvlist_t *
372921ecdf64SLin Ling spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3730e7cbe64fSgw25295 {
373121ecdf64SLin Ling 	nvlist_t *config;
3732e7cbe64fSgw25295 	nvlist_t *nvtop, *nvroot;
3733e7cbe64fSgw25295 	uint64_t pgid;
3734e7cbe64fSgw25295 
373521ecdf64SLin Ling 	if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
373621ecdf64SLin Ling 		return (NULL);
373721ecdf64SLin Ling 
3738e7cbe64fSgw25295 	/*
3739e7cbe64fSgw25295 	 * Add this top-level vdev to the child array.
3740e7cbe64fSgw25295 	 */
374121ecdf64SLin Ling 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
374221ecdf64SLin Ling 	    &nvtop) == 0);
374321ecdf64SLin Ling 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
374421ecdf64SLin Ling 	    &pgid) == 0);
374521ecdf64SLin Ling 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3746e7cbe64fSgw25295 
3747e7cbe64fSgw25295 	/*
3748e7cbe64fSgw25295 	 * Put this pool's top-level vdevs into a root vdev.
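	 *
	 * The resulting config has roughly this shape (illustrative):
	 *
	 *	vdev_tree:
	 *		type:		'root'
	 *		id:		0
	 *		guid:		<pool guid>
	 *		children[0]:	<top-level vdev from the label>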
3749e7cbe64fSgw25295 	 */
3750e7cbe64fSgw25295 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
375121ecdf64SLin Ling 	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
375221ecdf64SLin Ling 	    VDEV_TYPE_ROOT) == 0);
3753e7cbe64fSgw25295 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3754e7cbe64fSgw25295 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3755e7cbe64fSgw25295 	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3756e7cbe64fSgw25295 	    &nvtop, 1) == 0);
3757e7cbe64fSgw25295 
3758e7cbe64fSgw25295 	/*
3759e7cbe64fSgw25295 	 * Replace the existing vdev_tree with the new root vdev in
3760e7cbe64fSgw25295 	 * this pool's configuration (remove the old, add the new).
3761e7cbe64fSgw25295 	 */
3762e7cbe64fSgw25295 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3763e7cbe64fSgw25295 	nvlist_free(nvroot);
376421ecdf64SLin Ling 	return (config);
3765e7cbe64fSgw25295 }
3766e7cbe64fSgw25295 
3767e7cbe64fSgw25295 /*
376821ecdf64SLin Ling  * Walk the vdev tree and see if we can find a device with "better"
376921ecdf64SLin Ling  * configuration. A configuration is "better" if the label on that
377021ecdf64SLin Ling  * device has a more recent txg.
3771e7cbe64fSgw25295  */
377221ecdf64SLin Ling static void
377321ecdf64SLin Ling spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3774e7cbe64fSgw25295 {
3775573ca77eSGeorge Wilson 	for (int c = 0; c < vd->vdev_children; c++)
377621ecdf64SLin Ling 		spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3777e7cbe64fSgw25295 
377821ecdf64SLin Ling 	if (vd->vdev_ops->vdev_op_leaf) {
377921ecdf64SLin Ling 		nvlist_t *label;
378021ecdf64SLin Ling 		uint64_t label_txg;
3781e7cbe64fSgw25295 
378221ecdf64SLin Ling 		if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
378321ecdf64SLin Ling 		    &label) != 0)
378421ecdf64SLin Ling 			return;
3785e7cbe64fSgw25295 
378621ecdf64SLin Ling 		VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
378721ecdf64SLin Ling 		    &label_txg) == 0);
3788051aabe6Staylor 
3789051aabe6Staylor 		/*
379021ecdf64SLin Ling 		 * Do we have a better boot device?
3791051aabe6Staylor 		 */
379221ecdf64SLin Ling 		if (label_txg > *txg) {
379321ecdf64SLin Ling 			*txg = label_txg;
379421ecdf64SLin Ling 			*avd = vd;
3795051aabe6Staylor 		}
379621ecdf64SLin Ling 		nvlist_free(label);
3797051aabe6Staylor 	}
3798051aabe6Staylor }
3799051aabe6Staylor 
3800e7cbe64fSgw25295 /*
3801e7cbe64fSgw25295  * Import a root pool.
3802e7cbe64fSgw25295  *
3803051aabe6Staylor  * For x86, devpath_list will consist of the devid and/or physpath name of
3804051aabe6Staylor  * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
3805051aabe6Staylor  * The GRUB "findroot" command will return the vdev we should boot.
3806e7cbe64fSgw25295  *
3807e7cbe64fSgw25295  * For Sparc, devpath_list consists of the physpath name of the booting
3808e7cbe64fSgw25295  * device, no matter whether the rootpool is a single-device or mirrored pool.
3809e7cbe64fSgw25295  * e.g.
3810e7cbe64fSgw25295  *	"/pci@1f,0/ide@d/disk@0,0:a"
3811e7cbe64fSgw25295  */
3812e7cbe64fSgw25295 int
3813051aabe6Staylor spa_import_rootpool(char *devpath, char *devid)
3814e7cbe64fSgw25295 {
381521ecdf64SLin Ling 	spa_t *spa;
381621ecdf64SLin Ling 	vdev_t *rvd, *bvd, *avd = NULL;
381721ecdf64SLin Ling 	nvlist_t *config, *nvtop;
381821ecdf64SLin Ling 	uint64_t guid, txg;
3819e7cbe64fSgw25295 	char *pname;
3820e7cbe64fSgw25295 	int error;
3821e7cbe64fSgw25295 
3822e7cbe64fSgw25295 	/*
382321ecdf64SLin Ling 	 * Read the label from the boot device and generate a configuration.
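	 *
	 * On OBP (SPARC) systems an iSCSI boot path gets one retry: if the
	 * first read fails, devpath is rewritten by get_iscsi_bootpath_phy()
	 * and the label is read again (see the _OBP block below).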
3824e7cbe64fSgw25295 */ 3825dedec472SJack Meng config = spa_generate_rootconf(devpath, devid, &guid); 3826dedec472SJack Meng #if defined(_OBP) && defined(_KERNEL) 3827dedec472SJack Meng if (config == NULL) { 3828dedec472SJack Meng if (strstr(devpath, "/iscsi/ssd") != NULL) { 3829dedec472SJack Meng /* iscsi boot */ 3830dedec472SJack Meng get_iscsi_bootpath_phy(devpath); 3831dedec472SJack Meng config = spa_generate_rootconf(devpath, devid, &guid); 3832dedec472SJack Meng } 3833dedec472SJack Meng } 3834dedec472SJack Meng #endif 3835dedec472SJack Meng if (config == NULL) { 383621ecdf64SLin Ling cmn_err(CE_NOTE, "Cannot read the pool label from '%s'", 383721ecdf64SLin Ling devpath); 3838be6fd75aSMatthew Ahrens return (SET_ERROR(EIO)); 383921ecdf64SLin Ling } 3840e7cbe64fSgw25295 384121ecdf64SLin Ling VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 384221ecdf64SLin Ling &pname) == 0); 384321ecdf64SLin Ling VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 3844e7cbe64fSgw25295 38456809eb4eSEric Schrock mutex_enter(&spa_namespace_lock); 38466809eb4eSEric Schrock if ((spa = spa_lookup(pname)) != NULL) { 3847bf82a41bSeschrock /* 38486809eb4eSEric Schrock * Remove the existing root pool from the namespace so that we 38496809eb4eSEric Schrock * can replace it with the correct config we just read in. 3850bf82a41bSeschrock */ 38516809eb4eSEric Schrock spa_remove(spa); 38526809eb4eSEric Schrock } 38536809eb4eSEric Schrock 3854468c413aSTim Haley spa = spa_add(pname, config, NULL); 38556809eb4eSEric Schrock spa->spa_is_root = B_TRUE; 38564b964adaSGeorge Wilson spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 385721ecdf64SLin Ling 385821ecdf64SLin Ling /* 385921ecdf64SLin Ling * Build up a vdev tree based on the boot device's label config. 386021ecdf64SLin Ling */ 386121ecdf64SLin Ling VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 386221ecdf64SLin Ling &nvtop) == 0); 386321ecdf64SLin Ling spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 386421ecdf64SLin Ling error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 386521ecdf64SLin Ling VDEV_ALLOC_ROOTPOOL); 386621ecdf64SLin Ling spa_config_exit(spa, SCL_ALL, FTAG); 386721ecdf64SLin Ling if (error) { 38686809eb4eSEric Schrock mutex_exit(&spa_namespace_lock); 386921ecdf64SLin Ling nvlist_free(config); 387021ecdf64SLin Ling cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 387121ecdf64SLin Ling pname); 3872e7cbe64fSgw25295 return (error); 3873e7cbe64fSgw25295 } 387421ecdf64SLin Ling 387521ecdf64SLin Ling /* 387621ecdf64SLin Ling * Get the boot vdev. 387721ecdf64SLin Ling */ 387821ecdf64SLin Ling if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 387921ecdf64SLin Ling cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu", 388021ecdf64SLin Ling (u_longlong_t)guid); 3881be6fd75aSMatthew Ahrens error = SET_ERROR(ENOENT); 388221ecdf64SLin Ling goto out; 388321ecdf64SLin Ling } 388421ecdf64SLin Ling 388521ecdf64SLin Ling /* 388621ecdf64SLin Ling * Determine if there is a better boot device. 388721ecdf64SLin Ling */ 388821ecdf64SLin Ling avd = bvd; 388921ecdf64SLin Ling spa_alt_rootvdev(rvd, &avd, &txg); 389021ecdf64SLin Ling if (avd != bvd) { 389121ecdf64SLin Ling cmn_err(CE_NOTE, "The boot device is 'degraded'. 
Please " 389221ecdf64SLin Ling "try booting from '%s'", avd->vdev_path); 3893be6fd75aSMatthew Ahrens error = SET_ERROR(EINVAL); 389421ecdf64SLin Ling goto out; 389521ecdf64SLin Ling } 389621ecdf64SLin Ling 389721ecdf64SLin Ling /* 389821ecdf64SLin Ling * If the boot device is part of a spare vdev then ensure that 389921ecdf64SLin Ling * we're booting off the active spare. 390021ecdf64SLin Ling */ 390121ecdf64SLin Ling if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 390221ecdf64SLin Ling !bvd->vdev_isspare) { 390321ecdf64SLin Ling cmn_err(CE_NOTE, "The boot device is currently spared. Please " 390421ecdf64SLin Ling "try booting from '%s'", 3905cb04b873SMark J Musante bvd->vdev_parent-> 3906cb04b873SMark J Musante vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); 3907be6fd75aSMatthew Ahrens error = SET_ERROR(EINVAL); 390821ecdf64SLin Ling goto out; 390921ecdf64SLin Ling } 391021ecdf64SLin Ling 391121ecdf64SLin Ling error = 0; 391221ecdf64SLin Ling out: 391321ecdf64SLin Ling spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 391421ecdf64SLin Ling vdev_free(rvd); 391521ecdf64SLin Ling spa_config_exit(spa, SCL_ALL, FTAG); 391621ecdf64SLin Ling mutex_exit(&spa_namespace_lock); 391721ecdf64SLin Ling 391821ecdf64SLin Ling nvlist_free(config); 391921ecdf64SLin Ling return (error); 392021ecdf64SLin Ling } 392121ecdf64SLin Ling 3922e7cbe64fSgw25295 #endif 3923e7cbe64fSgw25295 3924e7cbe64fSgw25295 /* 3925e7cbe64fSgw25295 * Import a non-root pool into the system. 3926e7cbe64fSgw25295 */ 3927e7cbe64fSgw25295 int 39284b964adaSGeorge Wilson spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 3929e7cbe64fSgw25295 { 39306809eb4eSEric Schrock spa_t *spa; 39316809eb4eSEric Schrock char *altroot = NULL; 3932468c413aSTim Haley spa_load_state_t state = SPA_LOAD_IMPORT; 3933468c413aSTim Haley zpool_rewind_policy_t policy; 3934f9af39baSGeorge Wilson uint64_t mode = spa_mode_global; 3935f9af39baSGeorge Wilson uint64_t readonly = B_FALSE; 39366809eb4eSEric Schrock int error; 39376809eb4eSEric Schrock nvlist_t *nvroot; 39386809eb4eSEric Schrock nvlist_t **spares, **l2cache; 39396809eb4eSEric Schrock uint_t nspares, nl2cache; 39406809eb4eSEric Schrock 39416809eb4eSEric Schrock /* 39426809eb4eSEric Schrock * If a pool with this name exists, return failure. 39436809eb4eSEric Schrock */ 39446809eb4eSEric Schrock mutex_enter(&spa_namespace_lock); 39451195e687SMark J Musante if (spa_lookup(pool) != NULL) { 39466809eb4eSEric Schrock mutex_exit(&spa_namespace_lock); 3947be6fd75aSMatthew Ahrens return (SET_ERROR(EEXIST)); 3948e7cbe64fSgw25295 } 3949e7cbe64fSgw25295 39506809eb4eSEric Schrock /* 39516809eb4eSEric Schrock * Create and initialize the spa structure. 39526809eb4eSEric Schrock */ 39536809eb4eSEric Schrock (void) nvlist_lookup_string(props, 39546809eb4eSEric Schrock zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3955f9af39baSGeorge Wilson (void) nvlist_lookup_uint64(props, 3956f9af39baSGeorge Wilson zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 3957f9af39baSGeorge Wilson if (readonly) 3958f9af39baSGeorge Wilson mode = FREAD; 3959468c413aSTim Haley spa = spa_add(pool, config, altroot); 39604b964adaSGeorge Wilson spa->spa_import_flags = flags; 39614b964adaSGeorge Wilson 39624b964adaSGeorge Wilson /* 39634b964adaSGeorge Wilson * Verbatim import - Take a pool and insert it into the namespace 39644b964adaSGeorge Wilson * as if it had been loaded at boot. 
39654b964adaSGeorge Wilson */ 39664b964adaSGeorge Wilson if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 39674b964adaSGeorge Wilson if (props != NULL) 39684b964adaSGeorge Wilson spa_configfile_set(spa, props, B_FALSE); 39694b964adaSGeorge Wilson 39704b964adaSGeorge Wilson spa_config_sync(spa, B_FALSE, B_TRUE); 397114372834SHans Rosenfeld spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT); 39724b964adaSGeorge Wilson 39734b964adaSGeorge Wilson mutex_exit(&spa_namespace_lock); 39744b964adaSGeorge Wilson return (0); 39754b964adaSGeorge Wilson } 39764b964adaSGeorge Wilson 3977f9af39baSGeorge Wilson spa_activate(spa, mode); 39786809eb4eSEric Schrock 39796809eb4eSEric Schrock /* 398025f89ee2SJeff Bonwick * Don't start async tasks until we know everything is healthy. 398125f89ee2SJeff Bonwick */ 398225f89ee2SJeff Bonwick spa_async_suspend(spa); 398325f89ee2SJeff Bonwick 39844b964adaSGeorge Wilson zpool_get_rewind_policy(config, &policy); 39854b964adaSGeorge Wilson if (policy.zrp_request & ZPOOL_DO_REWIND) 39864b964adaSGeorge Wilson state = SPA_LOAD_RECOVER; 39874b964adaSGeorge Wilson 398825f89ee2SJeff Bonwick /* 39896809eb4eSEric Schrock * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig 39906809eb4eSEric Schrock * because the user-supplied config is actually the one to trust when 39916809eb4eSEric Schrock * doing an import. 39926809eb4eSEric Schrock */ 3993468c413aSTim Haley if (state != SPA_LOAD_RECOVER) 3994468c413aSTim Haley spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 39954b964adaSGeorge Wilson 3996468c413aSTim Haley error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg, 3997c8ee1847SVictor Latushkin policy.zrp_request); 3998468c413aSTim Haley 3999468c413aSTim Haley /* 40004b964adaSGeorge Wilson * Propagate anything learned while loading the pool and pass it 40014b964adaSGeorge Wilson * back to caller (i.e. rewind info, missing devices, etc). 4002468c413aSTim Haley */ 40034b964adaSGeorge Wilson VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 40044b964adaSGeorge Wilson spa->spa_load_info) == 0); 40056809eb4eSEric Schrock 40066809eb4eSEric Schrock spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 40076809eb4eSEric Schrock /* 40086809eb4eSEric Schrock * Toss any existing sparelist, as it doesn't have any validity 40096809eb4eSEric Schrock * anymore, and conflicts with spa_has_spare(). 
40106809eb4eSEric Schrock */ 40116809eb4eSEric Schrock if (spa->spa_spares.sav_config) { 40126809eb4eSEric Schrock nvlist_free(spa->spa_spares.sav_config); 40136809eb4eSEric Schrock spa->spa_spares.sav_config = NULL; 40146809eb4eSEric Schrock spa_load_spares(spa); 40156809eb4eSEric Schrock } 40166809eb4eSEric Schrock if (spa->spa_l2cache.sav_config) { 40176809eb4eSEric Schrock nvlist_free(spa->spa_l2cache.sav_config); 40186809eb4eSEric Schrock spa->spa_l2cache.sav_config = NULL; 40196809eb4eSEric Schrock spa_load_l2cache(spa); 40206809eb4eSEric Schrock } 40216809eb4eSEric Schrock 40226809eb4eSEric Schrock VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 40236809eb4eSEric Schrock &nvroot) == 0); 40246809eb4eSEric Schrock if (error == 0) 40256809eb4eSEric Schrock error = spa_validate_aux(spa, nvroot, -1ULL, 40266809eb4eSEric Schrock VDEV_ALLOC_SPARE); 40276809eb4eSEric Schrock if (error == 0) 40286809eb4eSEric Schrock error = spa_validate_aux(spa, nvroot, -1ULL, 40296809eb4eSEric Schrock VDEV_ALLOC_L2CACHE); 40306809eb4eSEric Schrock spa_config_exit(spa, SCL_ALL, FTAG); 40316809eb4eSEric Schrock 40326809eb4eSEric Schrock if (props != NULL) 40336809eb4eSEric Schrock spa_configfile_set(spa, props, B_FALSE); 40346809eb4eSEric Schrock 40356809eb4eSEric Schrock if (error != 0 || (props && spa_writeable(spa) && 40366809eb4eSEric Schrock (error = spa_prop_set(spa, props)))) { 40376809eb4eSEric Schrock spa_unload(spa); 40386809eb4eSEric Schrock spa_deactivate(spa); 40396809eb4eSEric Schrock spa_remove(spa); 40406809eb4eSEric Schrock mutex_exit(&spa_namespace_lock); 40416809eb4eSEric Schrock return (error); 40426809eb4eSEric Schrock } 40436809eb4eSEric Schrock 4044955ef359SLin Ling spa_async_resume(spa); 4045955ef359SLin Ling 40466809eb4eSEric Schrock /* 40476809eb4eSEric Schrock * Override any spares and level 2 cache devices as specified by 40486809eb4eSEric Schrock * the user, as these may have correct device names/devids, etc. 
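	 *
	 * nvlist arrays can't be edited in place, so each list is replaced
	 * wholesale: remove the old array (if any), add the caller's array,
	 * then resync the in-core aux state.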
40496809eb4eSEric Schrock */ 40506809eb4eSEric Schrock if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 40516809eb4eSEric Schrock &spares, &nspares) == 0) { 40526809eb4eSEric Schrock if (spa->spa_spares.sav_config) 40536809eb4eSEric Schrock VERIFY(nvlist_remove(spa->spa_spares.sav_config, 40546809eb4eSEric Schrock ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 40556809eb4eSEric Schrock else 40566809eb4eSEric Schrock VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 40576809eb4eSEric Schrock NV_UNIQUE_NAME, KM_SLEEP) == 0); 40586809eb4eSEric Schrock VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 40596809eb4eSEric Schrock ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 40606809eb4eSEric Schrock spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 40616809eb4eSEric Schrock spa_load_spares(spa); 40626809eb4eSEric Schrock spa_config_exit(spa, SCL_ALL, FTAG); 40636809eb4eSEric Schrock spa->spa_spares.sav_sync = B_TRUE; 40646809eb4eSEric Schrock } 40656809eb4eSEric Schrock if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 40666809eb4eSEric Schrock &l2cache, &nl2cache) == 0) { 40676809eb4eSEric Schrock if (spa->spa_l2cache.sav_config) 40686809eb4eSEric Schrock VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 40696809eb4eSEric Schrock ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 40706809eb4eSEric Schrock else 40716809eb4eSEric Schrock VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 40726809eb4eSEric Schrock NV_UNIQUE_NAME, KM_SLEEP) == 0); 40736809eb4eSEric Schrock VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 40746809eb4eSEric Schrock ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 40756809eb4eSEric Schrock spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 40766809eb4eSEric Schrock spa_load_l2cache(spa); 40776809eb4eSEric Schrock spa_config_exit(spa, SCL_ALL, FTAG); 40786809eb4eSEric Schrock spa->spa_l2cache.sav_sync = B_TRUE; 40796809eb4eSEric Schrock } 40806809eb4eSEric Schrock 4081b693757aSEric Schrock /* 4082b693757aSEric Schrock * Check for any removed devices. 4083b693757aSEric Schrock */ 4084b693757aSEric Schrock if (spa->spa_autoreplace) { 4085b693757aSEric Schrock spa_aux_check_removed(&spa->spa_spares); 4086b693757aSEric Schrock spa_aux_check_removed(&spa->spa_l2cache); 4087b693757aSEric Schrock } 4088b693757aSEric Schrock 40896809eb4eSEric Schrock if (spa_writeable(spa)) { 40906809eb4eSEric Schrock /* 40916809eb4eSEric Schrock * Update the config cache to include the newly-imported pool. 40926809eb4eSEric Schrock */ 4093bc758434SLin Ling spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 40946809eb4eSEric Schrock } 40956809eb4eSEric Schrock 4096573ca77eSGeorge Wilson /* 4097573ca77eSGeorge Wilson * It's possible that the pool was expanded while it was exported. 4098573ca77eSGeorge Wilson * We kick off an async task to handle this for us. 
4099573ca77eSGeorge Wilson */ 4100573ca77eSGeorge Wilson spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 4101573ca77eSGeorge Wilson 41024445fffbSMatthew Ahrens spa_history_log_version(spa, "import"); 41036809eb4eSEric Schrock 410414372834SHans Rosenfeld spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT); 410514372834SHans Rosenfeld 410614372834SHans Rosenfeld mutex_exit(&spa_namespace_lock); 410714372834SHans Rosenfeld 41086809eb4eSEric Schrock return (0); 4109c5904d13Seschrock } 4110c5904d13Seschrock 4111fa9e4066Sahrens nvlist_t * 4112fa9e4066Sahrens spa_tryimport(nvlist_t *tryconfig) 4113fa9e4066Sahrens { 4114fa9e4066Sahrens nvlist_t *config = NULL; 4115fa9e4066Sahrens char *poolname; 4116fa9e4066Sahrens spa_t *spa; 4117fa9e4066Sahrens uint64_t state; 41187b7154beSLin Ling int error; 4119fa9e4066Sahrens 4120fa9e4066Sahrens if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 4121fa9e4066Sahrens return (NULL); 4122fa9e4066Sahrens 4123fa9e4066Sahrens if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 4124fa9e4066Sahrens return (NULL); 4125fa9e4066Sahrens 4126fa9e4066Sahrens /* 41270373e76bSbonwick * Create and initialize the spa structure. 4128fa9e4066Sahrens */ 41290373e76bSbonwick mutex_enter(&spa_namespace_lock); 4130468c413aSTim Haley spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 41318ad4d6ddSJeff Bonwick spa_activate(spa, FREAD); 4132fa9e4066Sahrens 4133fa9e4066Sahrens /* 41340373e76bSbonwick * Pass off the heavy lifting to spa_load(). 4135ecc2d604Sbonwick * Pass TRUE for mosconfig because the user-supplied config 4136ecc2d604Sbonwick * is actually the one to trust when doing an import. 4137fa9e4066Sahrens */ 41381195e687SMark J Musante error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE); 4139fa9e4066Sahrens 4140fa9e4066Sahrens /* 4141fa9e4066Sahrens * If 'tryconfig' was at least parsable, return the current config. 4142fa9e4066Sahrens */ 4143fa9e4066Sahrens if (spa->spa_root_vdev != NULL) { 4144fa9e4066Sahrens config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4145fa9e4066Sahrens VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 4146fa9e4066Sahrens poolname) == 0); 4147fa9e4066Sahrens VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 4148fa9e4066Sahrens state) == 0); 414995173954Sek110237 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 415095173954Sek110237 spa->spa_uberblock.ub_timestamp) == 0); 4151ad135b5dSChristopher Siden VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4152ad135b5dSChristopher Siden spa->spa_load_info) == 0); 415399653d4eSeschrock 415499653d4eSeschrock /* 4155e7cbe64fSgw25295 * If the bootfs property exists on this pool then we 4156e7cbe64fSgw25295 * copy it out so that external consumers can tell which 4157e7cbe64fSgw25295 * pools are bootable. 4158e7cbe64fSgw25295 */ 41597b7154beSLin Ling if ((!error || error == EEXIST) && spa->spa_bootfs) { 4160e7cbe64fSgw25295 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4161e7cbe64fSgw25295 4162e7cbe64fSgw25295 /* 4163e7cbe64fSgw25295 * We have to play games with the name since the 4164e7cbe64fSgw25295 * pool was opened as TRYIMPORT_NAME. 
4165e7cbe64fSgw25295 */ 4166e14bb325SJeff Bonwick if (dsl_dsobj_to_dsname(spa_name(spa), 4167e7cbe64fSgw25295 spa->spa_bootfs, tmpname) == 0) { 4168e7cbe64fSgw25295 char *cp; 4169e7cbe64fSgw25295 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4170e7cbe64fSgw25295 4171e7cbe64fSgw25295 cp = strchr(tmpname, '/'); 4172e7cbe64fSgw25295 if (cp == NULL) { 4173e7cbe64fSgw25295 (void) strlcpy(dsname, tmpname, 4174e7cbe64fSgw25295 MAXPATHLEN); 4175e7cbe64fSgw25295 } else { 4176e7cbe64fSgw25295 (void) snprintf(dsname, MAXPATHLEN, 4177e7cbe64fSgw25295 "%s/%s", poolname, ++cp); 4178e7cbe64fSgw25295 } 4179e7cbe64fSgw25295 VERIFY(nvlist_add_string(config, 4180e7cbe64fSgw25295 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 4181e7cbe64fSgw25295 kmem_free(dsname, MAXPATHLEN); 4182e7cbe64fSgw25295 } 4183e7cbe64fSgw25295 kmem_free(tmpname, MAXPATHLEN); 4184e7cbe64fSgw25295 } 4185e7cbe64fSgw25295 4186e7cbe64fSgw25295 /* 4187fa94a07fSbrendan * Add the list of hot spares and level 2 cache devices. 418899653d4eSeschrock */ 41896809eb4eSEric Schrock spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 419099653d4eSeschrock spa_add_spares(spa, config); 4191fa94a07fSbrendan spa_add_l2cache(spa, config); 41926809eb4eSEric Schrock spa_config_exit(spa, SCL_CONFIG, FTAG); 4193fa9e4066Sahrens } 4194fa9e4066Sahrens 4195fa9e4066Sahrens spa_unload(spa); 4196fa9e4066Sahrens spa_deactivate(spa); 4197fa9e4066Sahrens spa_remove(spa); 4198fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 4199fa9e4066Sahrens 4200fa9e4066Sahrens return (config); 4201fa9e4066Sahrens } 4202fa9e4066Sahrens 4203fa9e4066Sahrens /* 4204fa9e4066Sahrens * Pool export/destroy 4205fa9e4066Sahrens * 4206fa9e4066Sahrens * The act of destroying or exporting a pool is very simple. We make sure there 4207fa9e4066Sahrens * is no more pending I/O and any references to the pool are gone. Then, we 4208fa9e4066Sahrens * update the pool state and sync all the labels to disk, removing the 4209394ab0cbSGeorge Wilson * configuration from the cache afterwards. If the 'hardforce' flag is set, then 4210394ab0cbSGeorge Wilson * we don't sync the labels or remove the configuration cache. 4211fa9e4066Sahrens */ 4212fa9e4066Sahrens static int 421389a89ebfSlling spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 4214394ab0cbSGeorge Wilson boolean_t force, boolean_t hardforce) 4215fa9e4066Sahrens { 4216fa9e4066Sahrens spa_t *spa; 4217fa9e4066Sahrens 421844cd46caSbillm if (oldconfig) 421944cd46caSbillm *oldconfig = NULL; 422044cd46caSbillm 42218ad4d6ddSJeff Bonwick if (!(spa_mode_global & FWRITE)) 4222be6fd75aSMatthew Ahrens return (SET_ERROR(EROFS)); 4223fa9e4066Sahrens 4224fa9e4066Sahrens mutex_enter(&spa_namespace_lock); 4225fa9e4066Sahrens if ((spa = spa_lookup(pool)) == NULL) { 4226fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 4227be6fd75aSMatthew Ahrens return (SET_ERROR(ENOENT)); 4228fa9e4066Sahrens } 4229fa9e4066Sahrens 4230fa9e4066Sahrens /* 4231ea8dc4b6Seschrock * Put a hold on the pool, drop the namespace lock, stop async tasks, 4232ea8dc4b6Seschrock * reacquire the namespace lock, and see if we can export. 4233ea8dc4b6Seschrock */ 4234ea8dc4b6Seschrock spa_open_ref(spa, FTAG); 4235ea8dc4b6Seschrock mutex_exit(&spa_namespace_lock); 4236ea8dc4b6Seschrock spa_async_suspend(spa); 4237ea8dc4b6Seschrock mutex_enter(&spa_namespace_lock); 4238ea8dc4b6Seschrock spa_close(spa, FTAG); 4239ea8dc4b6Seschrock 4240ea8dc4b6Seschrock /* 4241fa9e4066Sahrens * The pool will be in core if it's openable, 4242fa9e4066Sahrens * in which case we can modify its state. 
4243fa9e4066Sahrens 	 */
4244fa9e4066Sahrens 	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4245fa9e4066Sahrens 		/*
4246fa9e4066Sahrens 		 * Objsets may be open only because they're dirty, so we
4247fa9e4066Sahrens 		 * have to force it to sync before checking spa_refcnt.
4248fa9e4066Sahrens 		 */
4249fa9e4066Sahrens 		txg_wait_synced(spa->spa_dsl_pool, 0);
4250bc9014e6SJustin Gibbs 		spa_evicting_os_wait(spa);
4251fa9e4066Sahrens 
4252ea8dc4b6Seschrock 		/*
4253ea8dc4b6Seschrock 		 * A pool cannot be exported or destroyed if there are active
4254ea8dc4b6Seschrock 		 * references. If we are resetting a pool, allow references by
4255ea8dc4b6Seschrock 		 * fault injection handlers.
4256ea8dc4b6Seschrock 		 */
4257ea8dc4b6Seschrock 		if (!spa_refcount_zero(spa) ||
4258ea8dc4b6Seschrock 		    (spa->spa_inject_ref != 0 &&
4259ea8dc4b6Seschrock 		    new_state != POOL_STATE_UNINITIALIZED)) {
4260ea8dc4b6Seschrock 			spa_async_resume(spa);
4261fa9e4066Sahrens 			mutex_exit(&spa_namespace_lock);
4262be6fd75aSMatthew Ahrens 			return (SET_ERROR(EBUSY));
4263fa9e4066Sahrens 		}
4264fa9e4066Sahrens 
4265fa9e4066Sahrens 		/*
426689a89ebfSlling 		 * A pool cannot be exported if it has an active shared spare.
426789a89ebfSlling 		 * This is to prevent other pools stealing the active spare
426889a89ebfSlling 		 * from an exported pool. At the user's own discretion, such
426989a89ebfSlling 		 * a pool can be forcibly exported.
427089a89ebfSlling 		 */
427189a89ebfSlling 		if (!force && new_state == POOL_STATE_EXPORTED &&
427289a89ebfSlling 		    spa_has_active_shared_spare(spa)) {
427389a89ebfSlling 			spa_async_resume(spa);
427489a89ebfSlling 			mutex_exit(&spa_namespace_lock);
4275be6fd75aSMatthew Ahrens 			return (SET_ERROR(EXDEV));
427689a89ebfSlling 		}
427789a89ebfSlling 
427889a89ebfSlling 		/*
4279fa9e4066Sahrens 		 * We want this to be reflected on every label,
4280fa9e4066Sahrens 		 * so mark them all dirty. spa_unload() will do the
4281fa9e4066Sahrens 		 * final sync that pushes these changes out.
4282fa9e4066Sahrens 		 */
4283394ab0cbSGeorge Wilson 		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
4284e14bb325SJeff Bonwick 			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4285ea8dc4b6Seschrock 			spa->spa_state = new_state;
42863f9d6ad7SLin Ling 			spa->spa_final_txg = spa_last_synced_txg(spa) +
42873f9d6ad7SLin Ling 			    TXG_DEFER_SIZE + 1;
4288fa9e4066Sahrens 			vdev_config_dirty(spa->spa_root_vdev);
4289e14bb325SJeff Bonwick 			spa_config_exit(spa, SCL_ALL, FTAG);
4290fa9e4066Sahrens 		}
4291ea8dc4b6Seschrock 	}
4292fa9e4066Sahrens 
42933d7072f8Seschrock 	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
42943d7072f8Seschrock 
4295fa9e4066Sahrens 	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4296fa9e4066Sahrens 		spa_unload(spa);
4297fa9e4066Sahrens 		spa_deactivate(spa);
4298fa9e4066Sahrens 	}
4299fa9e4066Sahrens 
430044cd46caSbillm 	if (oldconfig && spa->spa_config)
430144cd46caSbillm 		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
430244cd46caSbillm 
4303ea8dc4b6Seschrock 	if (new_state != POOL_STATE_UNINITIALIZED) {
4304394ab0cbSGeorge Wilson 		if (!hardforce)
4305c5904d13Seschrock 			spa_config_sync(spa, B_TRUE, B_TRUE);
4306fa9e4066Sahrens 		spa_remove(spa);
4307ea8dc4b6Seschrock 	}
4308fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
4309fa9e4066Sahrens 
4310fa9e4066Sahrens 	return (0);
4311fa9e4066Sahrens }
4312fa9e4066Sahrens 
4313fa9e4066Sahrens /*
4314fa9e4066Sahrens  * Destroy a storage pool.
4315fa9e4066Sahrens */ 4316fa9e4066Sahrens int 4317fa9e4066Sahrens spa_destroy(char *pool) 4318fa9e4066Sahrens { 4319394ab0cbSGeorge Wilson return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 4320394ab0cbSGeorge Wilson B_FALSE, B_FALSE)); 4321fa9e4066Sahrens } 4322fa9e4066Sahrens 4323fa9e4066Sahrens /* 4324fa9e4066Sahrens * Export a storage pool. 4325fa9e4066Sahrens */ 4326fa9e4066Sahrens int 4327394ab0cbSGeorge Wilson spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 4328394ab0cbSGeorge Wilson boolean_t hardforce) 4329fa9e4066Sahrens { 4330394ab0cbSGeorge Wilson return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 4331394ab0cbSGeorge Wilson force, hardforce)); 4332fa9e4066Sahrens } 4333fa9e4066Sahrens 4334fa9e4066Sahrens /* 4335ea8dc4b6Seschrock * Similar to spa_export(), this unloads the spa_t without actually removing it 4336ea8dc4b6Seschrock * from the namespace in any way. 4337ea8dc4b6Seschrock */ 4338ea8dc4b6Seschrock int 4339ea8dc4b6Seschrock spa_reset(char *pool) 4340ea8dc4b6Seschrock { 434189a89ebfSlling return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 4342394ab0cbSGeorge Wilson B_FALSE, B_FALSE)); 4343ea8dc4b6Seschrock } 4344ea8dc4b6Seschrock 4345ea8dc4b6Seschrock /* 4346fa9e4066Sahrens * ========================================================================== 4347fa9e4066Sahrens * Device manipulation 4348fa9e4066Sahrens * ========================================================================== 4349fa9e4066Sahrens */ 4350fa9e4066Sahrens 4351fa9e4066Sahrens /* 43528654d025Sperrin * Add a device to a storage pool. 4353fa9e4066Sahrens */ 4354fa9e4066Sahrens int 4355fa9e4066Sahrens spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 4356fa9e4066Sahrens { 435788ecc943SGeorge Wilson uint64_t txg, id; 43588ad4d6ddSJeff Bonwick int error; 4359fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev; 43600e34b6a7Sbonwick vdev_t *vd, *tvd; 4361fa94a07fSbrendan nvlist_t **spares, **l2cache; 4362fa94a07fSbrendan uint_t nspares, nl2cache; 4363fa9e4066Sahrens 4364f9af39baSGeorge Wilson ASSERT(spa_writeable(spa)); 4365f9af39baSGeorge Wilson 4366fa9e4066Sahrens txg = spa_vdev_enter(spa); 4367fa9e4066Sahrens 436899653d4eSeschrock if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 436999653d4eSeschrock VDEV_ALLOC_ADD)) != 0) 437099653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, error)); 4371fa9e4066Sahrens 4372e14bb325SJeff Bonwick spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 437399653d4eSeschrock 4374fa94a07fSbrendan if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 4375fa94a07fSbrendan &nspares) != 0) 437699653d4eSeschrock nspares = 0; 437799653d4eSeschrock 4378fa94a07fSbrendan if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 4379fa94a07fSbrendan &nl2cache) != 0) 4380fa94a07fSbrendan nl2cache = 0; 4381fa94a07fSbrendan 4382e14bb325SJeff Bonwick if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 4383fa9e4066Sahrens return (spa_vdev_exit(spa, vd, txg, EINVAL)); 4384fa9e4066Sahrens 4385e14bb325SJeff Bonwick if (vd->vdev_children != 0 && 4386e14bb325SJeff Bonwick (error = vdev_create(vd, txg, B_FALSE)) != 0) 4387fa9e4066Sahrens return (spa_vdev_exit(spa, vd, txg, error)); 438839c23413Seschrock 438939c23413Seschrock /* 4390fa94a07fSbrendan * We must validate the spares and l2cache devices after checking the 4391fa94a07fSbrendan * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 
439239c23413Seschrock */ 4393e14bb325SJeff Bonwick if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 439439c23413Seschrock return (spa_vdev_exit(spa, vd, txg, error)); 4395fa9e4066Sahrens 4396fa9e4066Sahrens /* 43970e34b6a7Sbonwick * Transfer each new top-level vdev from vd to rvd. 4398fa9e4066Sahrens */ 43998ad4d6ddSJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) { 440088ecc943SGeorge Wilson 440188ecc943SGeorge Wilson /* 440288ecc943SGeorge Wilson * Set the vdev id to the first hole, if one exists. 440388ecc943SGeorge Wilson */ 440488ecc943SGeorge Wilson for (id = 0; id < rvd->vdev_children; id++) { 440588ecc943SGeorge Wilson if (rvd->vdev_child[id]->vdev_ishole) { 440688ecc943SGeorge Wilson vdev_free(rvd->vdev_child[id]); 440788ecc943SGeorge Wilson break; 440888ecc943SGeorge Wilson } 440988ecc943SGeorge Wilson } 44100e34b6a7Sbonwick tvd = vd->vdev_child[c]; 4411fa9e4066Sahrens vdev_remove_child(vd, tvd); 441288ecc943SGeorge Wilson tvd->vdev_id = id; 4413fa9e4066Sahrens vdev_add_child(rvd, tvd); 4414fa9e4066Sahrens vdev_config_dirty(tvd); 4415fa9e4066Sahrens } 441699653d4eSeschrock 441799653d4eSeschrock if (nspares != 0) { 4418fa94a07fSbrendan spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 4419fa94a07fSbrendan ZPOOL_CONFIG_SPARES); 4420fa94a07fSbrendan spa_load_spares(spa); 4421fa94a07fSbrendan spa->spa_spares.sav_sync = B_TRUE; 442299653d4eSeschrock } 442399653d4eSeschrock 4424fa94a07fSbrendan if (nl2cache != 0) { 4425fa94a07fSbrendan spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 4426fa94a07fSbrendan ZPOOL_CONFIG_L2CACHE); 4427fa94a07fSbrendan spa_load_l2cache(spa); 4428fa94a07fSbrendan spa->spa_l2cache.sav_sync = B_TRUE; 442999653d4eSeschrock } 4430fa9e4066Sahrens 4431fa9e4066Sahrens /* 44320e34b6a7Sbonwick * We have to be careful when adding new vdevs to an existing pool. 44330e34b6a7Sbonwick * If other threads start allocating from these vdevs before we 44340e34b6a7Sbonwick * sync the config cache, and we lose power, then upon reboot we may 44350e34b6a7Sbonwick * fail to open the pool because there are DVAs that the config cache 44360e34b6a7Sbonwick * can't translate. Therefore, we first add the vdevs without 44370e34b6a7Sbonwick * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 44380373e76bSbonwick * and then let spa_config_update() initialize the new metaslabs. 44390e34b6a7Sbonwick * 44400e34b6a7Sbonwick * spa_load() checks for added-but-not-initialized vdevs, so that 44410e34b6a7Sbonwick * if we lose power at any point in this sequence, the remaining 44420e34b6a7Sbonwick * steps will be completed the next time we load the pool. 4443fa9e4066Sahrens */ 44440e34b6a7Sbonwick (void) spa_vdev_exit(spa, vd, txg, 0); 44450e34b6a7Sbonwick 44460373e76bSbonwick mutex_enter(&spa_namespace_lock); 44470373e76bSbonwick spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 444814372834SHans Rosenfeld spa_event_notify(spa, NULL, ESC_ZFS_VDEV_ADD); 44490373e76bSbonwick mutex_exit(&spa_namespace_lock); 4450fa9e4066Sahrens 44510373e76bSbonwick return (0); 4452fa9e4066Sahrens } 4453fa9e4066Sahrens 4454fa9e4066Sahrens /* 4455fa9e4066Sahrens * Attach a device to a mirror. The arguments are the path to any device 4456fa9e4066Sahrens * in the mirror, and the nvroot for the new device. If the path specifies 4457fa9e4066Sahrens * a device that is not mirrored, we automatically insert the mirror vdev. 
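 *
 * For example (using the same M()/R() notation as the detach comments
 * below), attaching a new disk B to a plain disk A yields M(A, B).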
4458fa9e4066Sahrens * 4459fa9e4066Sahrens * If 'replacing' is specified, the new device is intended to replace the 4460fa9e4066Sahrens * existing device; in this case the two devices are made into their own 44613d7072f8Seschrock * mirror using the 'replacing' vdev, which is functionally identical to 4462fa9e4066Sahrens * the mirror vdev (it actually reuses all the same ops) but has a few 4463fa9e4066Sahrens * extra rules: you can't attach to it after it's been created, and upon 4464fa9e4066Sahrens * completion of resilvering, the first disk (the one being replaced) 4465fa9e4066Sahrens * is automatically detached. 4466fa9e4066Sahrens */ 4467fa9e4066Sahrens int 4468ea8dc4b6Seschrock spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 4469fa9e4066Sahrens { 44703f9d6ad7SLin Ling uint64_t txg, dtl_max_txg; 4471fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev; 4472fa9e4066Sahrens vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 447399653d4eSeschrock vdev_ops_t *pvops; 44749b3f6b42SEric Kustarz char *oldvdpath, *newvdpath; 44759b3f6b42SEric Kustarz int newvd_isspare; 44769b3f6b42SEric Kustarz int error; 4477fa9e4066Sahrens 4478f9af39baSGeorge Wilson ASSERT(spa_writeable(spa)); 4479f9af39baSGeorge Wilson 4480fa9e4066Sahrens txg = spa_vdev_enter(spa); 4481fa9e4066Sahrens 4482c5904d13Seschrock oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 4483fa9e4066Sahrens 4484fa9e4066Sahrens if (oldvd == NULL) 4485fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4486fa9e4066Sahrens 44870e34b6a7Sbonwick if (!oldvd->vdev_ops->vdev_op_leaf) 44880e34b6a7Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 44890e34b6a7Sbonwick 4490fa9e4066Sahrens pvd = oldvd->vdev_parent; 4491fa9e4066Sahrens 449299653d4eSeschrock if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 4493cd0837ccSGeorge Wilson VDEV_ALLOC_ATTACH)) != 0) 44943d7072f8Seschrock return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 44953d7072f8Seschrock 44963d7072f8Seschrock if (newrootvd->vdev_children != 1) 4497fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4498fa9e4066Sahrens 4499fa9e4066Sahrens newvd = newrootvd->vdev_child[0]; 4500fa9e4066Sahrens 4501fa9e4066Sahrens if (!newvd->vdev_ops->vdev_op_leaf) 4502fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4503fa9e4066Sahrens 450499653d4eSeschrock if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 4505fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, error)); 4506fa9e4066Sahrens 45078654d025Sperrin /* 45088654d025Sperrin * Spares can't replace logs 45098654d025Sperrin */ 4510ee0eb9f2SEric Schrock if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 45118654d025Sperrin return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 45128654d025Sperrin 451399653d4eSeschrock if (!replacing) { 451499653d4eSeschrock /* 451599653d4eSeschrock * For attach, the only allowable parent is a mirror or the root 451699653d4eSeschrock * vdev. 451799653d4eSeschrock */ 451899653d4eSeschrock if (pvd->vdev_ops != &vdev_mirror_ops && 451999653d4eSeschrock pvd->vdev_ops != &vdev_root_ops) 452099653d4eSeschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 452199653d4eSeschrock 452299653d4eSeschrock pvops = &vdev_mirror_ops; 452399653d4eSeschrock } else { 452499653d4eSeschrock /* 452599653d4eSeschrock * Active hot spares can only be replaced by inactive hot 452699653d4eSeschrock * spares. 
452799653d4eSeschrock */ 452899653d4eSeschrock if (pvd->vdev_ops == &vdev_spare_ops && 4529cb04b873SMark J Musante oldvd->vdev_isspare && 453099653d4eSeschrock !spa_has_spare(spa, newvd->vdev_guid)) 453199653d4eSeschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 453299653d4eSeschrock 453399653d4eSeschrock /* 453499653d4eSeschrock * If the source is a hot spare, and the parent isn't already a 453599653d4eSeschrock * spare, then we want to create a new hot spare. Otherwise, we 453639c23413Seschrock * want to create a replacing vdev. The user is not allowed to 453739c23413Seschrock * attach to a spared vdev child unless the 'isspare' state is 453839c23413Seschrock * the same (spare replaces spare, non-spare replaces 453939c23413Seschrock * non-spare). 454099653d4eSeschrock */ 4541cb04b873SMark J Musante if (pvd->vdev_ops == &vdev_replacing_ops && 4542cb04b873SMark J Musante spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 454399653d4eSeschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4544cb04b873SMark J Musante } else if (pvd->vdev_ops == &vdev_spare_ops && 4545cb04b873SMark J Musante newvd->vdev_isspare != oldvd->vdev_isspare) { 454639c23413Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4547cb04b873SMark J Musante } 4548cb04b873SMark J Musante 4549cb04b873SMark J Musante if (newvd->vdev_isspare) 455099653d4eSeschrock pvops = &vdev_spare_ops; 455199653d4eSeschrock else 455299653d4eSeschrock pvops = &vdev_replacing_ops; 455399653d4eSeschrock } 455499653d4eSeschrock 45552a79c5feSlling /* 4556573ca77eSGeorge Wilson * Make sure the new device is big enough. 45572a79c5feSlling */ 4558573ca77eSGeorge Wilson if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 4559fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 4560fa9e4066Sahrens 4561ecc2d604Sbonwick /* 4562ecc2d604Sbonwick * The new device cannot have a higher alignment requirement 4563ecc2d604Sbonwick * than the top-level vdev. 4564ecc2d604Sbonwick */ 4565ecc2d604Sbonwick if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 4566fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 4567fa9e4066Sahrens 4568fa9e4066Sahrens /* 4569fa9e4066Sahrens * If this is an in-place replacement, update oldvd's path and devid 4570fa9e4066Sahrens * to make it distinguishable from newvd, and unopenable from now on. 4571fa9e4066Sahrens */ 4572fa9e4066Sahrens if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 4573fa9e4066Sahrens spa_strfree(oldvd->vdev_path); 4574fa9e4066Sahrens oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 4575fa9e4066Sahrens KM_SLEEP); 4576fa9e4066Sahrens (void) sprintf(oldvd->vdev_path, "%s/%s", 4577fa9e4066Sahrens newvd->vdev_path, "old"); 4578fa9e4066Sahrens if (oldvd->vdev_devid != NULL) { 4579fa9e4066Sahrens spa_strfree(oldvd->vdev_devid); 4580fa9e4066Sahrens oldvd->vdev_devid = NULL; 4581fa9e4066Sahrens } 4582fa9e4066Sahrens } 4583fa9e4066Sahrens 4584cb04b873SMark J Musante /* mark the device being resilvered */ 4585b4952e17SGeorge Wilson newvd->vdev_resilver_txg = txg; 4586cb04b873SMark J Musante 4587fa9e4066Sahrens /* 458899653d4eSeschrock * If the parent is not a mirror, or if we're replacing, insert the new 458999653d4eSeschrock * mirror/replacing/spare vdev above oldvd. 
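	 *
	 * E.g. replacing B in M(A, B) yields M(A, R(B, newvd)) (notation
	 * as in the detach comments below); once resilvering completes,
	 * R() collapses back to just the new disk.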
4590fa9e4066Sahrens */ 4591fa9e4066Sahrens if (pvd->vdev_ops != pvops) 4592fa9e4066Sahrens pvd = vdev_add_parent(oldvd, pvops); 4593fa9e4066Sahrens 4594fa9e4066Sahrens ASSERT(pvd->vdev_top->vdev_parent == rvd); 4595fa9e4066Sahrens ASSERT(pvd->vdev_ops == pvops); 4596fa9e4066Sahrens ASSERT(oldvd->vdev_parent == pvd); 4597fa9e4066Sahrens 4598fa9e4066Sahrens /* 4599fa9e4066Sahrens * Extract the new device from its root and add it to pvd. 4600fa9e4066Sahrens */ 4601fa9e4066Sahrens vdev_remove_child(newrootvd, newvd); 4602fa9e4066Sahrens newvd->vdev_id = pvd->vdev_children; 460388ecc943SGeorge Wilson newvd->vdev_crtxg = oldvd->vdev_crtxg; 4604fa9e4066Sahrens vdev_add_child(pvd, newvd); 4605fa9e4066Sahrens 4606fa9e4066Sahrens tvd = newvd->vdev_top; 4607fa9e4066Sahrens ASSERT(pvd->vdev_top == tvd); 4608fa9e4066Sahrens ASSERT(tvd->vdev_parent == rvd); 4609fa9e4066Sahrens 4610fa9e4066Sahrens vdev_config_dirty(tvd); 4611fa9e4066Sahrens 4612fa9e4066Sahrens /* 46133f9d6ad7SLin Ling * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 46143f9d6ad7SLin Ling * for any dmu_sync-ed blocks. It will propagate upward when 46153f9d6ad7SLin Ling * spa_vdev_exit() calls vdev_dtl_reassess(). 4616fa9e4066Sahrens */ 46173f9d6ad7SLin Ling dtl_max_txg = txg + TXG_CONCURRENT_STATES; 4618fa9e4066Sahrens 46193f9d6ad7SLin Ling vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 46203f9d6ad7SLin Ling dtl_max_txg - TXG_INITIAL); 4621fa9e4066Sahrens 46226809eb4eSEric Schrock if (newvd->vdev_isspare) { 462339c23413Seschrock spa_spare_activate(newvd); 46246809eb4eSEric Schrock spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE); 46256809eb4eSEric Schrock } 46266809eb4eSEric Schrock 4627e14bb325SJeff Bonwick oldvdpath = spa_strdup(oldvd->vdev_path); 4628e14bb325SJeff Bonwick newvdpath = spa_strdup(newvd->vdev_path); 46299b3f6b42SEric Kustarz newvd_isspare = newvd->vdev_isspare; 4630ea8dc4b6Seschrock 4631fa9e4066Sahrens /* 4632fa9e4066Sahrens * Mark newvd's DTL dirty in this txg. 4633fa9e4066Sahrens */ 4634ecc2d604Sbonwick vdev_dirty(tvd, VDD_DTL, newvd, txg); 4635fa9e4066Sahrens 46363f9d6ad7SLin Ling /* 46370713e232SGeorge Wilson * Schedule the resilver to restart in the future. We do this to 46380713e232SGeorge Wilson * ensure that dmu_sync-ed blocks have been stitched into the 46390713e232SGeorge Wilson * respective datasets. 46403f9d6ad7SLin Ling */ 46413f9d6ad7SLin Ling dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); 4642fa9e4066Sahrens 464314372834SHans Rosenfeld if (spa->spa_bootfs) 464414372834SHans Rosenfeld spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH); 464514372834SHans Rosenfeld 464614372834SHans Rosenfeld spa_event_notify(spa, newvd, ESC_ZFS_VDEV_ATTACH); 464714372834SHans Rosenfeld 46483f9d6ad7SLin Ling /* 46493f9d6ad7SLin Ling * Commit the config 46503f9d6ad7SLin Ling */ 46513f9d6ad7SLin Ling (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 46523f9d6ad7SLin Ling 46534445fffbSMatthew Ahrens spa_history_log_internal(spa, "vdev attach", NULL, 46543f9d6ad7SLin Ling "%s vdev=%s %s vdev=%s", 46559b3f6b42SEric Kustarz replacing && newvd_isspare ? "spare in" : 46569b3f6b42SEric Kustarz replacing ? "replace" : "attach", newvdpath, 46579b3f6b42SEric Kustarz replacing ? 
"for" : "to", oldvdpath); 46589b3f6b42SEric Kustarz 46599b3f6b42SEric Kustarz spa_strfree(oldvdpath); 46609b3f6b42SEric Kustarz spa_strfree(newvdpath); 46619b3f6b42SEric Kustarz 4662fa9e4066Sahrens return (0); 4663fa9e4066Sahrens } 4664fa9e4066Sahrens 4665fa9e4066Sahrens /* 4666fa9e4066Sahrens * Detach a device from a mirror or replacing vdev. 4667f7170741SWill Andrews * 4668fa9e4066Sahrens * If 'replace_done' is specified, only detach if the parent 4669fa9e4066Sahrens * is a replacing vdev. 4670fa9e4066Sahrens */ 4671fa9e4066Sahrens int 46728ad4d6ddSJeff Bonwick spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 4673fa9e4066Sahrens { 4674fa9e4066Sahrens uint64_t txg; 46758ad4d6ddSJeff Bonwick int error; 4676fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev; 4677fa9e4066Sahrens vdev_t *vd, *pvd, *cvd, *tvd; 467899653d4eSeschrock boolean_t unspare = B_FALSE; 4679d5285caeSGeorge Wilson uint64_t unspare_guid = 0; 46801195e687SMark J Musante char *vdpath; 4681fa9e4066Sahrens 4682f9af39baSGeorge Wilson ASSERT(spa_writeable(spa)); 4683f9af39baSGeorge Wilson 4684fa9e4066Sahrens txg = spa_vdev_enter(spa); 4685fa9e4066Sahrens 4686c5904d13Seschrock vd = spa_lookup_by_guid(spa, guid, B_FALSE); 4687fa9e4066Sahrens 4688fa9e4066Sahrens if (vd == NULL) 4689fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4690fa9e4066Sahrens 46910e34b6a7Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 46920e34b6a7Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 46930e34b6a7Sbonwick 4694fa9e4066Sahrens pvd = vd->vdev_parent; 4695fa9e4066Sahrens 4696fa9e4066Sahrens /* 46978ad4d6ddSJeff Bonwick * If the parent/child relationship is not as expected, don't do it. 46988ad4d6ddSJeff Bonwick * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 46998ad4d6ddSJeff Bonwick * vdev that's replacing B with C. The user's intent in replacing 47008ad4d6ddSJeff Bonwick * is to go from M(A,B) to M(A,C). If the user decides to cancel 47018ad4d6ddSJeff Bonwick * the replace by detaching C, the expected behavior is to end up 47028ad4d6ddSJeff Bonwick * M(A,B). But suppose that right after deciding to detach C, 47038ad4d6ddSJeff Bonwick * the replacement of B completes. We would have M(A,C), and then 47048ad4d6ddSJeff Bonwick * ask to detach C, which would leave us with just A -- not what 47058ad4d6ddSJeff Bonwick * the user wanted. To prevent this, we make sure that the 47068ad4d6ddSJeff Bonwick * parent/child relationship hasn't changed -- in this example, 47078ad4d6ddSJeff Bonwick * that C's parent is still the replacing vdev R. 47088ad4d6ddSJeff Bonwick */ 47098ad4d6ddSJeff Bonwick if (pvd->vdev_guid != pguid && pguid != 0) 47108ad4d6ddSJeff Bonwick return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 47118ad4d6ddSJeff Bonwick 47128ad4d6ddSJeff Bonwick /* 4713cb04b873SMark J Musante * Only 'replacing' or 'spare' vdevs can be replaced. 4714fa9e4066Sahrens */ 4715cb04b873SMark J Musante if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 4716cb04b873SMark J Musante pvd->vdev_ops != &vdev_spare_ops) 4717fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 471899653d4eSeschrock 471999653d4eSeschrock ASSERT(pvd->vdev_ops != &vdev_spare_ops || 4720e7437265Sahrens spa_version(spa) >= SPA_VERSION_SPARES); 4721fa9e4066Sahrens 4722fa9e4066Sahrens /* 472399653d4eSeschrock * Only mirror, replacing, and spare vdevs support detach. 
	if (pvd->vdev_ops != &vdev_replacing_ops &&
	    pvd->vdev_ops != &vdev_mirror_ops &&
	    pvd->vdev_ops != &vdev_spare_ops)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	/*
	 * If this device has the only valid copy of some data,
	 * we cannot safely detach it.
	 */
	if (vdev_dtl_required(vd))
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	ASSERT(pvd->vdev_children >= 2);

	/*
	 * If we are detaching the second disk from a replacing vdev, then
	 * check to see if we changed the original vdev's path to have "/old"
	 * at the end in spa_vdev_attach().  If so, undo that change now.
	 */
	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
	    vd->vdev_path != NULL) {
		size_t len = strlen(vd->vdev_path);

		for (int c = 0; c < pvd->vdev_children; c++) {
			cvd = pvd->vdev_child[c];

			if (cvd == vd || cvd->vdev_path == NULL)
				continue;

			if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
			    strcmp(cvd->vdev_path + len, "/old") == 0) {
				spa_strfree(cvd->vdev_path);
				cvd->vdev_path = spa_strdup(vd->vdev_path);
				break;
			}
		}
	}

	/*
	 * If we are detaching the original disk from a spare, then it implies
	 * that the spare should become a real disk, and be removed from the
	 * active spare list for the pool.
	 */
	if (pvd->vdev_ops == &vdev_spare_ops &&
	    vd->vdev_id == 0 &&
	    pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
		unspare = B_TRUE;
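
	/*
	 * (That is, vd is child 0 -- the original disk -- and the device
	 * now occupying the last slot is still flagged as an active
	 * hot spare.)
	 */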

	/*
	 * Erase the disk labels so the disk can be used for other things.
	 * This must be done after all other error cases are handled,
	 * but before we disembowel vd (so we can still do I/O to it).
	 * But if we can't do it, don't treat the error as fatal --
	 * it may be that the unwritability of the disk is the reason
	 * it's being detached!
	 */
	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

	/*
	 * Remove vd from its parent and compact the parent's children.
	 */
	vdev_remove_child(pvd, vd);
	vdev_compact_children(pvd);

	/*
	 * Remember one of the remaining children so we can get tvd below.
	 */
	cvd = pvd->vdev_child[pvd->vdev_children - 1];

	/*
	 * If we need to remove the remaining child from the list of hot spares,
	 * do it now, marking the vdev as no longer a spare in the process.
	 * We must do this before vdev_remove_parent(), because that can
	 * change the GUID if it creates a new toplevel GUID.  For a similar
	 * reason, we must remove the spare now, in the same txg as the detach;
	 * otherwise someone could attach a new sibling, change the GUID, and
	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
	 */
	if (unspare) {
		ASSERT(cvd->vdev_isspare);
		spa_spare_remove(cvd);
		unspare_guid = cvd->vdev_guid;
		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
		cvd->vdev_unspare = B_TRUE;
	}

	/*
	 * If the parent mirror/replacing vdev only has one child,
	 * the parent is no longer needed.  Remove it from the tree.
	 */
	if (pvd->vdev_children == 1) {
		if (pvd->vdev_ops == &vdev_spare_ops)
			cvd->vdev_unspare = B_FALSE;
		vdev_remove_parent(cvd);
	}

	/*
	 * We don't set tvd until now because the parent we just removed
	 * may have been the previous top-level vdev.
	 */
	tvd = cvd->vdev_top;
	ASSERT(tvd->vdev_parent == rvd);

	/*
	 * Reevaluate the parent vdev state.
	 */
	vdev_propagate_state(cvd);

	/*
	 * If the 'autoexpand' property is set on the pool then automatically
	 * try to expand the size of the pool.  For example if the device we
	 * just detached was smaller than the others, it may be possible to
	 * add metaslabs (i.e. grow the pool).  We need to reopen the vdev
	 * first so that we can obtain the updated sizes of the leaf vdevs.
	 */
	if (spa->spa_autoexpand) {
		vdev_reopen(tvd);
		vdev_expand(tvd, txg);
	}

	vdev_config_dirty(tvd);

	/*
	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
	 * But first make sure we're not on any *other* txg's DTL list, to
	 * prevent vd from being accessed after it's freed.
	 */
	vdpath = spa_strdup(vd->vdev_path);
	for (int t = 0; t < TXG_SIZE; t++)
		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
	vd->vdev_detached = B_TRUE;
	vdev_dirty(tvd, VDD_DTL, vd, txg);

	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);

	/* hang on to the spa before we release the lock */
	spa_open_ref(spa, FTAG);

	error = spa_vdev_exit(spa, vd, txg, 0);

	spa_history_log_internal(spa, "detach", NULL,
	    "vdev=%s", vdpath);
	spa_strfree(vdpath);

	/*
	 * If this was the removal of the original device in a hot spare vdev,
	 * then we want to go through and remove the device from the hot spare
	 * list of every other pool.
	 */
	if (unspare) {
		spa_t *altspa = NULL;

		mutex_enter(&spa_namespace_lock);
		while ((altspa = spa_next(altspa)) != NULL) {
			if (altspa->spa_state != POOL_STATE_ACTIVE ||
			    altspa == spa)
				continue;

			spa_open_ref(altspa, FTAG);
			mutex_exit(&spa_namespace_lock);
			(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
			mutex_enter(&spa_namespace_lock);
			spa_close(altspa, FTAG);
		}
		mutex_exit(&spa_namespace_lock);

		/* search the rest of the vdevs for spares to remove */
		spa_vdev_resilver_done(spa);
	}

	/* all done with the spa; OK to release */
	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);
	mutex_exit(&spa_namespace_lock);

	return (error);
}

/*
 * Split a set of devices from their mirrors, and create a new pool from them.
 */
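/*
 * This is the kernel half of 'zpool split' (e.g. "zpool split tank tank2",
 * optionally naming the device to take from each mirror).  Roughly:
 *
 *   1. Passivate and flush the intent log, then sync out dirty data.
 *   2. Validate that every requested device is a healthy leaf of a
 *      mirror and holds no data that exists nowhere else.
 *   3. Offline the chosen devices and record them in the original
 *      pool's config under ZPOOL_CONFIG_SPLIT_LIST so the operation
 *      can be recognized if we crash mid-split.
 *   4. Assemble and activate the new pool from those devices.
 *   5. Detach the devices from the original pool and log the split.
 *
 * On failure the offlined disks are brought back online and the
 * temporary split record is discarded.
 */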
int
spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
    nvlist_t *props, boolean_t exp)
{
	int error = 0;
	uint64_t txg, *glist;
	spa_t *newspa;
	uint_t c, children, lastlog;
	nvlist_t **child, *nvl, *tmp;
	dmu_tx_t *tx;
	char *altroot = NULL;
	vdev_t *rvd, **vml = NULL;		/* vdev modify list */
	boolean_t activate_slog;

	ASSERT(spa_writeable(spa));

	txg = spa_vdev_enter(spa);

	/* clear the log and flush everything up to now */
	activate_slog = spa_passivate_log(spa);
	(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
	error = spa_offline_log(spa);
	txg = spa_vdev_config_enter(spa);

	if (activate_slog)
		spa_activate_log(spa);

	if (error != 0)
		return (spa_vdev_exit(spa, NULL, txg, error));

	/* check new spa name before going any further */
	if (spa_lookup(newname) != NULL)
		return (spa_vdev_exit(spa, NULL, txg, EEXIST));

	/*
	 * scan through all the children to ensure they're all mirrors
	 */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
	    nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0)
		return (spa_vdev_exit(spa, NULL, txg, EINVAL));

	/* first, check to ensure we've got the right child count */
	rvd = spa->spa_root_vdev;
	lastlog = 0;
	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];

		/* don't count the holes & logs as children */
		if (vd->vdev_islog || vd->vdev_ishole) {
			if (lastlog == 0)
				lastlog = c;
			continue;
		}

		lastlog = 0;
	}
	if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
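
	/*
	 * (After the loop, lastlog is nonzero only when the child list
	 * ends in an unbroken run of log/hole vdevs, in which case it
	 * indexes the first of them -- so the check above requires the
	 * caller to name exactly one disk per non-log top-level vdev.)
	 */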

	/* next, ensure no spare or cache devices are part of the split */
	if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
	    nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
		return (spa_vdev_exit(spa, NULL, txg, EINVAL));

	vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
	glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);

	/* then, loop over each vdev and validate it */
	for (c = 0; c < children; c++) {
		uint64_t is_hole = 0;

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);

		if (is_hole != 0) {
			if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
			    spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
				continue;
			} else {
				error = SET_ERROR(EINVAL);
				break;
			}
		}

		/* which disk is going to be split? */
		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
		    &glist[c]) != 0) {
			error = SET_ERROR(EINVAL);
			break;
		}

		/* look it up in the spa */
		vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
		if (vml[c] == NULL) {
			error = SET_ERROR(ENODEV);
			break;
		}

		/* make sure there's nothing stopping the split */
		if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
		    vml[c]->vdev_islog ||
		    vml[c]->vdev_ishole ||
		    vml[c]->vdev_isspare ||
		    vml[c]->vdev_isl2cache ||
		    !vdev_writeable(vml[c]) ||
		    vml[c]->vdev_children != 0 ||
		    vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
		    c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
			error = SET_ERROR(EINVAL);
			break;
		}

		if (vdev_dtl_required(vml[c])) {
			error = SET_ERROR(EBUSY);
			break;
		}
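
		/*
		 * (The top-level info captured below lets the new pool
		 * adopt this disk's existing metaslab layout and
		 * geometry unchanged.)
		 */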
		/* we need certain info from the top level */
		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
		    vml[c]->vdev_top->vdev_ms_array) == 0);
		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
		    vml[c]->vdev_top->vdev_ms_shift) == 0);
		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
		    vml[c]->vdev_top->vdev_asize) == 0);
		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
		    vml[c]->vdev_top->vdev_ashift) == 0);
	}

	if (error != 0) {
		kmem_free(vml, children * sizeof (vdev_t *));
		kmem_free(glist, children * sizeof (uint64_t));
		return (spa_vdev_exit(spa, NULL, txg, error));
	}

	/* stop writers from using the disks */
	for (c = 0; c < children; c++) {
		if (vml[c] != NULL)
			vml[c]->vdev_offline = B_TRUE;
	}
	vdev_reopen(spa->spa_root_vdev);

	/*
	 * Temporarily record the splitting vdevs in the spa config.  This
	 * will disappear once the config is regenerated.
	 */
	VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
	    glist, children) == 0);
	kmem_free(glist, children * sizeof (uint64_t));

	mutex_enter(&spa->spa_props_lock);
	VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
	    nvl) == 0);
	mutex_exit(&spa->spa_props_lock);
	spa->spa_config_splitting = nvl;
	vdev_config_dirty(spa->spa_root_vdev);

	/* configure and create the new pool */
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
	    spa_version(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    spa->spa_config_txg) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    spa_generate_guid(NULL)) == 0);
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);

	/* add the new pool to the namespace */
	newspa = spa_add(newname, config, altroot);
	newspa->spa_config_txg = spa->spa_config_txg;
	spa_set_log_state(newspa, SPA_LOG_CLEAR);

	/* release the spa config lock, retaining the namespace lock */
	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);

	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, FTAG, 1);

	spa_activate(newspa, spa_mode_global);
	spa_async_suspend(newspa);

	/* create the new pool from the disks of the original pool */
	error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
	if (error)
		goto out;

	/* if that worked, generate a real config for the new pool */
	if (newspa->spa_root_vdev != NULL) {
		VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
		    ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
		spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
		    B_TRUE));
	}

	/* set the props */
	if (props != NULL) {
		spa_configfile_set(newspa, props, B_FALSE);
		error = spa_prop_set(newspa, props);
		if (error)
			goto out;
	}

	/* flush everything */
	txg = spa_vdev_config_enter(newspa);
	vdev_config_dirty(newspa->spa_root_vdev);
	(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);

	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, FTAG, 2);

	spa_async_resume(newspa);
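
	/*
	 * The per-vdev "detach" history records below are logged under a
	 * transaction on the original pool's MOS directory so they commit
	 * in the same txg as the config change; if the transaction cannot
	 * be assigned, the split still proceeds and only the history
	 * entries are skipped.
	 */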
	/* finally, update the original pool's config */
	txg = spa_vdev_config_enter(spa);
	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0)
		dmu_tx_abort(tx);
	for (c = 0; c < children; c++) {
		if (vml[c] != NULL) {
			vdev_split(vml[c]);
			if (error == 0)
				spa_history_log_internal(spa, "detach", tx,
				    "vdev=%s", vml[c]->vdev_path);
			vdev_free(vml[c]);
		}
	}
	vdev_config_dirty(spa->spa_root_vdev);
	spa->spa_config_splitting = NULL;
	nvlist_free(nvl);
	if (error == 0)
		dmu_tx_commit(tx);
	(void) spa_vdev_exit(spa, NULL, txg, 0);

	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, FTAG, 3);

	/* split is complete; log a history record */
	spa_history_log_internal(newspa, "split", NULL,
	    "from pool %s", spa_name(spa));

	kmem_free(vml, children * sizeof (vdev_t *));

	/* if we're not going to mount the filesystems in userland, export */
	if (exp)
		error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
		    B_FALSE, B_FALSE);

	return (error);

out:
	spa_unload(newspa);
	spa_deactivate(newspa);
	spa_remove(newspa);

	txg = spa_vdev_config_enter(spa);

	/* re-online all offlined disks */
	for (c = 0; c < children; c++) {
		if (vml[c] != NULL)
			vml[c]->vdev_offline = B_FALSE;
	}
	vdev_reopen(spa->spa_root_vdev);

	nvlist_free(spa->spa_config_splitting);
	spa->spa_config_splitting = NULL;
	(void) spa_vdev_exit(spa, NULL, txg, error);

	kmem_free(vml, children * sizeof (vdev_t *));
	return (error);
}

static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
	for (int i = 0; i < count; i++) {
		uint64_t guid;

		VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		if (guid == target_guid)
			return (nvpp[i]);
	}

	return (NULL);
}
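
/*
 * Remove 'dev_to_remove' from the nvlist array 'name' in 'config',
 * preserving the remaining entries (removing the last entry leaves an
 * empty array).
 */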
static void
spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
    nvlist_t *dev_to_remove)
{
	nvlist_t **newdev = NULL;

	if (count > 1)
		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);

	for (int i = 0, j = 0; i < count; i++) {
		if (dev[i] == dev_to_remove)
			continue;
		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
	}

	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);

	for (int i = 0; i < count - 1; i++)
		nvlist_free(newdev[i]);

	if (count > 1)
		kmem_free(newdev, (count - 1) * sizeof (void *));
}

/*
 * Evacuate the device.
 */
static int
spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
{
	uint64_t txg;
	int error = 0;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
	ASSERT(vd == vd->vdev_top);

	/*
	 * Evacuate the device.  We don't hold the config lock as writer
	 * since we need to do I/O but we do keep the
	 * spa_namespace_lock held.  Once this completes the device
	 * should no longer have any blocks allocated on it.
	 */
	if (vd->vdev_islog) {
		if (vd->vdev_stat.vs_alloc != 0)
			error = spa_offline_log(spa);
	} else {
		error = SET_ERROR(ENOTSUP);
	}

	if (error)
		return (error);

	/*
	 * The evacuation succeeded.  Remove any remaining MOS metadata
	 * associated with this vdev, and wait for these changes to sync.
	 */
	ASSERT0(vd->vdev_stat.vs_alloc);
	txg = spa_vdev_config_enter(spa);
	vd->vdev_removing = B_TRUE;
	vdev_dirty_leaves(vd, VDD_DTL, txg);
	vdev_config_dirty(vd);
	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);

	return (0);
}

/*
 * Complete the removal by cleaning up the namespace.
 */
static void
spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t id = vd->vdev_id;
	boolean_t last_vdev = (id == (rvd->vdev_children - 1));

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(vd == vd->vdev_top);

	/*
	 * Only remove any devices which are empty.
	 */
	if (vd->vdev_stat.vs_alloc != 0)
		return;

	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

	if (list_link_active(&vd->vdev_state_dirty_node))
		vdev_state_clean(vd);
	if (list_link_active(&vd->vdev_config_dirty_node))
		vdev_config_clean(vd);

	vdev_free(vd);

	if (last_vdev) {
		vdev_compact_children(rvd);
	} else {
		vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
		vdev_add_child(rvd, vd);
	}
	vdev_config_dirty(rvd);

	/*
	 * Reassess the health of our root vdev.
	 */
	vdev_reopen(rvd);
}

/*
 * Remove a device from the pool.
 *
 * Removing a device from the vdev namespace requires several steps
 * and can take a significant amount of time.  As a result we use
 * the spa_vdev_config_[enter/exit] functions which allow us to
 * grab and release the spa_config_lock while still holding the namespace
 * lock.  During each step the configuration is synced out.
 *
 * Currently, this supports removing only hot spares, slogs, and level 2 ARC
 * devices.
 */
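/*
 * (From userland this is reached via 'zpool remove'.  Note that a removed
 * top-level log vdev is replaced with a hole vdev by
 * spa_vdev_remove_from_namespace() above, so the surviving children keep
 * their vdev ids.)
 */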
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
	vdev_t *vd;
	metaslab_group_t *mg;
	nvlist_t **spares, **l2cache, *nv;
	uint64_t txg = 0;
	uint_t nspares, nl2cache;
	int error = 0;
	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);

	ASSERT(spa_writeable(spa));

	if (!locked)
		txg = spa_vdev_enter(spa);

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (spa->spa_spares.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
		/*
		 * Only remove the hot spare if it's not currently in use
		 * in this pool.
		 */
		if (vd == NULL || unspare) {
			spa_vdev_remove_aux(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
			spa_load_spares(spa);
			spa->spa_spares.sav_sync = B_TRUE;
		} else {
			error = SET_ERROR(EBUSY);
		}
	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
		/*
		 * Cache devices can always be removed.
		 */
		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	} else if (vd != NULL && vd->vdev_islog) {
		ASSERT(!locked);
		ASSERT(vd == vd->vdev_top);

		mg = vd->vdev_mg;

		/*
		 * Stop allocating from this vdev.
		 */
		metaslab_group_passivate(mg);

		/*
		 * Wait for the youngest allocations and frees to sync,
		 * and then wait for the deferral of those frees to finish.
		 */
		spa_vdev_config_exit(spa, NULL,
		    txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
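
		/*
		 * (Exiting at txg + TXG_CONCURRENT_STATES guarantees that
		 * every txg which could still reference this vdev has
		 * synced; the extra TXG_DEFER_SIZE txgs let the deferred
		 * frees from those txgs complete as well.)
		 */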
		/*
		 * Attempt to evacuate the vdev.
		 */
		error = spa_vdev_remove_evacuate(spa, vd);

		txg = spa_vdev_config_enter(spa);

		/*
		 * If we couldn't evacuate the vdev, unwind.
		 */
		if (error) {
			metaslab_group_activate(mg);
			return (spa_vdev_exit(spa, NULL, txg, error));
		}

		/*
		 * Clean up the vdev namespace.
		 */
		spa_vdev_remove_from_namespace(spa, vd);

	} else if (vd != NULL) {
		/*
		 * Normal vdevs cannot be removed (yet).
		 */
		error = SET_ERROR(ENOTSUP);
	} else {
		/*
		 * There is no vdev of any kind with the specified guid.
		 */
		error = SET_ERROR(ENOENT);
	}

	if (!locked)
		return (spa_vdev_exit(spa, NULL, txg, error));

	return (error);
}
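
/*
 * When a replacement or spare attach initiated in spa_vdev_attach()
 * finishes resilvering, the routines below detach the now-redundant
 * device(s) automatically.
 */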
/*
 * Find any device that's done replacing, or a vdev marked 'unspare' that's
 * currently spared, so we can detach it.
 */
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
	vdev_t *newvd, *oldvd;

	for (int c = 0; c < vd->vdev_children; c++) {
		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
		if (oldvd != NULL)
			return (oldvd);
	}

	/*
	 * Check for a completed replacement.  We always consider the first
	 * vdev in the list to be the oldest vdev, and the last one to be
	 * the newest (see spa_vdev_attach() for how that works).  In
	 * the case where the newest vdev is faulted, we will not automatically
	 * remove it after a resilver completes.  This is OK as it will require
	 * user intervention to determine which disk the admin wishes to keep.
	 */
	if (vd->vdev_ops == &vdev_replacing_ops) {
		ASSERT(vd->vdev_children > 1);

		newvd = vd->vdev_child[vd->vdev_children - 1];
		oldvd = vd->vdev_child[0];

		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
		    !vdev_dtl_required(oldvd))
			return (oldvd);
	}

	/*
	 * Check for a completed resilver with the 'unspare' flag set.
	 */
	if (vd->vdev_ops == &vdev_spare_ops) {
		vdev_t *first = vd->vdev_child[0];
		vdev_t *last = vd->vdev_child[vd->vdev_children - 1];

		if (last->vdev_unspare) {
			oldvd = first;
			newvd = last;
		} else if (first->vdev_unspare) {
			oldvd = last;
			newvd = first;
		} else {
			oldvd = NULL;
		}

		if (oldvd != NULL &&
		    vdev_dtl_empty(newvd, DTL_MISSING) &&
		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
		    !vdev_dtl_required(oldvd))
			return (oldvd);

		/*
		 * If there are more than two spares attached to a disk,
		 * and those spares are not required, then we want to
		 * attempt to free them up now so that they can be used
		 * by other pools.  Once we're back down to a single
		 * disk+spare, we stop removing them.
		 */
		if (vd->vdev_children > 2) {
			newvd = vd->vdev_child[1];

			if (newvd->vdev_isspare && last->vdev_isspare &&
			    vdev_dtl_empty(last, DTL_MISSING) &&
			    vdev_dtl_empty(last, DTL_OUTAGE) &&
			    !vdev_dtl_required(newvd))
				return (newvd);
		}
	}

	return (NULL);
}

static void
spa_vdev_resilver_done(spa_t *spa)
{
	vdev_t *vd, *pvd, *ppvd;
	uint64_t guid, sguid, pguid, ppguid;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
		pvd = vd->vdev_parent;
		ppvd = pvd->vdev_parent;
		guid = vd->vdev_guid;
		pguid = pvd->vdev_guid;
		ppguid = ppvd->vdev_guid;
		sguid = 0;
		/*
		 * If we have just finished replacing a hot spared device, then
		 * we need to detach the parent's first child (the original hot
		 * spare) as well.
		 */
		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
		    ppvd->vdev_children == 2) {
			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
			sguid = ppvd->vdev_child[1]->vdev_guid;
		}
		ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));

		spa_config_exit(spa, SCL_ALL, FTAG);
		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
			return;
		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
			return;
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Update the stored path or FRU for this vdev.
 */
int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
    boolean_t ispath)
{
	vdev_t *vd;
	boolean_t sync = B_FALSE;

	ASSERT(spa_writeable(spa));

	spa_vdev_state_enter(spa, SCL_ALL);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENOENT));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	if (ispath) {
		if (strcmp(value, vd->vdev_path) != 0) {
			spa_strfree(vd->vdev_path);
			vd->vdev_path = spa_strdup(value);
			sync = B_TRUE;
		}
	} else {
		if (vd->vdev_fru == NULL) {
			vd->vdev_fru = spa_strdup(value);
			sync = B_TRUE;
		} else if (strcmp(value, vd->vdev_fru) != 0) {
			spa_strfree(vd->vdev_fru);
			vd->vdev_fru = spa_strdup(value);
			sync = B_TRUE;
		}
	}
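
	/*
	 * Hand the vdev back to spa_vdev_state_exit() only when something
	 * actually changed, so that the updated configuration gets written
	 * out; a no-op update completes without dirtying anything.
	 */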
	return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
}

int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
}

int
spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
{
	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
}

/*
 * ==========================================================================
 * SPA Scanning
 * ==========================================================================
 */

int
spa_scan_stop(spa_t *spa)
{
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
	if (dsl_scan_resilvering(spa->spa_dsl_pool))
		return (SET_ERROR(EBUSY));
	return (dsl_scan_cancel(spa->spa_dsl_pool));
}

int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);

	if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOTSUP));

	/*
	 * If a resilver was requested, but there is no DTL on a
	 * writeable leaf device, we have nothing to do.
	 */
	if (func == POOL_SCAN_RESILVER &&
	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
		return (0);
	}

	return (dsl_scan(spa->spa_dsl_pool, func));
}
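
/*
 * ('zpool scrub' arrives here as spa_scan(spa, POOL_SCAN_SCRUB); resilvers
 * are normally requested internally via SPA_ASYNC_RESILVER and kicked off
 * by spa_async_thread() below.)
 */
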
/*
 * ==========================================================================
 * SPA async task processing
 * ==========================================================================
 */

static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
	if (vd->vdev_remove_wanted) {
		vd->vdev_remove_wanted = B_FALSE;
		vd->vdev_delayed_close = B_FALSE;
		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);

		/*
		 * We want to clear the stats, but we don't want to do a full
		 * vdev_clear() as that will cause us to throw away
		 * degraded/faulted state as well as attempt to reopen the
		 * device, all of which is a waste.
		 */
		vd->vdev_stat.vs_read_errors = 0;
		vd->vdev_stat.vs_write_errors = 0;
		vd->vdev_stat.vs_checksum_errors = 0;

		vdev_state_dirty(vd->vdev_top);
	}

	for (int c = 0; c < vd->vdev_children; c++)
		spa_async_remove(spa, vd->vdev_child[c]);
}

static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
	if (vd->vdev_probe_wanted) {
		vd->vdev_probe_wanted = B_FALSE;
		vdev_reopen(vd);	/* vdev_open() does the actual probe */
	}

	for (int c = 0; c < vd->vdev_children; c++)
		spa_async_probe(spa, vd->vdev_child[c]);
}
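
/*
 * Post a device-link sysevent (ESC_DEV_DLE) for every leaf under vd that
 * has a known physical path, in effect asking the rest of the system to
 * take note that a device's size may have changed.
 */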
static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
	sysevent_id_t eid;
	nvlist_t *attr;
	char *physpath;

	if (!spa->spa_autoexpand)
		return;

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];
		spa_async_autoexpand(spa, cvd);
	}

	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
		return;

	physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	(void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);

	VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

	(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
	    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

	nvlist_free(attr);
	kmem_free(physpath, MAXPATHLEN);
}

static void
spa_async_thread(spa_t *spa)
{
	int tasks;

	ASSERT(spa->spa_sync_on);

	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
	spa->spa_async_tasks = 0;
	mutex_exit(&spa->spa_async_lock);

	/*
	 * See if the config needs to be updated.
	 */
	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
		uint64_t old_space, new_space;

		mutex_enter(&spa_namespace_lock);
		old_space = metaslab_class_get_space(spa_normal_class(spa));
		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
		new_space = metaslab_class_get_space(spa_normal_class(spa));
		mutex_exit(&spa_namespace_lock);

		/*
		 * If the pool grew as a result of the config update,
		 * then log an internal history event.
		 */
		if (new_space != old_space) {
			spa_history_log_internal(spa, "vdev online", NULL,
			    "pool '%s' size: %llu(+%llu)",
			    spa_name(spa), new_space, new_space - old_space);
		}
	}

	/*
	 * See if any devices need to be marked REMOVED.
	 */
	if (tasks & SPA_ASYNC_REMOVE) {
		spa_vdev_state_enter(spa, SCL_NONE);
		spa_async_remove(spa, spa->spa_root_vdev);
		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
		for (int i = 0; i < spa->spa_spares.sav_count; i++)
			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
		(void) spa_vdev_state_exit(spa, NULL, 0);
	}

	if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
		spa_async_autoexpand(spa, spa->spa_root_vdev);
		spa_config_exit(spa, SCL_CONFIG, FTAG);
	}

	/*
	 * See if any devices need to be probed.
	 */
	if (tasks & SPA_ASYNC_PROBE) {
		spa_vdev_state_enter(spa, SCL_NONE);
		spa_async_probe(spa, spa->spa_root_vdev);
		(void) spa_vdev_state_exit(spa, NULL, 0);
	}

	/*
	 * If any devices are done replacing, detach them.
	 */
	if (tasks & SPA_ASYNC_RESILVER_DONE)
		spa_vdev_resilver_done(spa);

	/*
	 * Kick off a resilver.
	 */
	if (tasks & SPA_ASYNC_RESILVER)
		dsl_resilver_restart(spa->spa_dsl_pool, 0);

	/*
	 * Let the world know that we're done.
	 */
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_thread = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
	thread_exit();
}

void
spa_async_suspend(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_suspended++;
	while (spa->spa_async_thread != NULL)
		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
	mutex_exit(&spa->spa_async_lock);
}

void
spa_async_resume(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	ASSERT(spa->spa_async_suspended != 0);
	spa->spa_async_suspended--;
	mutex_exit(&spa->spa_async_lock);
}

static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
	uint_t non_config_tasks;
	uint_t config_task;
	boolean_t config_task_suspended;

	non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
	config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
	if (spa->spa_ccw_fail_time == 0) {
		config_task_suspended = B_FALSE;
	} else {
		config_task_suspended =
		    (gethrtime() - spa->spa_ccw_fail_time) <
		    (zfs_ccw_retry_interval * NANOSEC);
	}

	return (non_config_tasks || (config_task && !config_task_suspended));
}
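
/*
 * (A CONFIG_UPDATE task is held back while the most recent config cache
 * write failure is younger than zfs_ccw_retry_interval seconds; all other
 * tasks are always eligible for dispatch.)
 */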

/*
 * ==========================================================================
 * SPA syncing routines
 * ==========================================================================
 */

static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	bpobj_t *bpo = arg;
	bpobj_enqueue(bpo, bp, tx);
	return (0);
}

static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	zio_t *zio = arg;

	zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
	    zio->io_flags));
	return (0);
}

/*
 * Note: this simple function is not inlined to make it easier to dtrace the
 * amount of time spent syncing frees.
 */
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(spa, NULL, NULL, 0);
	bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
	VERIFY(zio_wait(zio) == 0);
}

/*
 * Note: this simple function is not inlined to make it easier to dtrace the
 * amount of time spent syncing deferred frees.
 */
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(spa, NULL, NULL, 0);
	VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
	    spa_free_sync_cb, zio, tx), ==, 0);
	VERIFY0(zio_wait(zio));
}
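
/*
 * For example (a sketch, not from the original source, assuming stock fbt
 * probes are available), the time spent in spa_sync_frees() can be measured
 * from userland with:
 *
 *	dtrace -n '
 *	    fbt::spa_sync_frees:entry { self->ts = timestamp; }
 *	    fbt::spa_sync_frees:return /self->ts/ {
 *		@["ns syncing frees"] = sum(timestamp - self->ts);
 *		self->ts = 0;
 *	    }'
 *
 * The same pattern applies to spa_sync_deferred_frees().
 */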

static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
	char *packed = NULL;
	size_t bufsize;
	size_t nvsize = 0;
	dmu_buf_t *db;

	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);

	/*
	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
	 * information.  This avoids the dmu_buf_will_dirty() path and
	 * saves us a pre-read to get data we don't actually care about.
	 */
	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
	packed = kmem_alloc(bufsize, KM_SLEEP);

	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);
	bzero(packed + nvsize, bufsize - nvsize);

	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);

	kmem_free(packed, bufsize);

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = nvsize;
	dmu_buf_rele(db, FTAG);
}

static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
    const char *config, const char *entry)
{
	nvlist_t *nvroot;
	nvlist_t **list;
	int i;

	if (!sav->sav_sync)
		return;

	/*
	 * Update the MOS nvlist describing the list of available devices.
	 * spa_validate_aux() will have already made sure this nvlist is
	 * valid and the vdevs are labeled appropriately.
	 */
	if (sav->sav_object == 0) {
		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
		    &sav->sav_object, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	if (sav->sav_count == 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
	} else {
		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
		for (i = 0; i < sav->sav_count; i++)
			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
			    B_FALSE, VDEV_CONFIG_L2CACHE);
		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
		    sav->sav_count) == 0);
		for (i = 0; i < sav->sav_count; i++)
			nvlist_free(list[i]);
		kmem_free(list, sav->sav_count * sizeof (void *));
	}

	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
	nvlist_free(nvroot);

	sav->sav_sync = B_FALSE;
}
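
/*
 * A minimal read-side sketch (not from the original file) of the layout
 * spa_sync_nvlist() produces: the object's bonus buffer holds the packed
 * size, and the object body holds the XDR-encoded nvlist.
 * "example_load_nvlist" is a hypothetical name, and the error handling is
 * simplified; compare the real load_nvlist() used at pool-load time.
 */
static int
example_load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed;
	size_t nvsize;
	int error;

	/* the packed size lives in the bonus buffer */
	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	/* the XDR-encoded nvlist lives in the object body */
	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}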

static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
	nvlist_t *config;

	if (list_is_empty(&spa->spa_config_dirty_list))
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	config = spa_config_generate(spa, spa->spa_root_vdev,
	    dmu_tx_get_txg(tx), B_FALSE);

	/*
	 * If we're upgrading the spa version, then make sure that
	 * the config object gets updated with the correct version.
	 */
	if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
		fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
		    spa->spa_uberblock.ub_version);

	spa_config_exit(spa, SCL_STATE, FTAG);

	nvlist_free(spa->spa_config_syncing);
	spa->spa_config_syncing = config;

	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}

static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
	uint64_t *versionp = arg;
	uint64_t version = *versionp;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	/*
	 * Setting the version is special-cased when first creating the pool.
	 */
	ASSERT(tx->tx_txg != TXG_INITIAL);

	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
	ASSERT(version >= spa_version(spa));

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);
	spa_history_log_internal(spa, "set", tx, "version=%lld", version);
}
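
/*
 * Illustrative sketch (an assumption, not from the original file):
 * spa_sync_version() runs in syncing context, so a setter dispatches it
 * through dsl_sync_task().  The wrapper name and the argument values are
 * hypothetical; the real dispatch of this sort lives in spa_prop_set().
 */
static int
example_set_version(spa_t *spa, uint64_t version)
{
	/* run spa_sync_version(&version, tx) in syncing context */
	return (dsl_sync_task(spa->spa_name, NULL, spa_sync_version,
	    &version, 6, ZFS_SPACE_CHECK_RESERVED));
}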

/*
 * Set zpool properties.
 */
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
	nvlist_t *nvp = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	objset_t *mos = spa->spa_meta_objset;
	nvpair_t *elem = NULL;

	mutex_enter(&spa->spa_props_lock);

	while ((elem = nvlist_next_nvpair(nvp, elem))) {
		uint64_t intval;
		char *strval, *fname;
		zpool_prop_t prop;
		const char *propname;
		zprop_type_t proptype;
		spa_feature_t fid;

		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
		case ZPROP_INVAL:
			/*
			 * We checked this earlier in spa_prop_validate().
			 */
			ASSERT(zpool_prop_feature(nvpair_name(elem)));

			fname = strchr(nvpair_name(elem), '@') + 1;
			VERIFY0(zfeature_lookup_name(fname, &fid));

			spa_feature_enable(spa, fid, tx);
			spa_history_log_internal(spa, "set", tx,
			    "%s=enabled", nvpair_name(elem));
			break;

		case ZPOOL_PROP_VERSION:
			intval = fnvpair_value_uint64(elem);
			/*
			 * The version is synced separately before other
			 * properties and should be correct by now.
			 */
			ASSERT3U(spa_version(spa), >=, intval);
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property.  It should
			 * have been set temporarily at creation or import time.
			 */
			ASSERT(spa->spa_root != NULL);
			break;

		case ZPOOL_PROP_READONLY:
		case ZPOOL_PROP_CACHEFILE:
			/*
			 * 'readonly' and 'cachefile' are also non-persistent
			 * properties.
			 */
			break;
		case ZPOOL_PROP_COMMENT:
			strval = fnvpair_value_string(elem);
			if (spa->spa_comment != NULL)
				spa_strfree(spa->spa_comment);
			spa->spa_comment = spa_strdup(strval);
			/*
			 * We need to dirty the configuration on all the vdevs
			 * so that their labels get updated.  It's unnecessary
			 * to do this for pool creation since the vdev's
			 * configuration has already been dirtied.
			 */
			if (tx->tx_txg != TXG_INITIAL)
				vdev_config_dirty(spa->spa_root_vdev);
			spa_history_log_internal(spa, "set", tx,
			    "%s=%s", nvpair_name(elem), strval);
			break;
		default:
			/*
			 * Set pool property values in the poolprops MOS object.
			 */
			if (spa->spa_pool_props_object == 0) {
				spa->spa_pool_props_object =
				    zap_create_link(mos, DMU_OT_POOL_PROPS,
				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
				    tx);
			}

			/* normalize the property name */
			propname = zpool_prop_to_name(prop);
			proptype = zpool_prop_get_type(prop);

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				ASSERT(proptype == PROP_TYPE_STRING);
				strval = fnvpair_value_string(elem);
				VERIFY0(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    1, strlen(strval) + 1, strval, tx));
				spa_history_log_internal(spa, "set", tx,
				    "%s=%s", nvpair_name(elem), strval);
			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				intval = fnvpair_value_uint64(elem);

				if (proptype == PROP_TYPE_INDEX) {
					const char *unused;
					VERIFY0(zpool_prop_index_to_string(
					    prop, intval, &unused));
				}
				VERIFY0(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    8, 1, &intval, tx));
				spa_history_log_internal(spa, "set", tx,
				    "%s=%lld", nvpair_name(elem), intval);
			} else {
				ASSERT(0); /* not allowed */
			}

			switch (prop) {
			case ZPOOL_PROP_DELEGATION:
				spa->spa_delegation = intval;
				break;
			case ZPOOL_PROP_BOOTFS:
				spa->spa_bootfs = intval;
				break;
			case ZPOOL_PROP_FAILUREMODE:
				spa->spa_failmode = intval;
				break;
			case ZPOOL_PROP_AUTOEXPAND:
				spa->spa_autoexpand = intval;
				if (tx->tx_txg != TXG_INITIAL)
					spa_async_request(spa,
					    SPA_ASYNC_AUTOEXPAND);
				break;
			case ZPOOL_PROP_DEDUPDITTO:
				spa->spa_dedup_ditto = intval;
				break;
			default:
				break;
			}
		}
	}

	mutex_exit(&spa->spa_props_lock);
}
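
/*
 * A read-side sketch (not from the original file): persistent pool
 * properties land in the DMU_OT_POOL_PROPS ZAP, keyed by the normalized
 * property name, as strings or single uint64_t entries (matching the
 * zap_update() calls above).  "example_get_uint64_prop" is a hypothetical
 * helper; the real lookups happen in spa_prop_get() and friends.
 */
static int
example_get_uint64_prop(spa_t *spa, zpool_prop_t prop, uint64_t *valp)
{
	if (spa->spa_pool_props_object == 0)
		return (ENOENT);

	return (zap_lookup(spa->spa_meta_objset,
	    spa->spa_pool_props_object, zpool_prop_to_name(prop),
	    sizeof (uint64_t), 1, valp));
}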

/*
 * Perform one-time upgrade on-disk changes.  spa_version() does not
 * reflect the new version this txg, so there must be no changes this
 * txg to anything that the upgrade code depends on after it executes.
 * Therefore this must be called after dsl_pool_sync() does the sync
 * tasks.
 */
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;

	ASSERT(spa->spa_sync_pass == 1);

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
		dsl_pool_create_origin(dp, tx);

		/* Keeping the origin open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
		dsl_pool_upgrade_clones(dp, tx);
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
		dsl_pool_upgrade_dir_clones(dp, tx);

		/* Keeping the freedir open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		spa_feature_create_zap_objects(spa, tx);
	}

	/*
	 * The LZ4_COMPRESS feature's behavior was changed to activate_on_enable
	 * when the ability to use lz4 compression for metadata was added.
	 * Old pools that have this feature enabled must be upgraded to have
	 * this feature active.
	 */
	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		boolean_t lz4_en = spa_feature_is_enabled(spa,
		    SPA_FEATURE_LZ4_COMPRESS);
		boolean_t lz4_ac = spa_feature_is_active(spa,
		    SPA_FEATURE_LZ4_COMPRESS);

		if (lz4_en && !lz4_ac)
			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
	}

	/*
	 * If we haven't written the salt, do so now.  Note that the
	 * feature may not be activated yet, but that's fine since
	 * the presence of this ZAP entry is backwards compatible.
	 */
	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
		VERIFY0(zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
		    sizeof (spa->spa_cksum_salt.zcs_bytes),
		    spa->spa_cksum_salt.zcs_bytes, tx));
	}

	rrw_exit(&dp->dp_config_rwlock, FTAG);
}

/*
 * Sync the specified transaction group.  New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	objset_t *mos = spa->spa_meta_objset;
	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd;
	dmu_tx_t *tx;
	int error;

	VERIFY(spa_writeable(spa));

	/*
	 * Lock out configuration changes.
	 */
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	/*
	 * If there are any pending vdev state changes, convert them
	 * into config changes that go out with this transaction group.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	while (list_head(&spa->spa_state_dirty_list) != NULL) {
		/*
		 * We need the write lock here because, for aux vdevs,
		 * calling vdev_config_dirty() modifies sav_config.
		 * This is ugly and will become unnecessary when we
		 * eliminate the aux vdev wart by integrating all vdevs
		 * into the root vdev tree.
		 */
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
			vdev_state_clean(vd);
			vdev_config_dirty(vd);
		}
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	tx = dmu_tx_create_assigned(dp, txg);

	spa->spa_sync_starttime = gethrtime();
	VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
	    spa->spa_sync_starttime + spa->spa_deadman_synctime));

	/*
	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
		int i;

		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY(0 == zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}
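
	/*
	 * A note on sync passes (hedged; the tunable value is an assumption
	 * from stock zio.c, where zfs_sync_pass_deferred_free is 2): frees
	 * are issued inline only while the pass number is below that cutoff.
	 * Later passes enqueue frees on the deferred bpobj instead, which
	 * bounds the amount of new work a late pass can generate.
	 */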
	/*
	 * Iterate to convergence.
	 */
	do {
		int pass = ++spa->spa_sync_pass;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		if (pass < zfs_sync_pass_deferred_free) {
			spa_sync_frees(spa, free_bpl, tx);
		} else {
			/*
			 * We cannot defer frees in pass 1, because
			 * we sync the deferred frees later in pass 1.
			 */
			ASSERT3U(pass, >, 1);
			bplist_iterate(free_bpl, bpobj_enqueue_cb,
			    &spa->spa_deferred_bpobj, tx);
		}

		ddt_sync(spa, txg);
		dsl_scan_sync(dp, tx);

		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
		    txg)) != NULL)
			vdev_sync(vd, txg);

		if (pass == 1) {
			spa_sync_upgrades(spa, tx);
			ASSERT3U(txg, >=,
			    spa->spa_uberblock.ub_rootbp.blk_birth);
			/*
			 * Note: We need to check if the MOS is dirty
			 * because we could have marked the MOS dirty
			 * without updating the uberblock (e.g. if we
			 * have sync tasks but no dirty user data).  We
			 * need to check the uberblock's rootbp because
			 * it is updated if we have synced out dirty
			 * data (though in this case the MOS will most
			 * likely also be dirty due to second order
			 * effects, we don't want to rely on that here).
			 */
			if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
			    !dmu_objset_is_dirty(mos, txg)) {
				/*
				 * Nothing changed on the first pass,
				 * therefore this TXG is a no-op.  Avoid
				 * syncing deferred frees, so that we
				 * can keep this TXG as a no-op.
				 */
				ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
				    txg));
				ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
				ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
				break;
			}
			spa_sync_deferred_frees(spa, tx);
		}
	} while (dmu_objset_is_dirty(mos, txg));

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are no dirty vdevs, we sync the uberblock to a few
	 * random top-level vdevs that are known to be visible in the
	 * config cache (see spa_vdev_add() for a complete description).
	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
	 */
	for (;;) {
		/*
		 * We hold SCL_STATE to prevent vdev open/close/etc.
		 * while we're attempting to write the vdev labels.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

		if (list_is_empty(&spa->spa_config_dirty_list)) {
			vdev_t *svd[SPA_DVAS_PER_BP];
			int svdcount = 0;
			int children = rvd->vdev_children;
			int c0 = spa_get_random(children);

			for (int c = 0; c < children; c++) {
				vd = rvd->vdev_child[(c0 + c) % children];
				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
					continue;
				svd[svdcount++] = vd;
				if (svdcount == SPA_DVAS_PER_BP)
					break;
			}
			error = vdev_config_sync(svd, svdcount, txg);
		} else {
			error = vdev_config_sync(rvd->vdev_child,
			    rvd->vdev_children, txg);
		}

		if (error == 0)
			spa->spa_last_synced_guid = rvd->vdev_guid;

		spa_config_exit(spa, SCL_STATE, FTAG);

		if (error == 0)
			break;
		zio_suspend(spa, NULL);
		zio_resume_wait(spa);
	}
	dmu_tx_commit(tx);

	VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	spa->spa_ubsync = spa->spa_uberblock;

	dsl_pool_sync_done(dp, txg);

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
	    TXG_CLEAN(txg))) != NULL)
		vdev_sync_done(vd, txg);

	spa_update_dspace(spa);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));

	spa->spa_sync_pass = 0;

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	spa_handle_ignored_writes(spa);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE ||
		    !spa_writeable(spa) || spa_suspended(spa))
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (aux) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}

		for (i = 0; i < spa->spa_spares.sav_count; i++) {
			vd = spa->spa_spares.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}
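
/*
 * Illustrative sketch (an assumption, not from the original file): a guid
 * is normally resolved while holding the SPA config locks so the vdev tree
 * cannot change underfoot; real callers keep the lock across their use of
 * the returned vdev.  "example_find_vdev" is a hypothetical wrapper name.
 */
static vdev_t *
example_find_vdev(spa_t *spa, uint64_t guid)
{
	vdev_t *vd;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
	vd = spa_lookup_by_guid(spa, guid, B_TRUE);	/* aux vdevs too */
	/* ... use vd while the lock is held ... */
	spa_config_exit(spa, SCL_ALL, FTAG);

	return (vd);
}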

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	ASSERT(spa_writeable(spa));

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't be
	 * possible.
	 */
	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
	ASSERT3U(version, >=, spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2: once as a spare and
 * once as a replacement.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event.  The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t *ev;
	sysevent_attr_list_t *attr = NULL;
	sysevent_value_t value;
	sysevent_id_t eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}
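
/*
 * For example (a sketch; the event name is assumed to come from
 * sys/sysevent/eventdefs.h), a resilver-completion notification would be
 * posted as:
 *
 *	spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH);
 *
 * Passing a non-NULL vdev additionally attaches the ZFS_EV_VDEV_GUID and,
 * when available, ZFS_EV_VDEV_PATH attributes built above.
 */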
66463d7072f8Seschrock */ 66473d7072f8Seschrock void 66483d7072f8Seschrock spa_event_notify(spa_t *spa, vdev_t *vd, const char *name) 66493d7072f8Seschrock { 66503d7072f8Seschrock #ifdef _KERNEL 66513d7072f8Seschrock sysevent_t *ev; 66523d7072f8Seschrock sysevent_attr_list_t *attr = NULL; 66533d7072f8Seschrock sysevent_value_t value; 66543d7072f8Seschrock sysevent_id_t eid; 66553d7072f8Seschrock 66563d7072f8Seschrock ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs", 66573d7072f8Seschrock SE_SLEEP); 66583d7072f8Seschrock 66593d7072f8Seschrock value.value_type = SE_DATA_TYPE_STRING; 66603d7072f8Seschrock value.value.sv_string = spa_name(spa); 66613d7072f8Seschrock if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0) 66623d7072f8Seschrock goto done; 66633d7072f8Seschrock 66643d7072f8Seschrock value.value_type = SE_DATA_TYPE_UINT64; 66653d7072f8Seschrock value.value.sv_uint64 = spa_guid(spa); 66663d7072f8Seschrock if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0) 66673d7072f8Seschrock goto done; 66683d7072f8Seschrock 66693d7072f8Seschrock if (vd) { 66703d7072f8Seschrock value.value_type = SE_DATA_TYPE_UINT64; 66713d7072f8Seschrock value.value.sv_uint64 = vd->vdev_guid; 66723d7072f8Seschrock if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value, 66733d7072f8Seschrock SE_SLEEP) != 0) 66743d7072f8Seschrock goto done; 66753d7072f8Seschrock 66763d7072f8Seschrock if (vd->vdev_path) { 66773d7072f8Seschrock value.value_type = SE_DATA_TYPE_STRING; 66783d7072f8Seschrock value.value.sv_string = vd->vdev_path; 66793d7072f8Seschrock if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH, 66803d7072f8Seschrock &value, SE_SLEEP) != 0) 66813d7072f8Seschrock goto done; 66823d7072f8Seschrock } 66833d7072f8Seschrock } 66843d7072f8Seschrock 6685b01c3b58Seschrock if (sysevent_attach_attributes(ev, attr) != 0) 6686b01c3b58Seschrock goto done; 6687b01c3b58Seschrock attr = NULL; 6688b01c3b58Seschrock 66893d7072f8Seschrock (void) log_sysevent(ev, SE_SLEEP, &eid); 66903d7072f8Seschrock 66913d7072f8Seschrock done: 66923d7072f8Seschrock if (attr) 66933d7072f8Seschrock sysevent_free_attr(attr); 66943d7072f8Seschrock sysevent_free(ev); 66953d7072f8Seschrock #endif 66963d7072f8Seschrock } 6697