/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 */

#include <sys/spa.h>
#include <sys/file.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/nvpair.h>
#include <sys/uio.h>
#include <sys/fs/zfs.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#include <sys/zfs_file.h>
#ifdef _KERNEL
#include <sys/zone.h>
#endif

/*
 * Pool configuration repository.
 *
 * Pool configuration is stored as a packed nvlist on the filesystem. By
 * default, all pools are stored in /etc/zfs/zpool.cache and loaded on boot
 * (when the ZFS module is loaded). Pools can also have the 'cachefile'
 * property set, which allows them to be stored in an alternate location
 * under the control of external software.
 *
 * For each cache file, we have a single nvlist which holds all the
 * configuration information. When the module loads, we read this information
 * from /etc/zfs/zpool.cache and populate the SPA namespace. This namespace is
 * maintained independently in spa.c. Whenever the namespace is modified, or
 * the configuration of a pool is changed, we call spa_write_cachefile(), which
 * walks through all the active pools and writes the configuration to disk.
 */
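
/*
 * For illustration, the unpacked cache file is a single nvlist keyed by pool
 * name, with each value being that pool's config nvlist:
 *
 *	"tank"   -> { ZPOOL_CONFIG_VERSION, ZPOOL_CONFIG_POOL_GUID, ... }
 *	"backup" -> { ... }
 *
 * A userland reader is essentially the inverse of spa_write_cachefile().
 * A minimal sketch using libnvpair, assuming the whole file has already been
 * read into 'buf' of length 'len' (read_cache_file() is a hypothetical
 * helper, not part of any library):
 *
 *	nvlist_t *cache;
 *	char *buf = read_cache_file("/etc/zfs/zpool.cache", &len);
 *	VERIFY0(nvlist_unpack(buf, len, &cache, 0));
 *	for (nvpair_t *p = nvlist_next_nvpair(cache, NULL); p != NULL;
 *	    p = nvlist_next_nvpair(cache, p))
 *		(void) printf("%s\n", nvpair_name(p));
 *	nvlist_free(cache);
 */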

static uint64_t spa_config_generation = 1;

/*
 * This can be overridden in userland to preserve an alternate namespace for
 * userland pools when doing testing.
 */
char *spa_config_path = ZPOOL_CACHE;
int zfs_autoimport_disable = 1;
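
/*
 * Note: both tunables above are exposed as module parameters (see the
 * ZFS_MODULE_PARAM declarations at the bottom of this file; spa_config_path
 * only on Linux). With zfs_autoimport_disable set (the default),
 * spa_config_load() is a no-op in the kernel and the cache file is instead
 * consumed by userland at import time (e.g. 'zpool import -c
 * /etc/zfs/zpool.cache').
 */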

/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace. It does not actually open or load the pools; it
 * only populates the namespace.
 */
void
spa_config_load(void)
{
	void *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair;
	char *pathname;
	zfs_file_t *fp;
	zfs_file_attr_t zfa;
	uint64_t fsize;
	int err;

#ifdef _KERNEL
	if (zfs_autoimport_disable)
		return;
#endif

	/*
	 * Open the configuration file.
	 */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	(void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);

	err = zfs_file_open(pathname, O_RDONLY, 0, &fp);

#ifdef __FreeBSD__
	if (err)
		err = zfs_file_open(ZPOOL_CACHE_BOOT, O_RDONLY, 0, &fp);
#endif
	kmem_free(pathname, MAXPATHLEN);

	if (err)
		return;

	if (zfs_file_getattr(fp, &zfa))
		goto out;

	fsize = zfa.zfa_size;
	buf = kmem_alloc(fsize, KM_SLEEP);

	/*
	 * Read the nvlist from the file.
	 */
	if (zfs_file_read(fp, buf, fsize, NULL) < 0)
		goto out;

	/*
	 * Unpack the nvlist.
	 */
	if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
		goto out;

	/*
	 * Iterate over all elements in the nvlist, creating a new spa_t for
	 * each one with the specified configuration.
	 */
	mutex_enter(&spa_namespace_lock);
	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		child = fnvpair_value_nvlist(nvpair);

		if (spa_lookup(nvpair_name(nvpair)) != NULL)
			continue;
		(void) spa_add(nvpair_name(nvpair), child, NULL);
	}
	mutex_exit(&spa_namespace_lock);

	nvlist_free(nvlist);

out:
	if (buf != NULL)
		kmem_free(buf, fsize);

	zfs_file_close(fp);
}

static int
spa_config_remove(spa_config_dirent_t *dp)
{
	int error = 0;

	/*
	 * Remove the cache file. If zfs_file_unlink() is not supported by the
	 * platform, fall back to truncating the file, which is functionally
	 * equivalent.
	 */
	error = zfs_file_unlink(dp->scd_path);
	if (error == EOPNOTSUPP) {
		int flags = O_RDWR | O_TRUNC;
		zfs_file_t *fp;

		error = zfs_file_open(dp->scd_path, flags, 0644, &fp);
		if (error == 0) {
			(void) zfs_file_fsync(fp, O_SYNC);
			(void) zfs_file_close(fp);
		}
	}

	return (error);
}

static int
spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
{
	size_t buflen;
	char *buf;
	int oflags = O_RDWR | O_TRUNC | O_CREAT | O_LARGEFILE;
	char *temp;
	int err;
	zfs_file_t *fp;

	/*
	 * If the nvlist is empty (NULL), then remove the old cachefile.
	 */
	if (nvl == NULL) {
		err = spa_config_remove(dp);
		if (err == ENOENT)
			err = 0;

		return (err);
	}

	/*
	 * Pack the configuration into a buffer.
	 */
	buf = fnvlist_pack(nvl, &buflen);
	temp = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

	/*
	 * Write the configuration to disk. Due to the complexity involved
	 * in performing a rename and remove from within the kernel, the file
	 * is instead truncated and overwritten in place. This way we always
	 * have a consistent view of the data or a zero-length file.
	 */
	err = zfs_file_open(dp->scd_path, oflags, 0644, &fp);
	if (err == 0) {
		err = zfs_file_write(fp, buf, buflen, NULL);
		if (err == 0)
			err = zfs_file_fsync(fp, O_SYNC);

		zfs_file_close(fp);
		if (err)
			(void) spa_config_remove(dp);
	}
	fnvlist_pack_free(buf, buflen);
	kmem_free(temp, MAXPATHLEN);
	return (err);
}

/*
 * Synchronize pool configuration to disk. This must be called with the
 * namespace lock held. Synchronizing the pool cache is typically done after
 * the configuration has been synced to the MOS. This exposes a window where
 * the MOS config will have been updated but the cache file has not. If
 * the system were to crash at that instant, then the cached config may not
 * contain the correct information to open the pool and an explicit import
 * would be required.
 */
void
spa_write_cachefile(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;
	char *pool_name;
	boolean_t ccw_failure;
	int error = 0;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (!(spa_mode_global & SPA_MODE_WRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present. When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
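	/*
	 * Note: an entry whose scd_path is NULL has no backing cache file
	 * (typically because the 'cachefile' property was set to "none"), so
	 * it is skipped in the loop below.
	 */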
	ccw_failure = B_FALSE;
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			/*
			 * Skip over our own pool if we're about to remove
			 * ourselves from the spa namespace or any pool that
			 * is readonly. Since we cannot guarantee that a
			 * readonly pool would successfully import upon reboot,
			 * we don't allow them to be written to the cache file.
			 */
			if ((spa == target && removing) ||
			    !spa_writeable(spa))
				continue;

			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				nvl = fnvlist_alloc();

			if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME)
				pool_name = fnvlist_lookup_string(
				    spa->spa_config, ZPOOL_CONFIG_POOL_NAME);
			else
				pool_name = spa_name(spa);

			fnvlist_add_nvlist(nvl, pool_name, spa->spa_config);
			mutex_exit(&spa->spa_props_lock);
		}

		error = spa_config_write(dp, nvl);
		if (error != 0)
			ccw_failure = B_TRUE;
		nvlist_free(nvl);
	}

	if (ccw_failure) {
		/*
		 * Keep trying so that configuration data is
		 * written if/when any temporary filesystem
		 * resource issues are resolved.
		 */
		if (target->spa_ccw_fail_time == 0) {
			zfs_ereport_post(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE,
			    target, NULL, NULL, NULL, 0, 0);
		}
		target->spa_ccw_fail_time = gethrtime();
		spa_async_request(target, SPA_ASYNC_CONFIG_UPDATE);
	} else {
		/*
		 * Do not rate limit future attempts to update
		 * the config cache.
		 */
		target->spa_ccw_fail_time = 0;
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, NULL, ESC_ZFS_CONFIG_SYNC);
}

/*
 * Sigh. Inside a local zone, we don't have access to /etc/zfs/zpool.cache,
 * and we don't want to allow the local zone to see all the pools anyway.
 * So we have to invent the ZFS_IOC_CONFIG ioctl to grab the configuration
 * information for all pools visible within the zone.
 */
nvlist_t *
spa_all_configs(uint64_t *generation)
{
	nvlist_t *pools;
	spa_t *spa = NULL;

	if (*generation == spa_config_generation)
		return (NULL);

	pools = fnvlist_alloc();

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (INGLOBALZONE(curproc) ||
		    zone_dataset_visible(spa_name(spa), NULL)) {
			mutex_enter(&spa->spa_props_lock);
			fnvlist_add_nvlist(pools, spa_name(spa),
			    spa->spa_config);
			mutex_exit(&spa->spa_props_lock);
		}
	}
	*generation = spa_config_generation;
	mutex_exit(&spa_namespace_lock);

	return (pools);
}

void
spa_config_set(spa_t *spa, nvlist_t *config)
{
	mutex_enter(&spa->spa_props_lock);
	if (spa->spa_config != NULL && spa->spa_config != config)
		nvlist_free(spa->spa_config);
	spa->spa_config = config;
	mutex_exit(&spa->spa_props_lock);
}

/*
 * Generate the pool's configuration based on the current in-core state.
 *
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 */
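/*
 * A minimal usage sketch (illustrative only): passing vd == NULL requests the
 * complete pool config and lets this function take SCL_CONFIG | SCL_STATE
 * itself, and txg == -1ULL means "use the currently synced config txg":
 *
 *	nvlist_t *config = spa_config_generate(spa, NULL, -1ULL, B_FALSE);
 *	...
 *	nvlist_free(config);
 */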
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;
	unsigned long hostid = 0;
	boolean_t locked = B_FALSE;
	uint64_t split_guid;
	char *pool_name;

	if (vd == NULL) {
		vd = rvd;
		locked = B_TRUE;
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE));

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	/*
	 * Originally, users had to handle spa namespace collisions by either
	 * exporting the already imported pool or by specifying a new name for
	 * the pool with a conflicting name. In the case of root pools from
	 * virtual guests, neither approach to collision resolution is
	 * reasonable. This is addressed by extending the new name syntax with
	 * an option to specify that the new name is temporary. When specified,
	 * ZFS_IMPORT_TEMP_NAME will be set in spa->spa_import_flags to tell us
	 * to use the previous name, which we do below.
	 */
	if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
		VERIFY0(nvlist_lookup_string(spa->spa_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name));
	} else
		pool_name = spa_name(spa);
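
	/*
	 * For example (assuming the current zpool(8) temporary-name syntax,
	 * "zpool import -t tank tank-tmp"), the pool runs in core under the
	 * temporary name while the config generated here keeps the original
	 * on-disk pool name returned by the lookup above.
	 */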

	config = fnvlist_alloc();

	fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
	fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, pool_name);
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, spa_state(spa));
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
	fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, spa->spa_errata);
	if (spa->spa_comment != NULL)
		fnvlist_add_string(config, ZPOOL_CONFIG_COMMENT,
		    spa->spa_comment);

	hostid = spa_get_hostid(spa);
	if (hostid != 0)
		fnvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID, hostid);
	fnvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME, utsname()->nodename);

	int config_gen_flags = 0;
	if (vd != rvd) {
		fnvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid);
		fnvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid);
		if (vd->vdev_isspare)
			fnvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, 1ULL);
		if (vd->vdev_islog)
			fnvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_LOG, 1ULL);
		vd = vd->vdev_top;		/* label contains top config */
	} else {
		/*
		 * Only add the (potentially large) split information
		 * in the mos config, and not in the vdev labels
		 */
		if (spa->spa_config_splitting != NULL)
			fnvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
			    spa->spa_config_splitting);

		fnvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS);

		config_gen_flags |= VDEV_CONFIG_MOS;
	}

	/*
	 * Add the top-level config. We even add this on pools which
	 * don't support holes in the namespace.
	 */
	vdev_top_config_generate(spa, config);

	/*
	 * If we're splitting, record the original pool's guid.
	 */
	if (spa->spa_config_splitting != NULL &&
	    nvlist_lookup_uint64(spa->spa_config_splitting,
	    ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
		fnvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID, split_guid);
	}

	nvroot = vdev_config_generate(spa, vd, getstats, config_gen_flags);
	fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot);
	nvlist_free(nvroot);

	/*
	 * Store what's necessary for reading the MOS in the label.
	 */
	fnvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    spa->spa_label_features);

	if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
		ddt_histogram_t *ddh;
		ddt_stat_t *dds;
		ddt_object_t *ddo;

		ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
		ddt_get_dedup_histogram(spa, ddh);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_HISTOGRAM,
		    (uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t));
		kmem_free(ddh, sizeof (ddt_histogram_t));

		ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
		ddt_get_dedup_object_stats(spa, ddo);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS,
		    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t));
		kmem_free(ddo, sizeof (ddt_object_t));

		dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
		ddt_get_dedup_stats(spa, dds);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t));
		kmem_free(dds, sizeof (ddt_stat_t));
	}

	if (locked)
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

	return (config);
}

/*
 * Update all disk labels, generate a fresh config based on the current
 * in-core state, and sync the global config cache (do not sync the config
 * cache if this is a booting rootpool).
 */
void
spa_config_update(spa_t *spa, int what)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t txg;
	int c;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	txg = spa_last_synced_txg(spa) + 1;
	if (what == SPA_CONFIG_UPDATE_POOL) {
		vdev_config_dirty(rvd);
	} else {
		/*
		 * If we have top-level vdevs that were added but have
		 * not yet been prepared for allocation, do that now.
		 * (It's safe now because the config cache is up to date,
		 * so it will be able to translate the new DVAs.)
		 * See comments in spa_vdev_add() for full details.
		 */
		for (c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];

			/*
			 * Explicitly skip vdevs that are indirect or
			 * log vdevs that are being removed. The reason
			 * is that both of those can have vdev_ms_array
			 * set to 0 and we wouldn't want to change their
			 * metaslab size nor call vdev_expand() on them.
			 */
			if (!vdev_is_concrete(tvd) ||
			    (tvd->vdev_islog && tvd->vdev_removing))
				continue;

			if (tvd->vdev_ms_array == 0) {
				vdev_ashift_optimize(tvd);
				vdev_metaslab_set_size(tvd);
			}
			vdev_expand(tvd, txg);
		}
	}
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Wait for the mosconfig to be regenerated and synced.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	/*
	 * Update the global config cache to reflect the new mosconfig.
	 */
	if (!spa->spa_is_root) {
		spa_write_cachefile(spa, B_FALSE,
		    what != SPA_CONFIG_UPDATE_POOL);
	}

	if (what == SPA_CONFIG_UPDATE_POOL)
		spa_config_update(spa, SPA_CONFIG_UPDATE_VDEVS);
}

EXPORT_SYMBOL(spa_config_load);
EXPORT_SYMBOL(spa_all_configs);
EXPORT_SYMBOL(spa_config_set);
EXPORT_SYMBOL(spa_config_generate);
EXPORT_SYMBOL(spa_config_update);

/* BEGIN CSTYLED */
#ifdef __linux__
/* string sysctls require a char array on FreeBSD */
ZFS_MODULE_PARAM(zfs_spa, spa_, config_path, STRING, ZMOD_RD,
	"SPA config file (/etc/zfs/zpool.cache)");
#endif

ZFS_MODULE_PARAM(zfs, zfs_, autoimport_disable, INT, ZMOD_RW,
	"Disable pool import at module load");
/* END CSTYLED */