xref: /illumos-gate/usr/src/lib/libzfs/common/libzfs_import.c (revision 1769817e9676d2db341d2b6828c199f0eabac823)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
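/*
 * For example, if two devices both carry a label for (pool guid 0x1234,
 * toplevel vdev guid 0xabcd), one written at txg 50 and one at txg 53, only
 * the txg 53 config is kept for that vdev; the higher txg is assumed to hold
 * the more recent copy of that vdev's configuration.
 */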

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <thread_pool.h>

#include <sys/vdev_impl.h>
#include <libzutil.h>
#include <sys/arc_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Check whether the named pool is currently active and has the given pool
 * GUID.  Returns 0 on success with *isactive set accordingly, or -1 on error.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

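/*
 * Ask the kernel to validate and refresh the given pool configuration by
 * performing a dry-run import (ZFS_IOC_POOL_TRYIMPORT).  Returns the updated
 * config nvlist, or NULL on failure.
 */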
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = {"\0"};
	int err, dstbuf_size;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

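	/*
	 * The refreshed config can be larger than the packed input config, so
	 * size the destination buffer generously (at least CONFIG_BUF_MINSIZE,
	 * or four times the input size) to make ENOMEM retries unlikely.
	 */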
	dstbuf_size = MAX(CONFIG_BUF_MINSIZE, zc.zc_nvlist_conf_size * 4);

	if (zcmd_alloc_dst_nvlist(hdl, &zc, dstbuf_size) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = zfs_ioctl(hdl, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

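/*
 * Thin trampolines that adapt the libzfs_handle_t based helpers above to the
 * generic pool_config_ops_t vector consumed by the import scanning code in
 * libzutil.
 */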
static nvlist_t *
refresh_config_libzfs(void *handle, nvlist_t *tryconfig)
{
	return (refresh_config((libzfs_handle_t *)handle, tryconfig));
}

static int
pool_active_libzfs(void *handle, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	return (pool_active((libzfs_handle_t *)handle, name, guid, isactive));
}

const pool_config_ops_t libzfs_config_ops = {
	.pco_refresh_config = refresh_config_libzfs,
	.pco_pool_active = pool_active_libzfs,
};

/*
 * Return the byte offset of label 'l' on a device of the given
 * (label-aligned) size.  The first two labels sit at the front of the device
 * and the last two at the end.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, clear (zero) the label information.  If the
 * device was an L2ARC cache device, also clear the persistent L2ARC device
 * header.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	l2arc_dev_hdr_phys_t *l2dhdr;
	uint64_t size;
	int labels_cleared = 0, header_cleared = 0;
	boolean_t clear_l2arc_header = B_FALSE;

	if (fstat64(fd, &statbuf) == -1)
		return (0);

	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
		return (-1);

	if ((l2dhdr = calloc(1, sizeof (l2arc_dev_hdr_phys_t))) == NULL) {
		free(label);
		return (-1);
	}

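	/*
	 * Examine each of the device's labels.  Only labels that unpack to a
	 * config with a valid guid and a sane pool state are zeroed; anything
	 * else is left untouched.
	 */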
	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t state, guid, l2cache;
		nvlist_t *config;

		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			continue;
		}

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), &config, 0) != 0) {
			continue;
		}

		/* Skip labels which do not have a valid guid. */
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid == 0) {
			nvlist_free(config);
			continue;
		}

		/* Skip labels which are not in a known valid state. */
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(config);
			continue;
		}

		/* If the device is a cache device clear the header. */
		if (!clear_l2arc_header) {
			if (nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
			    l2cache == POOL_STATE_L2CACHE) {
				clear_l2arc_header = B_TRUE;
			}
		}

		nvlist_free(config);

		/*
		 * A valid label was found, overwrite this label's nvlist
		 * and uberblocks with zeros on disk.  This is done to prevent
		 * system utilities, like blkid, from incorrectly detecting a
		 * partial label.  The leading pad space is left untouched.
		 */
		memset(label, 0, sizeof (vdev_label_t));
		size_t label_size = sizeof (vdev_label_t) - (2 * VDEV_PAD_SIZE);

		if (pwrite64(fd, label, label_size, label_offset(size, l) +
		    (2 * VDEV_PAD_SIZE)) == label_size) {
			labels_cleared++;
		}
	}

	/* Clear the L2ARC header. */
	if (clear_l2arc_header) {
		memset(l2dhdr, 0, sizeof (l2arc_dev_hdr_phys_t));
		if (pwrite64(fd, l2dhdr, sizeof (l2arc_dev_hdr_phys_t),
		    VDEV_LABEL_START_SIZE) == sizeof (l2arc_dev_hdr_phys_t)) {
			header_cleared++;
		}
	}

	free(label);
	free(l2dhdr);

	if (labels_cleared == 0)
		return (-1);

	return (0);
}

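/*
 * Recursively search the given vdev tree for a vdev with the specified GUID.
 * Returns B_TRUE if a matching vdev is found anywhere in the tree.
 */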
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

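/*
 * State for find_aux(), a zpool_iter() callback that searches every active
 * pool for an aux vdev (spare or l2cache, selected by cb_type) with a
 * matching GUID.  On a match the open pool handle is stashed in cb_zhp and
 * iteration stops; otherwise the handle is closed.
 */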
typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determine whether the device is in use by a pool.  If it is, *inuse is set
 * to B_TRUE, *state is set to the pool state, and *namestr is set to the name
 * of the pool that owns the device.  The name string is allocated here and
 * must be freed by the caller.  Returns 0 on success and -1 on error.
 */
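/*
 * A minimal, illustrative usage sketch (the printed message is hypothetical):
 *
 *	pool_state_t state;
 *	char *name = NULL;
 *	boolean_t inuse;
 *
 *	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *		(void) printf("device belongs to pool '%s'\n", name);
 *		free(name);
 *	}
 */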
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0 && errno == ENOMEM) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * A hot spare can be either definitively in use or potentially
		 * active.  To determine whether it's in use, we iterate over
		 * all pools in the system and search for one with a spare that
		 * has a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but doing otherwise would complicate the code, and we end up
		 * having to handle this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
495