/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright (c) 2016, Intel Corporation.
 */

#include <errno.h>
#include <libintl.h>
#include <libgen.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/vdev_impl.h>
#include <libzfs.h>
#include <libzfs_impl.h>
#include <libzutil.h>
#include <sys/arc_impl.h>

/*
 * Determine whether the named pool is active and its pool GUID matches the
 * given GUID.  The result is returned through 'isactive'; the return value
 * is 0 on success and -1 on error.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}
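
/*
 * Illustrative (hypothetical) caller, sketching how pool_active() is
 * consumed; "tank" and 'guid' stand in for real values and are not part
 * of this file:
 *
 *	boolean_t isactive;
 *
 *	if (pool_active(hdl, "tank", guid, &isactive) != 0)
 *		return (-1);	// lookup failed
 *	if (isactive)
 *		...		// an imported pool matches name and guid
 */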

/*
 * Ask the kernel to "try import" the given pool config via
 * ZFS_IOC_POOL_TRYIMPORT and return the refreshed config nvlist, growing
 * the destination buffer and retrying on ENOMEM.  Returns NULL on failure.
 */
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = {"\0"};
	int err, dstbuf_size;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	dstbuf_size = MAX(CONFIG_BUF_MINSIZE, zc.zc_nvlist_conf_size * 4);

	if (zcmd_alloc_dst_nvlist(hdl, &zc, dstbuf_size) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = zfs_ioctl(hdl, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}
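
/*
 * Note the retry pattern above: the initial destination-buffer guess is
 * four times the packed config size (but at least CONFIG_BUF_MINSIZE),
 * and the TRYIMPORT ioctl is simply reissued with a larger buffer
 * whenever the kernel reports ENOMEM, so the caller never has to size
 * the result up front.
 */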

/*
 * Thin adapters that expose refresh_config() and pool_active() through the
 * generic pool_config_ops_t vtable consumed by libzutil's import code.
 */
static nvlist_t *
refresh_config_libzfs(void *handle, nvlist_t *tryconfig)
{
	return (refresh_config((libzfs_handle_t *)handle, tryconfig));
}

static int
pool_active_libzfs(void *handle, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	return (pool_active((libzfs_handle_t *)handle, name, guid, isactive));
}

const pool_config_ops_t libzfs_config_ops = {
	.pco_refresh_config = refresh_config_libzfs,
	.pco_pool_active = pool_active_libzfs,
};
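
/*
 * Sketch of the intended use of this vtable (assuming libzutil's
 * zpool_search_import() signature at this revision; 'args' values are
 * hypothetical):
 *
 *	importargs_t args = { 0 };
 *	nvlist_t *pools;
 *
 *	pools = zpool_search_import(hdl, &args, &libzfs_config_ops);
 */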

/*
 * Return the byte offset of the given label: the first two labels sit at
 * the front of the device, the last two at the end.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
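
/*
 * Worked example (VDEV_LABELS is 4; sizeof (vdev_label_t) is 256 KiB):
 * for an aligned device of 'size' bytes,
 *
 *	l = 0:	0
 *	l = 1:	256 KiB
 *	l = 2:	size - 512 KiB
 *	l = 3:	size - 256 KiB
 */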

/*
 * Given a file descriptor, clear (zero) the label information.  This function
 * is used in the appliance stack as part of the ZFS sysevent module and
 * to implement the "zpool labelclear" command.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	l2arc_dev_hdr_phys_t *l2dhdr;
	uint64_t size;
	int labels_cleared = 0, header_cleared = 0;
	boolean_t clear_l2arc_header = B_FALSE;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);

	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
		return (-1);

	if ((l2dhdr = calloc(1, sizeof (l2arc_dev_hdr_phys_t))) == NULL) {
		free(label);
		return (-1);
	}

	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t state, guid, l2cache;
		nvlist_t *config;

		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			continue;
		}

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), &config, 0) != 0) {
			continue;
		}

		/* Skip labels which do not have a valid guid. */
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid == 0) {
			nvlist_free(config);
			continue;
		}

		/* Skip labels which are not in a known valid state. */
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(config);
			continue;
		}

		/* If the device is a cache device, clear the L2ARC header. */
		if (!clear_l2arc_header) {
			if (nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
			    l2cache == POOL_STATE_L2CACHE) {
				clear_l2arc_header = B_TRUE;
			}
		}

		nvlist_free(config);

		/*
		 * A valid label was found; overwrite this label's nvlist
		 * and uberblocks with zeros on disk.  This is done to prevent
		 * system utilities, like blkid, from incorrectly detecting a
		 * partial label.  The leading pad space is left untouched.
		 */
		memset(label, 0, sizeof (vdev_label_t));
		size_t label_size = sizeof (vdev_label_t) - (2 * VDEV_PAD_SIZE);

		if (pwrite64(fd, label, label_size, label_offset(size, l) +
		    (2 * VDEV_PAD_SIZE)) == label_size) {
			labels_cleared++;
		}
	}

	/* Clear the L2ARC header. */
	if (clear_l2arc_header) {
		memset(l2dhdr, 0, sizeof (l2arc_dev_hdr_phys_t));
		if (pwrite64(fd, l2dhdr, sizeof (l2arc_dev_hdr_phys_t),
		    VDEV_LABEL_START_SIZE) == sizeof (l2arc_dev_hdr_phys_t)) {
			header_cleared++;
		}
	}

	free(label);
	free(l2dhdr);

	if (labels_cleared == 0)
		return (-1);

	return (0);
}
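
/*
 * Minimal usage sketch (hypothetical device path; error handling elided):
 *
 *	int fd = open("/dev/sda1", O_RDWR);
 *	if (fd >= 0) {
 *		if (zpool_clear_label(fd) != 0)
 *			...	// no label could be cleared
 *		(void) close(fd);
 *	}
 */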

/*
 * Recursively search the vdev tree rooted at 'nv' for a vdev whose GUID
 * matches 'guid'.
 */
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}
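
/*
 * Sketch of a hypothetical caller; 'nvroot' would be the
 * ZPOOL_CONFIG_VDEV_TREE nvlist from some pool's config, as in
 * zpool_in_use() below:
 *
 *	if (find_guid(nvroot, vdev_guid))
 *		...	// vdev is still part of that pool's tree
 */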

typedef struct aux_cbdata {
	const char	*cb_type;	/* ZPOOL_CONFIG_SPARES or _L2CACHE */
	uint64_t	cb_guid;	/* vdev guid to search for */
	zpool_handle_t	*cb_zhp;	/* matching pool, if any */
} aux_cbdata_t;

static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}
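
/*
 * find_aux() is intended as a zpool_iter() callback: returning 1 stops
 * the iteration with cb_zhp holding the still-open matching pool handle,
 * so ownership passes to the caller, which must close it.  Non-matching
 * handles are closed here before returning 0 to continue the scan.
 */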

/*
 * Determine if the device is part of a pool that is in use.  If so,
 * '*inuse' is set to B_TRUE and the state and name of the pool are
 * returned.  The name string is allocated and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	/* Spare and l2cache labels do not carry a pool name or pool guid. */
	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * A hot spare can be either definitively in use or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but doing otherwise would complicate the code, and we end up
		 * having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:
		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
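
/*
 * Usage sketch (hypothetical caller; 'fd' is an open device descriptor):
 *
 *	pool_state_t state;
 *	char *name;
 *	boolean_t inuse;
 *
 *	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *		...		// device belongs to pool 'name'
 *		free(name);
 *	}
 */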