/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 * 	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

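/*
 * As an illustration (hypothetical GUIDs, txgs, and paths), scanning both
 * halves of a two-way mirror, one of which carries a stale label, might
 * yield:
 *
 * 	pool guid 0x4a2f
 * 	    toplevel vdev guid 0x11bc
 * 	        label txg 40 (from /dev/dsk/c0t0d0s0)
 * 	        label txg 38 (from /dev/dsk/c0t1d0s0, stale)
 *
 * and the txg 40 config would be chosen for that toplevel vdev.
 */
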
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	const char		*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;

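/*
 * A sketch of the assembled lists (hypothetical GUIDs and paths) after the
 * scan has seen two devices from a single-vdev mirrored pool:
 *
 * 	pool_list_t
 * 	    pools -> pool_entry_t (pe_guid 0x4a2f)
 * 	                 pe_vdevs -> vdev_entry_t (ve_guid 0x11bc)
 * 	                                 ve_configs -> config_entry_t (txg 40)
 * 	    names -> name_entry_t ("/dev/dsk/c0t0d0s0", ne_guid 0x77aa)
 * 	             -> name_entry_t ("/dev/dsk/c0t1d0s0", ne_guid 0x77ab)
 */
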
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}


/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static void
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			fix_paths(child[c], names);
		return;
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
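	/*
	 * For example (hypothetical paths): if the label recorded
	 * /dev/dsk/c0t1d0s4 and the device is now visible as both
	 * /dev/dsk/c2t1d0s4 and /dev/dsk/c2t1d0s0, the first candidate
	 * matches 6 trailing characters ("t1d0s4") while the second
	 * matches none ('0' vs. '4' differ immediately), so the slice 4
	 * path is preferred.
	 */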
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return;

	verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) == 0);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		verify(nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) == 0);
		devid_str_free(devid);
	}
}

/*
 * Add the given configuration to the list of known devices.
 */
static void
add_config(pool_list_t *pl, const char *path, nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return;
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		pe = zfs_malloc(sizeof (pool_entry_t));
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		ve = zfs_malloc(sizeof (vdev_entry_t));
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.  If
	 * so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		ce = zfs_malloc(sizeof (config_entry_t));
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	ne = zfs_malloc(sizeof (name_entry_t));

	ne->ne_name = zfs_strdup(path);
	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;
}

/*
 * Returns true if a pool with the given name is currently active and its
 * pool GUID matches the given GUID.
 */
boolean_t
pool_active(const char *name, uint64_t guid)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if ((zhp = zpool_open_silent(name)) == NULL)
		return (B_FALSE);

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	return (theguid == guid);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(pool_list_t *pl)
{
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	nvlist_t *ret, *config, *tmp, *nvtop, *nvroot;
	int config_seen;
	uint64_t best_txg;
	char *name;
	zfs_cmd_t zc = { 0 };
	uint64_t guid;
	char *packed;
	size_t len;
	int err;

	verify(nvlist_alloc(&ret, 0, 0) == 0);

	for (pe = pl->pools; pe != NULL; pe = penext) {
		uint_t c;
		uint_t children = 0;
		uint64_t id;
		nvlist_t **child = NULL;

		penext = pe->pe_next;

		verify(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
		config_seen = FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {
				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 * 	pool guid
				 * 	name
				 * 	pool state
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				verify(nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) == 0);
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				verify(nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) == 0);
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				verify(nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) == 0);

				config_seen = TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_malloc((id + 1) *
				    sizeof (nvlist_t *));

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			verify(nvlist_dup(nvtop, &child[id], 0) == 0);

			/*
			 * Go through and free all config information.
			 */
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;

				nvlist_free(ce->ce_config);
				free(ce);
			}

			/*
			 * Free this vdev entry, since it has now been merged
			 * into the main config.
			 */
			free(ve);
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++)
			if (child[c] == NULL) {
				nvlist_t *missing;
				verify(nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) == 0);
				verify(nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE, VDEV_TYPE_MISSING) == 0);
				verify(nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) == 0);
				verify(nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) == 0);
				child[c] = missing;
			}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) == 0);
		verify(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
		verify(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) == 0);
		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) == 0);

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		fix_paths(nvroot, pl->names);

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		verify(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) == 0);
		nvlist_free(nvroot);

		/*
		 * Free this pool entry.
		 */
		free(pe);

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(name, guid)) {
			nvlist_free(config);
			continue;
		}

		/*
		 * Try to do the import in order to get vdev state.
		 */
		if ((err = nvlist_size(config, &len, NV_ENCODE_NATIVE)) != 0)
			zfs_baderror(err);

		packed = zfs_malloc(len);

		if ((err = nvlist_pack(config, &packed, &len,
		    NV_ENCODE_NATIVE, 0)) != 0)
			zfs_baderror(err);

		nvlist_free(config);
		config = NULL;

		zc.zc_config_src_size = len;
		zc.zc_config_src = (uint64_t)(uintptr_t)packed;

		zc.zc_config_dst_size = 2 * len;
		zc.zc_config_dst = (uint64_t)(uintptr_t)
		    zfs_malloc(zc.zc_config_dst_size);

		while ((err = zfs_ioctl(ZFS_IOC_POOL_TRYIMPORT,
		    &zc)) != 0 && errno == ENOMEM) {
			free((void *)(uintptr_t)zc.zc_config_dst);
			zc.zc_config_dst = (uint64_t)(uintptr_t)
			    zfs_malloc(zc.zc_config_dst_size);
		}

		free(packed);

		if (err)
			zfs_baderror(errno);

		verify(nvlist_unpack((void *)(uintptr_t)zc.zc_config_dst,
		    zc.zc_config_dst_size, &config, 0) == 0);

		set_pool_health(config);

		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_add_nvlist(ret, name, config) == 0);

		nvlist_free(config);

		free((void *)(uintptr_t)zc.zc_config_dst);
	}

	return (ret);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(size_t size, int l)
{
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
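
/*
 * A worked example (assuming, as in this release, that VDEV_LABELS is 4
 * and sizeof (vdev_label_t) is 256K): for a 1G device, label_offset()
 * places labels 0 and 1 at offsets 0 and 256K from the start of the
 * device, and labels 2 and 3 at size - 512K and size - 256K, i.e. at the
 * very end.
 */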

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
nvlist_t *
zpool_read_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	nvlist_t *config;
	uint64_t state, txg;

	if (fstat64(fd, &statbuf) == -1)
		return (NULL);

	label = zfs_malloc(sizeof (vdev_label_t));

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread(fd, label, sizeof (vdev_label_t),
		    label_offset(statbuf.st_size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), &config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_DESTROYED) {
			nvlist_free(config);
			continue;
		}

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(config);
			continue;
		}

		free(label);
		return (config);
	}

	free(label);
	return (NULL);
}
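
/*
 * A minimal usage sketch (illustrative only; the device path is
 * hypothetical and error handling is elided):
 *
 * 	int fd = open64("/dev/dsk/c0t0d0s0", O_RDONLY);
 * 	nvlist_t *config = zpool_read_label(fd);
 * 	char *poolname;
 *
 * 	if (config != NULL &&
 * 	    nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
 * 	    &poolname) == 0)
 * 		(void) printf("labeled for pool '%s'\n", poolname);
 * 	nvlist_free(config);
 * 	(void) close(fd);
 */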

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 */
nvlist_t *
zpool_find_import(int argc, char **argv)
{
	int i;
	DIR *dirp;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	struct stat64 statbuf;
	nvlist_t *ret, *config;
	static char *default_dir = "/dev/dsk";
	int fd;
	pool_list_t pools = { 0 };

	if (argc == 0) {
		argc = 1;
		argv = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < argc; i++) {
		if (argv[i][0] != '/') {
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot open '%s': must be an absolute path"),
			    argv[i]);
			return (NULL);
		}

		if ((dirp = opendir(argv[i])) == NULL) {
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot open '%s': %s"), argv[i],
			    strerror(errno));
			return (NULL);
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {

			(void) snprintf(path, sizeof (path), "%s/%s",
			    argv[i], dp->d_name);

			if (stat64(path, &statbuf) != 0)
				continue;

			/*
			 * Ignore directories (which includes "." and "..").
			 */
			if (S_ISDIR(statbuf.st_mode))
				continue;

			if ((fd = open64(path, O_RDONLY)) < 0)
				continue;

			config = zpool_read_label(fd);

			(void) close(fd);

			if (config != NULL)
				add_config(&pools, path, config);
687 		}
688 	}
689 
690 	ret = get_configs(&pools);
691 
692 	return (ret);
693 }
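
/*
 * A sketch of how a caller might walk the result (illustrative only;
 * uses the default /dev/dsk search):
 *
 * 	nvlist_t *pools = zpool_find_import(0, NULL);
 * 	nvpair_t *elem = NULL;
 *
 * 	while (pools != NULL &&
 * 	    (elem = nvlist_next_nvpair(pools, elem)) != NULL)
 * 		(void) printf("found pool '%s'\n", nvpair_name(elem));
 */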

/*
 * Returns true if the given vdev configuration contains the given vdev
 * GUID, searching the entire tree of children.
 */
int
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (TRUE);
	}

	return (FALSE);
}

/*
 * Determines if the pool is in use.  If so, it returns TRUE and the state of
 * the pool as well as the name of the pool.  The name string is allocated
 * and must be freed by the caller.
 */
int
zpool_in_use(int fd, pool_state_t *state, char **namestr)
{
	nvlist_t *config;
	char *name;
	int ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval;

	if ((config = zpool_read_label(fd)) == NULL)
		return (FALSE);

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &name) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		ret = TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(name, guid)) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = FALSE;
			}
			}

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = TRUE;
		}
		break;

	default:
		ret = FALSE;
	}

	if (ret) {
		*namestr = zfs_strdup(name);
		*state = (pool_state_t)stateval;
	}

	nvlist_free(config);
	return (ret);
}
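
/*
 * A usage sketch for zpool_in_use() (illustrative only; the device path
 * is hypothetical):
 *
 * 	pool_state_t state;
 * 	char *name;
 * 	int fd;
 *
 * 	if ((fd = open64("/dev/dsk/c0t0d0s0", O_RDONLY)) >= 0) {
 * 		if (zpool_in_use(fd, &state, &name)) {
 * 			(void) printf("in use by pool '%s'\n", name);
 * 			free(name);
 * 		}
 * 		(void) close(fd);
 * 	}
 */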