xref: /titanic_51/usr/src/lib/libzfs/common/libzfs_import.c (revision 9a09d68d9dc5e0bdb1fbc9945e1091efa737f98f)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;

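/*
 * Read the device ID for the device at 'path' and encode it, together with
 * its minor name, as a devid string.  Returns NULL if no devid is available;
 * the caller must free the result with devid_str_free().
 */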
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.  For
	 * example, "/dev/dsk/c1t0d0s0" matches "/dev/dsk/c0t0d0s0" in its last
	 * six characters, so it would lose to an exact match but would beat a
	 * candidate that agrees only in the slice number.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
			devid_str_free(devid);
			return (-1);
		}
		devid_str_free(devid);
	}

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use, add it to the list of
	 * names to translate, but don't do anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 && state == POOL_STATE_SPARE &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.
	 * If so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Determine whether the named pool is currently active and, if so, whether
 * its GUID matches the given GUID.  Sets '*isactive' accordingly; returns
 * -1 only if the pool cannot be opened.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

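/*
 * Hand the given configuration to the kernel (ZFS_IOC_POOL_TRYIMPORT) so it
 * can vet the config and fill in any missing information, expanding the
 * destination buffer and retrying for as long as the ioctl fails with
 * ENOMEM.  Returns the refreshed config, or NULL on error.
 */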
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		(void) zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot discover pools"));
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
	nvlist_t **spares;
	uint_t i, nspares;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {
				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++)
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL)
			goto error;

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.  The first two labels sit at the
 * front of the device; the last two at the aligned end.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

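		/*
		 * Skip labels with a missing or unknown pool state, and (for
		 * anything other than a spare) labels with txg == 0, which
		 * identify half-initialized devices.
		 */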
		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_SPARE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 */
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	int i;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = "/dev/dsk";
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	if (argc == 0) {
		argc = 1;
		argv = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < argc; i++) {
		if (argv[i][0] != '/') {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    argv[i]);
			goto error;
		}

		if ((dirp = opendir(argv[i])) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    argv[i]);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			(void) snprintf(path, sizeof (path), "%s/%s",
			    argv[i], dp->d_name);

			if (stat64(path, &statbuf) != 0)
				continue;

			/*
			 * Ignore directories (which includes "." and "..").
			 */
			if (S_ISDIR(statbuf.st_mode))
				continue;

			/*
			 * Ignore special (non-regular and non-block) files.
			 */
			if (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode))
				continue;

			if ((fd = open64(path, O_RDONLY)) < 0)
				continue;

			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL)
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

	ret = get_configs(hdl, &pools);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t guid;
	boolean_t active;

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through the list of pools, check which ones are currently
	 * active, and refresh the config of those that are not.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}

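/*
 * Determine if the given vdev guid appears anywhere within the vdev tree
 * rooted at 'nv'.
 */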
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

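/*
 * State shared with the find_spare() callback: the vdev guid to search for,
 * and the handle of the pool that was found to own it as a spare.
 */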
typedef struct spare_cbdata {
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} spare_cbdata_t;

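/*
 * zpool_iter() callback that checks whether a pool's spare list contains the
 * guid in 'cb_guid'.  On a match it stashes the pool handle in 'cb_zhp' and
 * returns 1 to stop iteration; otherwise it closes the handle and returns 0.
 */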
static int
find_spare(zpool_handle_t *zhp, void *data)
{
	spare_cbdata_t *cbp = data;
	nvlist_t **spares;
	uint_t i, nspares;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		for (i = 0; i < nspares; i++) {
			verify(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determines if the device is in use by a pool.  If so, '*inuse' is set,
 * along with the state of the pool and its name.  The name string is
 * allocated and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	spare_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		if (zpool_iter(hdl, find_spare, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
1178