xref: /titanic_51/usr/src/lib/libzfs/common/libzfs_import.c (revision 3ad684d66b78e06edd37e2c4fd3b3949f095194b)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 * 	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
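 *
 * For example (hypothetical GUIDs and txgs, for illustration only), the
 * labels read from a two-disk pool might be organized as:
 *
 * 	pool guid 0x1234
 * 	    toplevel vdev guid 0xaaaa -> best config from label txg 100
 * 	    toplevel vdev guid 0xbbbb -> best config from label txg 102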
 */

#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;

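/*
 * Derive the devid string for the device at 'path'.  Returns NULL if the
 * device has no devid or it cannot be determined; otherwise the caller is
 * responsible for freeing the result with devid_str_free().
 */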
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}


/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
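	 *
	 * For example (hypothetical device names, for illustration only),
	 * if the original path was /dev/dsk/c0t0d0s0 and the candidates are
	 * /dev/dsk/c0t1d0s0 and /dev/dsk/c1t0d0s0, the latter wins because
	 * it shares the longer suffix ("t0d0s0").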
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
			return (-1);
		devid_str_free(devid);
	}

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use, or a level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.  If
	 * so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Determine whether the pool with the given name is currently active and
 * has the given GUID.  The result is returned in 'isactive'.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

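/*
 * Ask the kernel to try a dry-run import of the given configuration
 * (ZFS_IOC_POOL_TRYIMPORT) and return the refreshed config it produces,
 * or NULL on failure.
 */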
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		(void) zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot discover pools"));
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * Look for any missing top-level vdevs.  If any are missing,
		 * create a faked-up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++)
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL)
			goto error;

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
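 * Labels 0 and 1 sit at the front of the device and labels 2 and 3 at the
 * end; with VDEV_LABELS == 4 and L == sizeof (vdev_label_t), the offsets
 * computed below are:
 *
 *	label 0: 0		label 2: size - 2 * L
 *	label 1: L		label 3: size - L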
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
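 * Returns 0 with *config set to NULL if no valid label is found, and -1
 * only if memory for the label buffer cannot be allocated.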
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
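 *
 * A minimal sketch of a consumer (hypothetical, for illustration only):
 *
 *	nvlist_t *pools = zpool_find_import(hdl, 0, NULL, B_FALSE);
 *	nvpair_t *elem = NULL;
 *	while (pools != NULL &&
 *	    (elem = nvlist_next_nvpair(pools, elem)) != NULL)
 *		(void) printf("found pool: %s\n", nvpair_name(elem));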
 */
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv,
    boolean_t active_ok)
{
	int i;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = "/dev/dsk";
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	if (argc == 0) {
		argc = 1;
		argv = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < argc; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(argv[i], path) == NULL) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    argv[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs.
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			/*
			 * Ignore failed stats.  We only want regular
			 * files, character devs and block devs.
			 */
			if (fstat64(fd, &statbuf) != 0 ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISCHR(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode))) {
				(void) close(fd);
				continue;
			}

			if (zpool_read_label(fd, &config) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

	ret = get_configs(hdl, &pools, active_ok);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}

/*
 * Given a cache file, return the contents as a list of importable pools.
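 * The file (typically /etc/zfs/zpool.cache) is expected to contain a
 * packed nvlist of pool configurations, which is unpacked and refreshed
 * below.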
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    boolean_t active_ok)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t guid;
	boolean_t active;

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through the cached configs, refreshing the state of each pool
	 * as we go.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (!active_ok) {
			if (pool_active(hdl, name, guid, &active) != 0) {
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}

			if (active)
				continue;

			if ((dst = refresh_config(hdl, src)) == NULL) {
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}

			if (nvlist_add_nvlist(pools, nvpair_name(elem), dst)
			    != 0) {
				(void) no_memory(hdl);
				nvlist_free(dst);
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}
			nvlist_free(dst);
		} else {
			if (nvlist_add_nvlist(pools, nvpair_name(elem), src)
			    != 0) {
				(void) no_memory(hdl);
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}
		}
	}

	nvlist_free(raw);
	return (pools);
}

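/*
 * Search the vdev tree rooted at 'nv' for a leaf or interior vdev whose
 * guid matches the one provided.
 */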
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

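/*
 * zpool_iter() callback: check whether the pool's auxiliary device list
 * (spares or l2cache, as selected by cb_type) contains a device with
 * cb_guid.  On a match we keep the pool handle open in cb_zhp and stop
 * the iteration; otherwise the handle is closed.
 */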
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  The name string is allocated
 * and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}