xref: /titanic_41/usr/src/lib/libzfs/common/libzfs_pool.c (revision 1f0f5e3e328e41529296f756090856aa7f239b1c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <ctype.h>
28 #include <errno.h>
29 #include <devid.h>
30 #include <fcntl.h>
31 #include <libintl.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <strings.h>
35 #include <unistd.h>
36 #include <sys/efi_partition.h>
37 #include <sys/vtoc.h>
38 #include <sys/zfs_ioctl.h>
39 #include <dlfcn.h>
40 
41 #include "zfs_namecheck.h"
42 #include "zfs_prop.h"
43 #include "libzfs_impl.h"
44 #include "zfs_comutil.h"
45 
46 const char *hist_event_table[LOG_END] = {
47 	"invalid event",
48 	"pool create",
49 	"vdev add",
50 	"pool remove",
51 	"pool destroy",
52 	"pool export",
53 	"pool import",
54 	"vdev attach",
55 	"vdev replace",
56 	"vdev detach",
57 	"vdev online",
58 	"vdev offline",
59 	"vdev upgrade",
60 	"pool clear",
61 	"pool scrub",
62 	"pool property set",
63 	"create",
64 	"clone",
65 	"destroy",
66 	"destroy_begin_sync",
67 	"inherit",
68 	"property set",
69 	"quota set",
70 	"permission update",
71 	"permission remove",
72 	"permission who remove",
73 	"promote",
74 	"receive",
75 	"rename",
76 	"reservation set",
77 	"replay_inc_sync",
78 	"replay_full_sync",
79 	"rollback",
80 	"snapshot",
81 	"filesystem version upgrade",
82 	"refquota set",
83 	"refreservation set",
84 	"pool scrub done",
85 	"user hold",
86 	"user release",
87 	"pool split",
88 };
89 
90 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
91 
92 #if defined(__i386) || defined(__amd64)
93 #define	BOOTCMD	"installgrub(1M)"
94 #else
95 #define	BOOTCMD	"installboot(1M)"
96 #endif
97 
98 #define	DISK_ROOT	"/dev/dsk"
99 #define	RDISK_ROOT	"/dev/rdsk"
100 #define	BACKUP_SLICE	"s2"
101 
102 /*
103  * ====================================================================
104  *   zpool property functions
105  * ====================================================================
106  */
107 
108 static int
109 zpool_get_all_props(zpool_handle_t *zhp)
110 {
111 	zfs_cmd_t zc = { 0 };
112 	libzfs_handle_t *hdl = zhp->zpool_hdl;
113 
114 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
115 
116 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
117 		return (-1);
118 
119 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
120 		if (errno == ENOMEM) {
121 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
122 				zcmd_free_nvlists(&zc);
123 				return (-1);
124 			}
125 		} else {
126 			zcmd_free_nvlists(&zc);
127 			return (-1);
128 		}
129 	}
130 
131 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
132 		zcmd_free_nvlists(&zc);
133 		return (-1);
134 	}
135 
136 	zcmd_free_nvlists(&zc);
137 
138 	return (0);
139 }
140 
141 static int
142 zpool_props_refresh(zpool_handle_t *zhp)
143 {
144 	nvlist_t *old_props;
145 
146 	old_props = zhp->zpool_props;
147 
148 	if (zpool_get_all_props(zhp) != 0)
149 		return (-1);
150 
151 	nvlist_free(old_props);
152 	return (0);
153 }
154 
155 static char *
156 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
157     zprop_source_t *src)
158 {
159 	nvlist_t *nv, *nvl;
160 	uint64_t ival;
161 	char *value;
162 	zprop_source_t source;
163 
164 	nvl = zhp->zpool_props;
165 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
166 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
167 		source = ival;
168 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
169 	} else {
170 		source = ZPROP_SRC_DEFAULT;
171 		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
172 			value = "-";
173 	}
174 
175 	if (src)
176 		*src = source;
177 
178 	return (value);
179 }
180 
181 uint64_t
182 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
183 {
184 	nvlist_t *nv, *nvl;
185 	uint64_t value;
186 	zprop_source_t source;
187 
188 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
189 		/*
190 		 * zpool_get_all_props() has most likely failed because
191 		 * the pool is faulted, but if all we need is the top level
192 		 * vdev's guid then get it from the zhp config nvlist.
193 		 */
194 		if ((prop == ZPOOL_PROP_GUID) &&
195 		    (nvlist_lookup_nvlist(zhp->zpool_config,
196 		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
197 		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
198 		    == 0)) {
199 			return (value);
200 		}
201 		return (zpool_prop_default_numeric(prop));
202 	}
203 
204 	nvl = zhp->zpool_props;
205 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
206 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
207 		source = value;
208 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
209 	} else {
210 		source = ZPROP_SRC_DEFAULT;
211 		value = zpool_prop_default_numeric(prop);
212 	}
213 
214 	if (src)
215 		*src = source;
216 
217 	return (value);
218 }
219 
220 /*
221  * Map VDEV STATE to printed strings.
222  */
223 char *
224 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
225 {
226 	switch (state) {
227 	case VDEV_STATE_CLOSED:
228 	case VDEV_STATE_OFFLINE:
229 		return (gettext("OFFLINE"));
230 	case VDEV_STATE_REMOVED:
231 		return (gettext("REMOVED"));
232 	case VDEV_STATE_CANT_OPEN:
233 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
234 			return (gettext("FAULTED"));
235 		else if (aux == VDEV_AUX_SPLIT_POOL)
236 			return (gettext("SPLIT"));
237 		else
238 			return (gettext("UNAVAIL"));
239 	case VDEV_STATE_FAULTED:
240 		return (gettext("FAULTED"));
241 	case VDEV_STATE_DEGRADED:
242 		return (gettext("DEGRADED"));
243 	case VDEV_STATE_HEALTHY:
244 		return (gettext("ONLINE"));
245 	}
246 
247 	return (gettext("UNKNOWN"));
248 }
249 
250 /*
251  * Get a zpool property value for 'prop' and return the value in
252  * a pre-allocated buffer.
253  */
254 int
255 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
256     zprop_source_t *srctype)
257 {
258 	uint64_t intval;
259 	const char *strval;
260 	zprop_source_t src = ZPROP_SRC_NONE;
261 	nvlist_t *nvroot;
262 	vdev_stat_t *vs;
263 	uint_t vsc;
264 
265 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
266 		switch (prop) {
267 		case ZPOOL_PROP_NAME:
268 			(void) strlcpy(buf, zpool_get_name(zhp), len);
269 			break;
270 
271 		case ZPOOL_PROP_HEALTH:
272 			(void) strlcpy(buf, "FAULTED", len);
273 			break;
274 
275 		case ZPOOL_PROP_GUID:
276 			intval = zpool_get_prop_int(zhp, prop, &src);
277 			(void) snprintf(buf, len, "%llu", intval);
278 			break;
279 
280 		case ZPOOL_PROP_ALTROOT:
281 		case ZPOOL_PROP_CACHEFILE:
282 			if (zhp->zpool_props != NULL ||
283 			    zpool_get_all_props(zhp) == 0) {
284 				(void) strlcpy(buf,
285 				    zpool_get_prop_string(zhp, prop, &src),
286 				    len);
287 				if (srctype != NULL)
288 					*srctype = src;
289 				return (0);
290 			}
291 			/* FALLTHROUGH */
292 		default:
293 			(void) strlcpy(buf, "-", len);
294 			break;
295 		}
296 
297 		if (srctype != NULL)
298 			*srctype = src;
299 		return (0);
300 	}
301 
302 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
303 	    prop != ZPOOL_PROP_NAME)
304 		return (-1);
305 
306 	switch (zpool_prop_get_type(prop)) {
307 	case PROP_TYPE_STRING:
308 		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
309 		    len);
310 		break;
311 
312 	case PROP_TYPE_NUMBER:
313 		intval = zpool_get_prop_int(zhp, prop, &src);
314 
315 		switch (prop) {
316 		case ZPOOL_PROP_SIZE:
317 		case ZPOOL_PROP_ALLOCATED:
318 		case ZPOOL_PROP_FREE:
319 			(void) zfs_nicenum(intval, buf, len);
320 			break;
321 
322 		case ZPOOL_PROP_CAPACITY:
323 			(void) snprintf(buf, len, "%llu%%",
324 			    (u_longlong_t)intval);
325 			break;
326 
327 		case ZPOOL_PROP_DEDUPRATIO:
328 			(void) snprintf(buf, len, "%llu.%02llux",
329 			    (u_longlong_t)(intval / 100),
330 			    (u_longlong_t)(intval % 100));
331 			break;
332 
333 		case ZPOOL_PROP_HEALTH:
334 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
335 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
336 			verify(nvlist_lookup_uint64_array(nvroot,
337 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
338 
339 			(void) strlcpy(buf, zpool_state_to_name(intval,
340 			    vs->vs_aux), len);
341 			break;
342 		default:
343 			(void) snprintf(buf, len, "%llu", intval);
344 		}
345 		break;
346 
347 	case PROP_TYPE_INDEX:
348 		intval = zpool_get_prop_int(zhp, prop, &src);
349 		if (zpool_prop_index_to_string(prop, intval, &strval)
350 		    != 0)
351 			return (-1);
352 		(void) strlcpy(buf, strval, len);
353 		break;
354 
355 	default:
356 		abort();
357 	}
358 
359 	if (srctype)
360 		*srctype = src;
361 
362 	return (0);
363 }
364 
365 /*
366  * Check if the bootfs name has the same pool name as it is set to.
367  * Assuming bootfs is a valid dataset name.
368  */
369 static boolean_t
370 bootfs_name_valid(const char *pool, char *bootfs)
371 {
372 	int len = strlen(pool);
373 
374 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
375 		return (B_FALSE);
376 
377 	if (strncmp(pool, bootfs, len) == 0 &&
378 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
379 		return (B_TRUE);
380 
381 	return (B_FALSE);
382 }
383 
384 /*
385  * Inspect the configuration to determine if any of the devices contain
386  * an EFI label.
387  */
388 static boolean_t
389 pool_uses_efi(nvlist_t *config)
390 {
391 	nvlist_t **child;
392 	uint_t c, children;
393 
394 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
395 	    &child, &children) != 0)
396 		return (read_efi_label(config, NULL) >= 0);
397 
398 	for (c = 0; c < children; c++) {
399 		if (pool_uses_efi(child[c]))
400 			return (B_TRUE);
401 	}
402 	return (B_FALSE);
403 }
404 
405 static boolean_t
406 pool_is_bootable(zpool_handle_t *zhp)
407 {
408 	char bootfs[ZPOOL_MAXNAMELEN];
409 
410 	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
411 	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
412 	    sizeof (bootfs)) != 0);
413 }
414 
415 
416 /*
417  * Given an nvlist of zpool properties to be set, validate that they are
418  * correct, and parse any numeric properties (index, boolean, etc) if they are
419  * specified as strings.
420  */
421 static nvlist_t *
422 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
423     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
424 {
425 	nvpair_t *elem;
426 	nvlist_t *retprops;
427 	zpool_prop_t prop;
428 	char *strval;
429 	uint64_t intval;
430 	char *slash;
431 	struct stat64 statbuf;
432 	zpool_handle_t *zhp;
433 	nvlist_t *nvroot;
434 
435 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
436 		(void) no_memory(hdl);
437 		return (NULL);
438 	}
439 
440 	elem = NULL;
441 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
442 		const char *propname = nvpair_name(elem);
443 
444 		/*
445 		 * Make sure this property is valid and applies to this type.
446 		 */
447 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
448 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
449 			    "invalid property '%s'"), propname);
450 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
451 			goto error;
452 		}
453 
454 		if (zpool_prop_readonly(prop)) {
455 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
456 			    "is readonly"), propname);
457 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
458 			goto error;
459 		}
460 
461 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
462 		    &strval, &intval, errbuf) != 0)
463 			goto error;
464 
465 		/*
466 		 * Perform additional checking for specific properties.
467 		 */
468 		switch (prop) {
469 		case ZPOOL_PROP_VERSION:
470 			if (intval < version || intval > SPA_VERSION) {
471 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
472 				    "property '%s' number %d is invalid."),
473 				    propname, intval);
474 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
475 				goto error;
476 			}
477 			break;
478 
479 		case ZPOOL_PROP_BOOTFS:
480 			if (create_or_import) {
481 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
482 				    "property '%s' cannot be set at creation "
483 				    "or import time"), propname);
484 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
485 				goto error;
486 			}
487 
488 			if (version < SPA_VERSION_BOOTFS) {
489 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
490 				    "pool must be upgraded to support "
491 				    "'%s' property"), propname);
492 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
493 				goto error;
494 			}
495 
496 			/*
497 			 * bootfs property value has to be a dataset name and
498 			 * the dataset has to be in the same pool as it sets to.
499 			 */
500 			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
501 			    strval)) {
502 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
503 				    "is an invalid name"), strval);
504 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
505 				goto error;
506 			}
507 
508 			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
509 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
510 				    "could not open pool '%s'"), poolname);
511 				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
512 				goto error;
513 			}
514 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
515 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
516 
517 			/*
518 			 * bootfs property cannot be set on a disk which has
519 			 * been EFI labeled.
520 			 */
521 			if (pool_uses_efi(nvroot)) {
522 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
523 				    "property '%s' not supported on "
524 				    "EFI labeled devices"), propname);
525 				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
526 				zpool_close(zhp);
527 				goto error;
528 			}
529 			zpool_close(zhp);
530 			break;
531 
532 		case ZPOOL_PROP_ALTROOT:
533 			if (!create_or_import) {
534 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
535 				    "property '%s' can only be set during pool "
536 				    "creation or import"), propname);
537 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
538 				goto error;
539 			}
540 
541 			if (strval[0] != '/') {
542 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
543 				    "bad alternate root '%s'"), strval);
544 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
545 				goto error;
546 			}
547 			break;
548 
549 		case ZPOOL_PROP_CACHEFILE:
550 			if (strval[0] == '\0')
551 				break;
552 
553 			if (strcmp(strval, "none") == 0)
554 				break;
555 
556 			if (strval[0] != '/') {
557 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
558 				    "property '%s' must be empty, an "
559 				    "absolute path, or 'none'"), propname);
560 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
561 				goto error;
562 			}
563 
564 			slash = strrchr(strval, '/');
565 
566 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
567 			    strcmp(slash, "/..") == 0) {
568 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
569 				    "'%s' is not a valid file"), strval);
570 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
571 				goto error;
572 			}
573 
574 			*slash = '\0';
575 
576 			if (strval[0] != '\0' &&
577 			    (stat64(strval, &statbuf) != 0 ||
578 			    !S_ISDIR(statbuf.st_mode))) {
579 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
580 				    "'%s' is not a valid directory"),
581 				    strval);
582 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
583 				goto error;
584 			}
585 
586 			*slash = '/';
587 			break;
588 		}
589 	}
590 
591 	return (retprops);
592 error:
593 	nvlist_free(retprops);
594 	return (NULL);
595 }
596 
597 /*
598  * Set zpool property : propname=propval.
599  */
600 int
601 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
602 {
603 	zfs_cmd_t zc = { 0 };
604 	int ret = -1;
605 	char errbuf[1024];
606 	nvlist_t *nvl = NULL;
607 	nvlist_t *realprops;
608 	uint64_t version;
609 
610 	(void) snprintf(errbuf, sizeof (errbuf),
611 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
612 	    zhp->zpool_name);
613 
614 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
615 		return (no_memory(zhp->zpool_hdl));
616 
617 	if (nvlist_add_string(nvl, propname, propval) != 0) {
618 		nvlist_free(nvl);
619 		return (no_memory(zhp->zpool_hdl));
620 	}
621 
622 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
623 	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
624 	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
625 		nvlist_free(nvl);
626 		return (-1);
627 	}
628 
629 	nvlist_free(nvl);
630 	nvl = realprops;
631 
632 	/*
633 	 * Execute the corresponding ioctl() to set this property.
634 	 */
635 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
636 
637 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
638 		nvlist_free(nvl);
639 		return (-1);
640 	}
641 
642 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
643 
644 	zcmd_free_nvlists(&zc);
645 	nvlist_free(nvl);
646 
647 	if (ret)
648 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
649 	else
650 		(void) zpool_props_refresh(zhp);
651 
652 	return (ret);
653 }
654 
655 int
656 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
657 {
658 	libzfs_handle_t *hdl = zhp->zpool_hdl;
659 	zprop_list_t *entry;
660 	char buf[ZFS_MAXPROPLEN];
661 
662 	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
663 		return (-1);
664 
665 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
666 
667 		if (entry->pl_fixed)
668 			continue;
669 
670 		if (entry->pl_prop != ZPROP_INVAL &&
671 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
672 		    NULL) == 0) {
673 			if (strlen(buf) > entry->pl_width)
674 				entry->pl_width = strlen(buf);
675 		}
676 	}
677 
678 	return (0);
679 }
680 
681 
682 /*
683  * Don't start the slice at the default block of 34; many storage
684  * devices will use a stripe width of 128k, so start there instead.
685  */
686 #define	NEW_START_BLOCK	256
687 
688 /*
689  * Validate the given pool name, optionally putting an extended error message in
690  * 'buf'.
691  */
692 boolean_t
693 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
694 {
695 	namecheck_err_t why;
696 	char what;
697 	int ret;
698 
699 	ret = pool_namecheck(pool, &why, &what);
700 
701 	/*
702 	 * The rules for reserved pool names were extended at a later point.
703 	 * But we need to support users with existing pools that may now be
704 	 * invalid.  So we only check for this expanded set of names during a
705 	 * create (or import), and only in userland.
706 	 */
707 	if (ret == 0 && !isopen &&
708 	    (strncmp(pool, "mirror", 6) == 0 ||
709 	    strncmp(pool, "raidz", 5) == 0 ||
710 	    strncmp(pool, "spare", 5) == 0 ||
711 	    strcmp(pool, "log") == 0)) {
712 		if (hdl != NULL)
713 			zfs_error_aux(hdl,
714 			    dgettext(TEXT_DOMAIN, "name is reserved"));
715 		return (B_FALSE);
716 	}
717 
718 
719 	if (ret != 0) {
720 		if (hdl != NULL) {
721 			switch (why) {
722 			case NAME_ERR_TOOLONG:
723 				zfs_error_aux(hdl,
724 				    dgettext(TEXT_DOMAIN, "name is too long"));
725 				break;
726 
727 			case NAME_ERR_INVALCHAR:
728 				zfs_error_aux(hdl,
729 				    dgettext(TEXT_DOMAIN, "invalid character "
730 				    "'%c' in pool name"), what);
731 				break;
732 
733 			case NAME_ERR_NOLETTER:
734 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
735 				    "name must begin with a letter"));
736 				break;
737 
738 			case NAME_ERR_RESERVED:
739 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
740 				    "name is reserved"));
741 				break;
742 
743 			case NAME_ERR_DISKLIKE:
744 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
745 				    "pool name is reserved"));
746 				break;
747 
748 			case NAME_ERR_LEADING_SLASH:
749 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
750 				    "leading slash in name"));
751 				break;
752 
753 			case NAME_ERR_EMPTY_COMPONENT:
754 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
755 				    "empty component in name"));
756 				break;
757 
758 			case NAME_ERR_TRAILING_SLASH:
759 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
760 				    "trailing slash in name"));
761 				break;
762 
763 			case NAME_ERR_MULTIPLE_AT:
764 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
765 				    "multiple '@' delimiters in name"));
766 				break;
767 
768 			}
769 		}
770 		return (B_FALSE);
771 	}
772 
773 	return (B_TRUE);
774 }
775 
776 /*
777  * Open a handle to the given pool, even if the pool is currently in the FAULTED
778  * state.
779  */
780 zpool_handle_t *
781 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
782 {
783 	zpool_handle_t *zhp;
784 	boolean_t missing;
785 
786 	/*
787 	 * Make sure the pool name is valid.
788 	 */
789 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
790 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
791 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
792 		    pool);
793 		return (NULL);
794 	}
795 
796 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
797 		return (NULL);
798 
799 	zhp->zpool_hdl = hdl;
800 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
801 
802 	if (zpool_refresh_stats(zhp, &missing) != 0) {
803 		zpool_close(zhp);
804 		return (NULL);
805 	}
806 
807 	if (missing) {
808 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
809 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
810 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
811 		zpool_close(zhp);
812 		return (NULL);
813 	}
814 
815 	return (zhp);
816 }
817 
818 /*
819  * Like the above, but silent on error.  Used when iterating over pools (because
820  * the configuration cache may be out of date).
821  */
822 int
823 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
824 {
825 	zpool_handle_t *zhp;
826 	boolean_t missing;
827 
828 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
829 		return (-1);
830 
831 	zhp->zpool_hdl = hdl;
832 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
833 
834 	if (zpool_refresh_stats(zhp, &missing) != 0) {
835 		zpool_close(zhp);
836 		return (-1);
837 	}
838 
839 	if (missing) {
840 		zpool_close(zhp);
841 		*ret = NULL;
842 		return (0);
843 	}
844 
845 	*ret = zhp;
846 	return (0);
847 }
848 
849 /*
850  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
851  * state.
852  */
853 zpool_handle_t *
854 zpool_open(libzfs_handle_t *hdl, const char *pool)
855 {
856 	zpool_handle_t *zhp;
857 
858 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
859 		return (NULL);
860 
861 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
862 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
863 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
864 		zpool_close(zhp);
865 		return (NULL);
866 	}
867 
868 	return (zhp);
869 }
870 
871 /*
872  * Close the handle.  Simply frees the memory associated with the handle.
873  */
874 void
875 zpool_close(zpool_handle_t *zhp)
876 {
877 	if (zhp->zpool_config)
878 		nvlist_free(zhp->zpool_config);
879 	if (zhp->zpool_old_config)
880 		nvlist_free(zhp->zpool_old_config);
881 	if (zhp->zpool_props)
882 		nvlist_free(zhp->zpool_props);
883 	free(zhp);
884 }
885 
886 /*
887  * Return the name of the pool.
888  */
889 const char *
890 zpool_get_name(zpool_handle_t *zhp)
891 {
892 	return (zhp->zpool_name);
893 }
894 
895 
896 /*
897  * Return the state of the pool (ACTIVE or UNAVAILABLE)
898  */
899 int
900 zpool_get_state(zpool_handle_t *zhp)
901 {
902 	return (zhp->zpool_state);
903 }
904 
905 /*
906  * Create the named pool, using the provided vdev list.  It is assumed
907  * that the consumer has already validated the contents of the nvlist, so we
908  * don't have to worry about error semantics.
909  */
910 int
911 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
912     nvlist_t *props, nvlist_t *fsprops)
913 {
914 	zfs_cmd_t zc = { 0 };
915 	nvlist_t *zc_fsprops = NULL;
916 	nvlist_t *zc_props = NULL;
917 	char msg[1024];
918 	char *altroot;
919 	int ret = -1;
920 
921 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
922 	    "cannot create '%s'"), pool);
923 
924 	if (!zpool_name_valid(hdl, B_FALSE, pool))
925 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
926 
927 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
928 		return (-1);
929 
930 	if (props) {
931 		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
932 		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
933 			goto create_failed;
934 		}
935 	}
936 
937 	if (fsprops) {
938 		uint64_t zoned;
939 		char *zonestr;
940 
941 		zoned = ((nvlist_lookup_string(fsprops,
942 		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
943 		    strcmp(zonestr, "on") == 0);
944 
945 		if ((zc_fsprops = zfs_valid_proplist(hdl,
946 		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
947 			goto create_failed;
948 		}
949 		if (!zc_props &&
950 		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
951 			goto create_failed;
952 		}
953 		if (nvlist_add_nvlist(zc_props,
954 		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
955 			goto create_failed;
956 		}
957 	}
958 
959 	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
960 		goto create_failed;
961 
962 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
963 
964 	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
965 
966 		zcmd_free_nvlists(&zc);
967 		nvlist_free(zc_props);
968 		nvlist_free(zc_fsprops);
969 
970 		switch (errno) {
971 		case EBUSY:
972 			/*
973 			 * This can happen if the user has specified the same
974 			 * device multiple times.  We can't reliably detect this
975 			 * until we try to add it and see we already have a
976 			 * label.
977 			 */
978 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
979 			    "one or more vdevs refer to the same device"));
980 			return (zfs_error(hdl, EZFS_BADDEV, msg));
981 
982 		case EOVERFLOW:
983 			/*
984 			 * This occurs when one of the devices is below
985 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
986 			 * device was the problem device since there's no
987 			 * reliable way to determine device size from userland.
988 			 */
989 			{
990 				char buf[64];
991 
992 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
993 
994 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
995 				    "one or more devices is less than the "
996 				    "minimum size (%s)"), buf);
997 			}
998 			return (zfs_error(hdl, EZFS_BADDEV, msg));
999 
1000 		case ENOSPC:
1001 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1002 			    "one or more devices is out of space"));
1003 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1004 
1005 		case ENOTBLK:
1006 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1007 			    "cache device must be a disk or disk slice"));
1008 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1009 
1010 		default:
1011 			return (zpool_standard_error(hdl, errno, msg));
1012 		}
1013 	}
1014 
1015 	/*
1016 	 * If this is an alternate root pool, then we automatically set the
1017 	 * mountpoint of the root dataset to be '/'.
1018 	 */
1019 	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
1020 	    &altroot) == 0) {
1021 		zfs_handle_t *zhp;
1022 
1023 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
1024 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1025 		    "/") == 0);
1026 
1027 		zfs_close(zhp);
1028 	}
1029 
1030 create_failed:
1031 	zcmd_free_nvlists(&zc);
1032 	nvlist_free(zc_props);
1033 	nvlist_free(zc_fsprops);
1034 	return (ret);
1035 }
1036 
1037 /*
1038  * Destroy the given pool.  It is up to the caller to ensure that there are no
1039  * datasets left in the pool.
1040  */
1041 int
1042 zpool_destroy(zpool_handle_t *zhp)
1043 {
1044 	zfs_cmd_t zc = { 0 };
1045 	zfs_handle_t *zfp = NULL;
1046 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1047 	char msg[1024];
1048 
1049 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1050 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1051 	    ZFS_TYPE_FILESYSTEM)) == NULL)
1052 		return (-1);
1053 
1054 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1055 
1056 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1057 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1058 		    "cannot destroy '%s'"), zhp->zpool_name);
1059 
1060 		if (errno == EROFS) {
1061 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1062 			    "one or more devices is read only"));
1063 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1064 		} else {
1065 			(void) zpool_standard_error(hdl, errno, msg);
1066 		}
1067 
1068 		if (zfp)
1069 			zfs_close(zfp);
1070 		return (-1);
1071 	}
1072 
1073 	if (zfp) {
1074 		remove_mountpoint(zfp);
1075 		zfs_close(zfp);
1076 	}
1077 
1078 	return (0);
1079 }
1080 
1081 /*
1082  * Add the given vdevs to the pool.  The caller must have already performed the
1083  * necessary verification to ensure that the vdev specification is well-formed.
1084  */
1085 int
1086 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1087 {
1088 	zfs_cmd_t zc = { 0 };
1089 	int ret;
1090 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1091 	char msg[1024];
1092 	nvlist_t **spares, **l2cache;
1093 	uint_t nspares, nl2cache;
1094 
1095 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1096 	    "cannot add to '%s'"), zhp->zpool_name);
1097 
1098 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1099 	    SPA_VERSION_SPARES &&
1100 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1101 	    &spares, &nspares) == 0) {
1102 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1103 		    "upgraded to add hot spares"));
1104 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1105 	}
1106 
1107 	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1108 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1109 		uint64_t s;
1110 
1111 		for (s = 0; s < nspares; s++) {
1112 			char *path;
1113 
1114 			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1115 			    &path) == 0 && pool_uses_efi(spares[s])) {
1116 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1117 				    "device '%s' contains an EFI label and "
1118 				    "cannot be used on root pools."),
1119 				    zpool_vdev_name(hdl, NULL, spares[s],
1120 				    B_FALSE));
1121 				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1122 			}
1123 		}
1124 	}
1125 
1126 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1127 	    SPA_VERSION_L2CACHE &&
1128 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1129 	    &l2cache, &nl2cache) == 0) {
1130 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1131 		    "upgraded to add cache devices"));
1132 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1133 	}
1134 
1135 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1136 		return (-1);
1137 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1138 
1139 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1140 		switch (errno) {
1141 		case EBUSY:
1142 			/*
1143 			 * This can happen if the user has specified the same
1144 			 * device multiple times.  We can't reliably detect this
1145 			 * until we try to add it and see we already have a
1146 			 * label.
1147 			 */
1148 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1149 			    "one or more vdevs refer to the same device"));
1150 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1151 			break;
1152 
1153 		case EOVERFLOW:
1154 			/*
1155 			 * This occurrs when one of the devices is below
1156 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1157 			 * device was the problem device since there's no
1158 			 * reliable way to determine device size from userland.
1159 			 */
1160 			{
1161 				char buf[64];
1162 
1163 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1164 
1165 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1166 				    "device is less than the minimum "
1167 				    "size (%s)"), buf);
1168 			}
1169 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1170 			break;
1171 
1172 		case ENOTSUP:
1173 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1174 			    "pool must be upgraded to add these vdevs"));
1175 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
1176 			break;
1177 
1178 		case EDOM:
1179 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1180 			    "root pool can not have multiple vdevs"
1181 			    " or separate logs"));
1182 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1183 			break;
1184 
1185 		case ENOTBLK:
1186 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1187 			    "cache device must be a disk or disk slice"));
1188 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1189 			break;
1190 
1191 		default:
1192 			(void) zpool_standard_error(hdl, errno, msg);
1193 		}
1194 
1195 		ret = -1;
1196 	} else {
1197 		ret = 0;
1198 	}
1199 
1200 	zcmd_free_nvlists(&zc);
1201 
1202 	return (ret);
1203 }
1204 
1205 /*
1206  * Exports the pool from the system.  The caller must ensure that there are no
1207  * mounted datasets in the pool.
1208  */
1209 int
1210 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1211 {
1212 	zfs_cmd_t zc = { 0 };
1213 	char msg[1024];
1214 
1215 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1216 	    "cannot export '%s'"), zhp->zpool_name);
1217 
1218 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1219 	zc.zc_cookie = force;
1220 	zc.zc_guid = hardforce;
1221 
1222 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1223 		switch (errno) {
1224 		case EXDEV:
1225 			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1226 			    "use '-f' to override the following errors:\n"
1227 			    "'%s' has an active shared spare which could be"
1228 			    " used by other pools once '%s' is exported."),
1229 			    zhp->zpool_name, zhp->zpool_name);
1230 			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1231 			    msg));
1232 		default:
1233 			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1234 			    msg));
1235 		}
1236 	}
1237 
1238 	return (0);
1239 }
1240 
1241 int
1242 zpool_export(zpool_handle_t *zhp, boolean_t force)
1243 {
1244 	return (zpool_export_common(zhp, force, B_FALSE));
1245 }
1246 
1247 int
1248 zpool_export_force(zpool_handle_t *zhp)
1249 {
1250 	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1251 }
1252 
1253 static void
1254 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1255     nvlist_t *rbi)
1256 {
1257 	uint64_t rewindto;
1258 	int64_t loss = -1;
1259 	struct tm t;
1260 	char timestr[128];
1261 
1262 	if (!hdl->libzfs_printerr || rbi == NULL)
1263 		return;
1264 
1265 	if (nvlist_lookup_uint64(rbi, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1266 		return;
1267 	(void) nvlist_lookup_int64(rbi, ZPOOL_CONFIG_REWIND_TIME, &loss);
1268 
1269 	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1270 	    strftime(timestr, 128, 0, &t) != 0) {
1271 		if (dryrun) {
1272 			(void) printf(dgettext(TEXT_DOMAIN,
1273 			    "Would be able to return %s "
1274 			    "to its state as of %s.\n"),
1275 			    name, timestr);
1276 		} else {
1277 			(void) printf(dgettext(TEXT_DOMAIN,
1278 			    "Pool %s returned to its state as of %s.\n"),
1279 			    name, timestr);
1280 		}
1281 		if (loss > 120) {
1282 			(void) printf(dgettext(TEXT_DOMAIN,
1283 			    "%s approximately %lld "),
1284 			    dryrun ? "Would discard" : "Discarded",
1285 			    (loss + 30) / 60);
1286 			(void) printf(dgettext(TEXT_DOMAIN,
1287 			    "minutes of transactions.\n"));
1288 		} else if (loss > 0) {
1289 			(void) printf(dgettext(TEXT_DOMAIN,
1290 			    "%s approximately %lld "),
1291 			    dryrun ? "Would discard" : "Discarded", loss);
1292 			(void) printf(dgettext(TEXT_DOMAIN,
1293 			    "seconds of transactions.\n"));
1294 		}
1295 	}
1296 }
1297 
1298 void
1299 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1300     nvlist_t *config)
1301 {
1302 	int64_t loss = -1;
1303 	uint64_t edata = UINT64_MAX;
1304 	uint64_t rewindto;
1305 	struct tm t;
1306 	char timestr[128];
1307 
1308 	if (!hdl->libzfs_printerr)
1309 		return;
1310 
1311 	if (reason >= 0)
1312 		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
1313 	else
1314 		(void) printf(dgettext(TEXT_DOMAIN, "\t"));
1315 
1316 	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1317 	if (nvlist_lookup_uint64(config,
1318 	    ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1319 		goto no_info;
1320 
1321 	(void) nvlist_lookup_int64(config, ZPOOL_CONFIG_REWIND_TIME, &loss);
1322 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1323 	    &edata);
1324 
1325 	(void) printf(dgettext(TEXT_DOMAIN,
1326 	    "Recovery is possible, but will result in some data loss.\n"));
1327 
1328 	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1329 	    strftime(timestr, 128, 0, &t) != 0) {
1330 		(void) printf(dgettext(TEXT_DOMAIN,
1331 		    "\tReturning the pool to its state as of %s\n"
1332 		    "\tshould correct the problem.  "),
1333 		    timestr);
1334 	} else {
1335 		(void) printf(dgettext(TEXT_DOMAIN,
1336 		    "\tReverting the pool to an earlier state "
1337 		    "should correct the problem.\n\t"));
1338 	}
1339 
1340 	if (loss > 120) {
1341 		(void) printf(dgettext(TEXT_DOMAIN,
1342 		    "Approximately %lld minutes of data\n"
1343 		    "\tmust be discarded, irreversibly.  "), (loss + 30) / 60);
1344 	} else if (loss > 0) {
1345 		(void) printf(dgettext(TEXT_DOMAIN,
1346 		    "Approximately %lld seconds of data\n"
1347 		    "\tmust be discarded, irreversibly.  "), loss);
1348 	}
1349 	if (edata != 0 && edata != UINT64_MAX) {
1350 		if (edata == 1) {
1351 			(void) printf(dgettext(TEXT_DOMAIN,
1352 			    "After rewind, at least\n"
1353 			    "\tone persistent user-data error will remain.  "));
1354 		} else {
1355 			(void) printf(dgettext(TEXT_DOMAIN,
1356 			    "After rewind, several\n"
1357 			    "\tpersistent user-data errors will remain.  "));
1358 		}
1359 	}
1360 	(void) printf(dgettext(TEXT_DOMAIN,
1361 	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
1362 	    reason >= 0 ? "clear" : "import", name);
1363 
1364 	(void) printf(dgettext(TEXT_DOMAIN,
1365 	    "A scrub of the pool\n"
1366 	    "\tis strongly recommended after recovery.\n"));
1367 	return;
1368 
1369 no_info:
1370 	(void) printf(dgettext(TEXT_DOMAIN,
1371 	    "Destroy and re-create the pool from\n\ta backup source.\n"));
1372 }
1373 
1374 /*
1375  * zpool_import() is a contracted interface. Should be kept the same
1376  * if possible.
1377  *
1378  * Applications should use zpool_import_props() to import a pool with
1379  * new properties value to be set.
1380  */
1381 int
1382 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1383     char *altroot)
1384 {
1385 	nvlist_t *props = NULL;
1386 	int ret;
1387 
1388 	if (altroot != NULL) {
1389 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1390 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1391 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1392 			    newname));
1393 		}
1394 
1395 		if (nvlist_add_string(props,
1396 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1397 		    nvlist_add_string(props,
1398 		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1399 			nvlist_free(props);
1400 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1401 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1402 			    newname));
1403 		}
1404 	}
1405 
1406 	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1407 	if (props)
1408 		nvlist_free(props);
1409 	return (ret);
1410 }
1411 
1412 /*
1413  * Import the given pool using the known configuration and a list of
1414  * properties to be set. The configuration should have come from
1415  * zpool_find_import(). The 'newname' parameters control whether the pool
1416  * is imported with a different name.
1417  */
1418 int
1419 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1420     nvlist_t *props, boolean_t importfaulted)
1421 {
1422 	zfs_cmd_t zc = { 0 };
1423 	zpool_rewind_policy_t policy;
1424 	nvlist_t *nvi = NULL;
1425 	char *thename;
1426 	char *origname;
1427 	uint64_t returned_size;
1428 	int ret;
1429 	char errbuf[1024];
1430 
1431 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1432 	    &origname) == 0);
1433 
1434 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1435 	    "cannot import pool '%s'"), origname);
1436 
1437 	if (newname != NULL) {
1438 		if (!zpool_name_valid(hdl, B_FALSE, newname))
1439 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1440 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1441 			    newname));
1442 		thename = (char *)newname;
1443 	} else {
1444 		thename = origname;
1445 	}
1446 
1447 	if (props) {
1448 		uint64_t version;
1449 
1450 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1451 		    &version) == 0);
1452 
1453 		if ((props = zpool_valid_proplist(hdl, origname,
1454 		    props, version, B_TRUE, errbuf)) == NULL) {
1455 			return (-1);
1456 		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1457 			nvlist_free(props);
1458 			return (-1);
1459 		}
1460 	}
1461 
1462 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1463 
1464 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1465 	    &zc.zc_guid) == 0);
1466 
1467 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1468 		nvlist_free(props);
1469 		return (-1);
1470 	}
1471 	returned_size =  zc.zc_nvlist_conf_size + 512;
1472 	if (zcmd_alloc_dst_nvlist(hdl, &zc, returned_size) != 0) {
1473 		nvlist_free(props);
1474 		return (-1);
1475 	}
1476 
1477 	zc.zc_cookie = (uint64_t)importfaulted;
1478 	ret = 0;
1479 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1480 		char desc[1024];
1481 
1482 		(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1483 		zpool_get_rewind_policy(config, &policy);
1484 		/*
1485 		 * Dry-run failed, but we print out what success
1486 		 * looks like if we found a best txg
1487 		 */
1488 		if ((policy.zrp_request & ZPOOL_TRY_REWIND) && nvi) {
1489 			zpool_rewind_exclaim(hdl, newname ? origname : thename,
1490 			    B_TRUE, nvi);
1491 			nvlist_free(nvi);
1492 			return (-1);
1493 		}
1494 
1495 		if (newname == NULL)
1496 			(void) snprintf(desc, sizeof (desc),
1497 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1498 			    thename);
1499 		else
1500 			(void) snprintf(desc, sizeof (desc),
1501 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1502 			    origname, thename);
1503 
1504 		switch (errno) {
1505 		case ENOTSUP:
1506 			/*
1507 			 * Unsupported version.
1508 			 */
1509 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
1510 			break;
1511 
1512 		case EINVAL:
1513 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1514 			break;
1515 
1516 		default:
1517 			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1518 			(void) zpool_standard_error(hdl, errno, desc);
1519 			zpool_explain_recover(hdl,
1520 			    newname ? origname : thename, -errno, nvi);
1521 			nvlist_free(nvi);
1522 			break;
1523 		}
1524 
1525 		ret = -1;
1526 	} else {
1527 		zpool_handle_t *zhp;
1528 
1529 		/*
1530 		 * This should never fail, but play it safe anyway.
1531 		 */
1532 		if (zpool_open_silent(hdl, thename, &zhp) != 0)
1533 			ret = -1;
1534 		else if (zhp != NULL)
1535 			zpool_close(zhp);
1536 		(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1537 		zpool_get_rewind_policy(config, &policy);
1538 		if (policy.zrp_request &
1539 		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1540 			zpool_rewind_exclaim(hdl, newname ? origname : thename,
1541 			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
1542 			    nvi);
1543 		}
1544 		nvlist_free(nvi);
1545 		return (0);
1546 	}
1547 
1548 	zcmd_free_nvlists(&zc);
1549 	nvlist_free(props);
1550 
1551 	return (ret);
1552 }
1553 
1554 /*
1555  * Scrub the pool.
1556  */
1557 int
1558 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1559 {
1560 	zfs_cmd_t zc = { 0 };
1561 	char msg[1024];
1562 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1563 
1564 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1565 	zc.zc_cookie = type;
1566 
1567 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1568 		return (0);
1569 
1570 	(void) snprintf(msg, sizeof (msg),
1571 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1572 
1573 	if (errno == EBUSY)
1574 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
1575 	else
1576 		return (zpool_standard_error(hdl, errno, msg));
1577 }
1578 
1579 /*
1580  * Find a vdev that matches the search criteria specified. We use the
1581  * the nvpair name to determine how we should look for the device.
1582  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1583  * spare; but FALSE if its an INUSE spare.
1584  */
1585 static nvlist_t *
1586 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1587     boolean_t *l2cache, boolean_t *log)
1588 {
1589 	uint_t c, children;
1590 	nvlist_t **child;
1591 	nvlist_t *ret;
1592 	uint64_t is_log;
1593 	char *srchkey;
1594 	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1595 
1596 	/* Nothing to look for */
1597 	if (search == NULL || pair == NULL)
1598 		return (NULL);
1599 
1600 	/* Obtain the key we will use to search */
1601 	srchkey = nvpair_name(pair);
1602 
1603 	switch (nvpair_type(pair)) {
1604 	case DATA_TYPE_UINT64: {
1605 		uint64_t srchval, theguid, present;
1606 
1607 		verify(nvpair_value_uint64(pair, &srchval) == 0);
1608 		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1609 			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1610 			    &present) == 0) {
1611 				/*
1612 				 * If the device has never been present since
1613 				 * import, the only reliable way to match the
1614 				 * vdev is by GUID.
1615 				 */
1616 				verify(nvlist_lookup_uint64(nv,
1617 				    ZPOOL_CONFIG_GUID, &theguid) == 0);
1618 				if (theguid == srchval)
1619 					return (nv);
1620 			}
1621 		}
1622 		break;
1623 	}
1624 
1625 	case DATA_TYPE_STRING: {
1626 		char *srchval, *val;
1627 
1628 		verify(nvpair_value_string(pair, &srchval) == 0);
1629 		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1630 			break;
1631 
1632 		/*
1633 		 * Search for the requested value. We special case the search
1634 		 * for ZPOOL_CONFIG_PATH when it's a wholedisk and when
1635 		 * Looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1636 		 * Otherwise, all other searches are simple string compares.
1637 		 */
1638 		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && val) {
1639 			uint64_t wholedisk = 0;
1640 
1641 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1642 			    &wholedisk);
1643 			if (wholedisk) {
1644 				/*
1645 				 * For whole disks, the internal path has 's0',
1646 				 * but the path passed in by the user doesn't.
1647 				 */
1648 				if (strlen(srchval) == strlen(val) - 2 &&
1649 				    strncmp(srchval, val, strlen(srchval)) == 0)
1650 					return (nv);
1651 				break;
1652 			}
1653 		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
1654 			char *type, *idx, *end, *p;
1655 			uint64_t id, vdev_id;
1656 
1657 			/*
1658 			 * Determine our vdev type, keeping in mind
1659 			 * that the srchval is composed of a type and
1660 			 * vdev id pair (i.e. mirror-4).
1661 			 */
1662 			if ((type = strdup(srchval)) == NULL)
1663 				return (NULL);
1664 
1665 			if ((p = strrchr(type, '-')) == NULL) {
1666 				free(type);
1667 				break;
1668 			}
1669 			idx = p + 1;
1670 			*p = '\0';
1671 
1672 			/*
1673 			 * If the types don't match then keep looking.
1674 			 */
1675 			if (strncmp(val, type, strlen(val)) != 0) {
1676 				free(type);
1677 				break;
1678 			}
1679 
1680 			verify(strncmp(type, VDEV_TYPE_RAIDZ,
1681 			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1682 			    strncmp(type, VDEV_TYPE_MIRROR,
1683 			    strlen(VDEV_TYPE_MIRROR)) == 0);
1684 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
1685 			    &id) == 0);
1686 
1687 			errno = 0;
1688 			vdev_id = strtoull(idx, &end, 10);
1689 
1690 			free(type);
1691 			if (errno != 0)
1692 				return (NULL);
1693 
1694 			/*
1695 			 * Now verify that we have the correct vdev id.
1696 			 */
1697 			if (vdev_id == id)
1698 				return (nv);
1699 		}
1700 
1701 		/*
1702 		 * Common case
1703 		 */
1704 		if (strcmp(srchval, val) == 0)
1705 			return (nv);
1706 		break;
1707 	}
1708 
1709 	default:
1710 		break;
1711 	}
1712 
1713 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1714 	    &child, &children) != 0)
1715 		return (NULL);
1716 
1717 	for (c = 0; c < children; c++) {
1718 		if ((ret = vdev_to_nvlist_iter(child[c], search,
1719 		    avail_spare, l2cache, NULL)) != NULL) {
1720 			/*
1721 			 * The 'is_log' value is only set for the toplevel
1722 			 * vdev, not the leaf vdevs.  So we always lookup the
1723 			 * log device from the root of the vdev tree (where
1724 			 * 'log' is non-NULL).
1725 			 */
1726 			if (log != NULL &&
1727 			    nvlist_lookup_uint64(child[c],
1728 			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1729 			    is_log) {
1730 				*log = B_TRUE;
1731 			}
1732 			return (ret);
1733 		}
1734 	}
1735 
1736 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1737 	    &child, &children) == 0) {
1738 		for (c = 0; c < children; c++) {
1739 			if ((ret = vdev_to_nvlist_iter(child[c], search,
1740 			    avail_spare, l2cache, NULL)) != NULL) {
1741 				*avail_spare = B_TRUE;
1742 				return (ret);
1743 			}
1744 		}
1745 	}
1746 
1747 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1748 	    &child, &children) == 0) {
1749 		for (c = 0; c < children; c++) {
1750 			if ((ret = vdev_to_nvlist_iter(child[c], search,
1751 			    avail_spare, l2cache, NULL)) != NULL) {
1752 				*l2cache = B_TRUE;
1753 				return (ret);
1754 			}
1755 		}
1756 	}
1757 
1758 	return (NULL);
1759 }
1760 
1761 /*
1762  * Given a physical path (minus the "/devices" prefix), find the
1763  * associated vdev.
1764  */
1765 nvlist_t *
1766 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
1767     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1768 {
1769 	nvlist_t *search, *nvroot, *ret;
1770 
1771 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1772 	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
1773 
1774 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1775 	    &nvroot) == 0);
1776 
1777 	*avail_spare = B_FALSE;
1778 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1779 	nvlist_free(search);
1780 
1781 	return (ret);
1782 }
1783 
1784 /*
1785  * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
1786  */
1787 boolean_t
1788 zpool_vdev_is_interior(const char *name)
1789 {
1790 	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1791 	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
1792 		return (B_TRUE);
1793 	return (B_FALSE);
1794 }
1795 
1796 nvlist_t *
1797 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1798     boolean_t *l2cache, boolean_t *log)
1799 {
1800 	char buf[MAXPATHLEN];
1801 	char *end;
1802 	nvlist_t *nvroot, *search, *ret;
1803 	uint64_t guid;
1804 
1805 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1806 
1807 	guid = strtoull(path, &end, 10);
1808 	if (guid != 0 && *end == '\0') {
1809 		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
1810 	} else if (zpool_vdev_is_interior(path)) {
1811 		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
1812 	} else if (path[0] != '/') {
1813 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1814 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
1815 	} else {
1816 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
1817 	}
1818 
1819 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1820 	    &nvroot) == 0);
1821 
1822 	*avail_spare = B_FALSE;
1823 	*l2cache = B_FALSE;
1824 	if (log != NULL)
1825 		*log = B_FALSE;
1826 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1827 	nvlist_free(search);
1828 
1829 	return (ret);
1830 }
1831 
1832 static int
1833 vdev_online(nvlist_t *nv)
1834 {
1835 	uint64_t ival;
1836 
1837 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1838 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1839 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1840 		return (0);
1841 
1842 	return (1);
1843 }
1844 
1845 /*
1846  * Helper function for zpool_get_physpaths().
1847  */
1848 static int
1849 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1850     size_t *bytes_written)
1851 {
1852 	size_t bytes_left, pos, rsz;
1853 	char *tmppath;
1854 	const char *format;
1855 
1856 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1857 	    &tmppath) != 0)
1858 		return (EZFS_NODEVICE);
1859 
1860 	pos = *bytes_written;
1861 	bytes_left = physpath_size - pos;
1862 	format = (pos == 0) ? "%s" : " %s";
1863 
1864 	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1865 	*bytes_written += rsz;
1866 
1867 	if (rsz >= bytes_left) {
1868 		/* if physpath was not copied properly, clear it */
1869 		if (bytes_left != 0) {
1870 			physpath[pos] = 0;
1871 		}
1872 		return (EZFS_NOSPC);
1873 	}
1874 	return (0);
1875 }
1876 
1877 static int
1878 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
1879     size_t *rsz, boolean_t is_spare)
1880 {
1881 	char *type;
1882 	int ret;
1883 
1884 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
1885 		return (EZFS_INVALCONFIG);
1886 
1887 	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1888 		/*
1889 		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
1890 		 * For a spare vdev, we only want to boot from the active
1891 		 * spare device.
1892 		 */
1893 		if (is_spare) {
1894 			uint64_t spare = 0;
1895 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
1896 			    &spare);
1897 			if (!spare)
1898 				return (EZFS_INVALCONFIG);
1899 		}
1900 
1901 		if (vdev_online(nv)) {
1902 			if ((ret = vdev_get_one_physpath(nv, physpath,
1903 			    phypath_size, rsz)) != 0)
1904 				return (ret);
1905 		}
1906 	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
1907 	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
1908 	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
1909 		nvlist_t **child;
1910 		uint_t count;
1911 		int i, ret;
1912 
1913 		if (nvlist_lookup_nvlist_array(nv,
1914 		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
1915 			return (EZFS_INVALCONFIG);
1916 
1917 		for (i = 0; i < count; i++) {
1918 			ret = vdev_get_physpaths(child[i], physpath,
1919 			    phypath_size, rsz, is_spare);
1920 			if (ret == EZFS_NOSPC)
1921 				return (ret);
1922 		}
1923 	}
1924 
1925 	return (EZFS_POOL_INVALARG);
1926 }
1927 
1928 /*
1929  * Get phys_path for a root pool config.
1930  * Return 0 on success; non-zero on failure.
1931  */
1932 static int
1933 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
1934 {
1935 	size_t rsz;
1936 	nvlist_t *vdev_root;
1937 	nvlist_t **child;
1938 	uint_t count;
1939 	char *type;
1940 
1941 	rsz = 0;
1942 
1943 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1944 	    &vdev_root) != 0)
1945 		return (EZFS_INVALCONFIG);
1946 
1947 	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
1948 	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1949 	    &child, &count) != 0)
1950 		return (EZFS_INVALCONFIG);
1951 
1952 	/*
1953 	 * root pool can not have EFI labeled disks and can only have
1954 	 * a single top-level vdev.
1955 	 */
1956 	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
1957 	    pool_uses_efi(vdev_root))
1958 		return (EZFS_POOL_INVALARG);
1959 
1960 	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
1961 	    B_FALSE);
1962 
1963 	/* No online devices */
1964 	if (rsz == 0)
1965 		return (EZFS_NODEVICE);
1966 
1967 	return (0);
1968 }
1969 
1970 /*
1971  * Get phys_path for a root pool
1972  * Return 0 on success; non-zero on failure.
1973  */
1974 int
1975 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
1976 {
1977 	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
1978 	    phypath_size));
1979 }
1980 
1981 /*
1982  * If the device has being dynamically expanded then we need to relabel
1983  * the disk to use the new unallocated space.
1984  */
1985 static int
1986 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
1987 {
1988 	char path[MAXPATHLEN];
1989 	char errbuf[1024];
1990 	int fd, error;
1991 	int (*_efi_use_whole_disk)(int);
1992 
1993 	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
1994 	    "efi_use_whole_disk")) == NULL)
1995 		return (-1);
1996 
1997 	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
1998 
1999 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2000 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2001 		    "relabel '%s': unable to open device"), name);
2002 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2003 	}
2004 
2005 	/*
2006 	 * It's possible that we might encounter an error if the device
2007 	 * does not have any unallocated space left. If so, we simply
2008 	 * ignore that error and continue on.
2009 	 */
2010 	error = _efi_use_whole_disk(fd);
2011 	(void) close(fd);
2012 	if (error && error != VT_ENOSPC) {
2013 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2014 		    "relabel '%s': unable to read disk capacity"), name);
2015 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2016 	}
2017 	return (0);
2018 }
2019 
2020 /*
2021  * Bring the specified vdev online.   The 'flags' parameter is a set of the
2022  * ZFS_ONLINE_* flags.
2023  */
2024 int
2025 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2026     vdev_state_t *newstate)
2027 {
2028 	zfs_cmd_t zc = { 0 };
2029 	char msg[1024];
2030 	nvlist_t *tgt;
2031 	boolean_t avail_spare, l2cache, islog;
2032 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2033 
2034 	if (flags & ZFS_ONLINE_EXPAND) {
2035 		(void) snprintf(msg, sizeof (msg),
2036 		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2037 	} else {
2038 		(void) snprintf(msg, sizeof (msg),
2039 		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2040 	}
2041 
2042 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2043 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2044 	    &islog)) == NULL)
2045 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2046 
2047 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2048 
2049 	if (avail_spare)
2050 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2051 
2052 	if (flags & ZFS_ONLINE_EXPAND ||
2053 	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2054 		char *pathname = NULL;
2055 		uint64_t wholedisk = 0;
2056 
2057 		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2058 		    &wholedisk);
2059 		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2060 		    &pathname) == 0);
2061 
2062 		/*
2063 		 * XXX - L2ARC 1.0 devices can't support expansion.
2064 		 */
2065 		if (l2cache) {
2066 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2067 			    "cannot expand cache devices"));
2068 			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2069 		}
2070 
2071 		if (wholedisk) {
2072 			pathname += strlen(DISK_ROOT) + 1;
2073 			(void) zpool_relabel_disk(zhp->zpool_hdl, pathname);
2074 		}
2075 	}
2076 
2077 	zc.zc_cookie = VDEV_STATE_ONLINE;
2078 	zc.zc_obj = flags;
2079 
2080 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2081 		if (errno == EINVAL) {
2082 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2083 			    "from this pool into a new one.  Use '%s' "
2084 			    "instead"), "zpool detach");
2085 			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2086 		}
2087 		return (zpool_standard_error(hdl, errno, msg));
2088 	}
2089 
2090 	*newstate = zc.zc_cookie;
2091 	return (0);
2092 }
2093 
2094 /*
2095  * Take the specified vdev offline
2096  */
2097 int
2098 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2099 {
2100 	zfs_cmd_t zc = { 0 };
2101 	char msg[1024];
2102 	nvlist_t *tgt;
2103 	boolean_t avail_spare, l2cache;
2104 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2105 
2106 	(void) snprintf(msg, sizeof (msg),
2107 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2108 
2109 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2110 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2111 	    NULL)) == NULL)
2112 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2113 
2114 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2115 
2116 	if (avail_spare)
2117 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2118 
2119 	zc.zc_cookie = VDEV_STATE_OFFLINE;
2120 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2121 
2122 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2123 		return (0);
2124 
2125 	switch (errno) {
2126 	case EBUSY:
2127 
2128 		/*
2129 		 * There are no other replicas of this device.
2130 		 */
2131 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2132 
2133 	case EEXIST:
2134 		/*
2135 		 * The log device has unplayed logs
2136 		 */
2137 		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2138 
2139 	default:
2140 		return (zpool_standard_error(hdl, errno, msg));
2141 	}
2142 }
2143 
2144 /*
2145  * Mark the given vdev faulted.
2146  */
2147 int
2148 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2149 {
2150 	zfs_cmd_t zc = { 0 };
2151 	char msg[1024];
2152 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2153 
2154 	(void) snprintf(msg, sizeof (msg),
2155 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2156 
2157 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2158 	zc.zc_guid = guid;
2159 	zc.zc_cookie = VDEV_STATE_FAULTED;
2160 	zc.zc_obj = aux;
2161 
2162 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2163 		return (0);
2164 
2165 	switch (errno) {
2166 	case EBUSY:
2167 
2168 		/*
2169 		 * There are no other replicas of this device.
2170 		 */
2171 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2172 
2173 	default:
2174 		return (zpool_standard_error(hdl, errno, msg));
2175 	}
2176 
2177 }
2178 
2179 /*
2180  * Mark the given vdev degraded.
2181  */
2182 int
2183 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2184 {
2185 	zfs_cmd_t zc = { 0 };
2186 	char msg[1024];
2187 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2188 
2189 	(void) snprintf(msg, sizeof (msg),
2190 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2191 
2192 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2193 	zc.zc_guid = guid;
2194 	zc.zc_cookie = VDEV_STATE_DEGRADED;
2195 	zc.zc_obj = aux;
2196 
2197 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2198 		return (0);
2199 
2200 	return (zpool_standard_error(hdl, errno, msg));
2201 }
2202 
2203 /*
2204  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2205  * a hot spare.
2206  */
2207 static boolean_t
2208 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2209 {
2210 	nvlist_t **child;
2211 	uint_t c, children;
2212 	char *type;
2213 
2214 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2215 	    &children) == 0) {
2216 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2217 		    &type) == 0);
2218 
2219 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2220 		    children == 2 && child[which] == tgt)
2221 			return (B_TRUE);
2222 
2223 		for (c = 0; c < children; c++)
2224 			if (is_replacing_spare(child[c], tgt, which))
2225 				return (B_TRUE);
2226 	}
2227 
2228 	return (B_FALSE);
2229 }
2230 
2231 /*
2232  * Attach new_disk (fully described by nvroot) to old_disk.
2233  * If 'replacing' is specified, the new disk will replace the old one.
2234  */
2235 int
2236 zpool_vdev_attach(zpool_handle_t *zhp,
2237     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2238 {
2239 	zfs_cmd_t zc = { 0 };
2240 	char msg[1024];
2241 	int ret;
2242 	nvlist_t *tgt;
2243 	boolean_t avail_spare, l2cache, islog;
2244 	uint64_t val;
2245 	char *path, *newname;
2246 	nvlist_t **child;
2247 	uint_t children;
2248 	nvlist_t *config_root;
2249 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2250 	boolean_t rootpool = pool_is_bootable(zhp);
2251 
2252 	if (replacing)
2253 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2254 		    "cannot replace %s with %s"), old_disk, new_disk);
2255 	else
2256 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2257 		    "cannot attach %s to %s"), new_disk, old_disk);
2258 
2259 	/*
2260 	 * If this is a root pool, make sure that we're not attaching an
2261 	 * EFI labeled device.
2262 	 */
2263 	if (rootpool && pool_uses_efi(nvroot)) {
2264 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2265 		    "EFI labeled devices are not supported on root pools."));
2266 		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2267 	}
2268 
2269 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2270 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2271 	    &islog)) == 0)
2272 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2273 
2274 	if (avail_spare)
2275 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2276 
2277 	if (l2cache)
2278 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2279 
2280 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2281 	zc.zc_cookie = replacing;
2282 
2283 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2284 	    &child, &children) != 0 || children != 1) {
2285 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2286 		    "new device must be a single disk"));
2287 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2288 	}
2289 
2290 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2291 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2292 
2293 	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2294 		return (-1);
2295 
2296 	/*
2297 	 * If the target is a hot spare that has been swapped in, we can only
2298 	 * replace it with another hot spare.
2299 	 */
2300 	if (replacing &&
2301 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2302 	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2303 	    NULL) == NULL || !avail_spare) &&
2304 	    is_replacing_spare(config_root, tgt, 1)) {
2305 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2306 		    "can only be replaced by another hot spare"));
2307 		free(newname);
2308 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2309 	}
2310 
2311 	/*
2312 	 * If we are attempting to replace a spare, it canot be applied to an
2313 	 * already spared device.
2314 	 */
2315 	if (replacing &&
2316 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
2317 	    zpool_find_vdev(zhp, newname, &avail_spare,
2318 	    &l2cache, NULL) != NULL && avail_spare &&
2319 	    is_replacing_spare(config_root, tgt, 0)) {
2320 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2321 		    "device has already been replaced with a spare"));
2322 		free(newname);
2323 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2324 	}
2325 
2326 	free(newname);
2327 
2328 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2329 		return (-1);
2330 
2331 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2332 
2333 	zcmd_free_nvlists(&zc);
2334 
2335 	if (ret == 0) {
2336 		if (rootpool) {
2337 			/*
2338 			 * XXX - This should be removed once we can
2339 			 * automatically install the bootblocks on the
2340 			 * newly attached disk.
2341 			 */
2342 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
2343 			    "be sure to invoke %s to make '%s' bootable.\n"),
2344 			    BOOTCMD, new_disk);
2345 
2346 			/*
2347 			 * XXX need a better way to prevent user from
2348 			 * booting up a half-baked vdev.
2349 			 */
2350 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2351 			    "sure to wait until resilver is done "
2352 			    "before rebooting.\n"));
2353 		}
2354 		return (0);
2355 	}
2356 
2357 	switch (errno) {
2358 	case ENOTSUP:
2359 		/*
2360 		 * Can't attach to or replace this type of vdev.
2361 		 */
2362 		if (replacing) {
2363 			if (islog)
2364 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2365 				    "cannot replace a log with a spare"));
2366 			else
2367 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2368 				    "cannot replace a replacing device"));
2369 		} else {
2370 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2371 			    "can only attach to mirrors and top-level "
2372 			    "disks"));
2373 		}
2374 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
2375 		break;
2376 
2377 	case EINVAL:
2378 		/*
2379 		 * The new device must be a single disk.
2380 		 */
2381 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2382 		    "new device must be a single disk"));
2383 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2384 		break;
2385 
2386 	case EBUSY:
2387 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2388 		    new_disk);
2389 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2390 		break;
2391 
2392 	case EOVERFLOW:
2393 		/*
2394 		 * The new device is too small.
2395 		 */
2396 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2397 		    "device is too small"));
2398 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2399 		break;
2400 
2401 	case EDOM:
2402 		/*
2403 		 * The new device has a different alignment requirement.
2404 		 */
2405 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2406 		    "devices have different sector alignment"));
2407 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2408 		break;
2409 
2410 	case ENAMETOOLONG:
2411 		/*
2412 		 * The resulting top-level vdev spec won't fit in the label.
2413 		 */
2414 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2415 		break;
2416 
2417 	default:
2418 		(void) zpool_standard_error(hdl, errno, msg);
2419 	}
2420 
2421 	return (-1);
2422 }
2423 
2424 /*
2425  * Detach the specified device.
2426  */
2427 int
2428 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2429 {
2430 	zfs_cmd_t zc = { 0 };
2431 	char msg[1024];
2432 	nvlist_t *tgt;
2433 	boolean_t avail_spare, l2cache;
2434 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2435 
2436 	(void) snprintf(msg, sizeof (msg),
2437 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2438 
2439 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2440 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2441 	    NULL)) == 0)
2442 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2443 
2444 	if (avail_spare)
2445 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2446 
2447 	if (l2cache)
2448 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2449 
2450 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2451 
2452 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2453 		return (0);
2454 
2455 	switch (errno) {
2456 
2457 	case ENOTSUP:
2458 		/*
2459 		 * Can't detach from this type of vdev.
2460 		 */
2461 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2462 		    "applicable to mirror and replacing vdevs"));
2463 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
2464 		break;
2465 
2466 	case EBUSY:
2467 		/*
2468 		 * There are no other replicas of this device.
2469 		 */
2470 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2471 		break;
2472 
2473 	default:
2474 		(void) zpool_standard_error(hdl, errno, msg);
2475 	}
2476 
2477 	return (-1);
2478 }
2479 
2480 /*
2481  * Find a mirror vdev in the source nvlist.
2482  *
2483  * The mchild array contains a list of disks in one of the top-level mirrors
2484  * of the source pool.  The schild array contains a list of disks that the
2485  * user specified on the command line.  We loop over the mchild array to
2486  * see if any entry in the schild array matches.
2487  *
2488  * If a disk in the mchild array is found in the schild array, we return
2489  * the index of that entry.  Otherwise we return -1.
2490  */
2491 static int
2492 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2493     nvlist_t **schild, uint_t schildren)
2494 {
2495 	uint_t mc;
2496 
2497 	for (mc = 0; mc < mchildren; mc++) {
2498 		uint_t sc;
2499 		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2500 		    mchild[mc], B_FALSE);
2501 
2502 		for (sc = 0; sc < schildren; sc++) {
2503 			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2504 			    schild[sc], B_FALSE);
2505 			boolean_t result = (strcmp(mpath, spath) == 0);
2506 
2507 			free(spath);
2508 			if (result) {
2509 				free(mpath);
2510 				return (mc);
2511 			}
2512 		}
2513 
2514 		free(mpath);
2515 	}
2516 
2517 	return (-1);
2518 }
2519 
2520 /*
2521  * Split a mirror pool.  If newroot points to null, then a new nvlist
2522  * is generated and it is the responsibility of the caller to free it.
2523  */
2524 int
2525 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2526     nvlist_t *props, splitflags_t flags)
2527 {
2528 	zfs_cmd_t zc = { 0 };
2529 	char msg[1024];
2530 	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2531 	nvlist_t **varray = NULL, *zc_props = NULL;
2532 	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2533 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2534 	uint64_t vers;
2535 	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2536 	int retval = 0;
2537 
2538 	(void) snprintf(msg, sizeof (msg),
2539 	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2540 
2541 	if (!zpool_name_valid(hdl, B_FALSE, newname))
2542 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2543 
2544 	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2545 		(void) fprintf(stderr, gettext("Internal error: unable to "
2546 		    "retrieve pool configuration\n"));
2547 		return (-1);
2548 	}
2549 
2550 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2551 	    == 0);
2552 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2553 
2554 	if (props) {
2555 		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2556 		    props, vers, B_TRUE, msg)) == NULL)
2557 			return (-1);
2558 	}
2559 
2560 	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2561 	    &children) != 0) {
2562 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2563 		    "Source pool is missing vdev tree"));
2564 		if (zc_props)
2565 			nvlist_free(zc_props);
2566 		return (-1);
2567 	}
2568 
2569 	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2570 	vcount = 0;
2571 
2572 	if (*newroot == NULL ||
2573 	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2574 	    &newchild, &newchildren) != 0)
2575 		newchildren = 0;
2576 
2577 	for (c = 0; c < children; c++) {
2578 		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2579 		char *type;
2580 		nvlist_t **mchild, *vdev;
2581 		uint_t mchildren;
2582 		int entry;
2583 
2584 		/*
2585 		 * Unlike cache & spares, slogs are stored in the
2586 		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
2587 		 */
2588 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2589 		    &is_log);
2590 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2591 		    &is_hole);
2592 		if (is_log || is_hole) {
2593 			/*
2594 			 * Create a hole vdev and put it in the config.
2595 			 */
2596 			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2597 				goto out;
2598 			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2599 			    VDEV_TYPE_HOLE) != 0)
2600 				goto out;
2601 			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2602 			    1) != 0)
2603 				goto out;
2604 			if (lastlog == 0)
2605 				lastlog = vcount;
2606 			varray[vcount++] = vdev;
2607 			continue;
2608 		}
2609 		lastlog = 0;
2610 		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2611 		    == 0);
2612 		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2613 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2614 			    "Source pool must be composed only of mirrors\n"));
2615 			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2616 			goto out;
2617 		}
2618 
2619 		verify(nvlist_lookup_nvlist_array(child[c],
2620 		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2621 
2622 		/* find or add an entry for this top-level vdev */
2623 		if (newchildren > 0 &&
2624 		    (entry = find_vdev_entry(zhp, mchild, mchildren,
2625 		    newchild, newchildren)) >= 0) {
2626 			/* We found a disk that the user specified. */
2627 			vdev = mchild[entry];
2628 			++found;
2629 		} else {
2630 			/* User didn't specify a disk for this vdev. */
2631 			vdev = mchild[mchildren - 1];
2632 		}
2633 
2634 		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2635 			goto out;
2636 	}
2637 
2638 	/* did we find every disk the user specified? */
2639 	if (found != newchildren) {
2640 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2641 		    "include at most one disk from each mirror"));
2642 		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2643 		goto out;
2644 	}
2645 
2646 	/* Prepare the nvlist for populating. */
2647 	if (*newroot == NULL) {
2648 		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2649 			goto out;
2650 		freelist = B_TRUE;
2651 		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2652 		    VDEV_TYPE_ROOT) != 0)
2653 			goto out;
2654 	} else {
2655 		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2656 	}
2657 
2658 	/* Add all the children we found */
2659 	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2660 	    lastlog == 0 ? vcount : lastlog) != 0)
2661 		goto out;
2662 
2663 	/*
2664 	 * If we're just doing a dry run, exit now with success.
2665 	 */
2666 	if (flags.dryrun) {
2667 		memory_err = B_FALSE;
2668 		freelist = B_FALSE;
2669 		goto out;
2670 	}
2671 
2672 	/* now build up the config list & call the ioctl */
2673 	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2674 		goto out;
2675 
2676 	if (nvlist_add_nvlist(newconfig,
2677 	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2678 	    nvlist_add_string(newconfig,
2679 	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2680 	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2681 		goto out;
2682 
2683 	/*
2684 	 * The new pool is automatically part of the namespace unless we
2685 	 * explicitly export it.
2686 	 */
2687 	if (!flags.import)
2688 		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2689 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2690 	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2691 	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2692 		goto out;
2693 	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2694 		goto out;
2695 
2696 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2697 		retval = zpool_standard_error(hdl, errno, msg);
2698 		goto out;
2699 	}
2700 
2701 	freelist = B_FALSE;
2702 	memory_err = B_FALSE;
2703 
2704 out:
2705 	if (varray != NULL) {
2706 		int v;
2707 
2708 		for (v = 0; v < vcount; v++)
2709 			nvlist_free(varray[v]);
2710 		free(varray);
2711 	}
2712 	zcmd_free_nvlists(&zc);
2713 	if (zc_props)
2714 		nvlist_free(zc_props);
2715 	if (newconfig)
2716 		nvlist_free(newconfig);
2717 	if (freelist) {
2718 		nvlist_free(*newroot);
2719 		*newroot = NULL;
2720 	}
2721 
2722 	if (retval != 0)
2723 		return (retval);
2724 
2725 	if (memory_err)
2726 		return (no_memory(hdl));
2727 
2728 	return (0);
2729 }
2730 
2731 /*
2732  * Remove the given device.  Currently, this is supported only for hot spares
2733  * and level 2 cache devices.
2734  */
2735 int
2736 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2737 {
2738 	zfs_cmd_t zc = { 0 };
2739 	char msg[1024];
2740 	nvlist_t *tgt;
2741 	boolean_t avail_spare, l2cache, islog;
2742 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2743 	uint64_t version;
2744 
2745 	(void) snprintf(msg, sizeof (msg),
2746 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2747 
2748 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2749 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2750 	    &islog)) == 0)
2751 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2752 	/*
2753 	 * XXX - this should just go away.
2754 	 */
2755 	if (!avail_spare && !l2cache && !islog) {
2756 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2757 		    "only inactive hot spares, cache, top-level, "
2758 		    "or log devices can be removed"));
2759 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2760 	}
2761 
2762 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2763 	if (islog && version < SPA_VERSION_HOLES) {
2764 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2765 		    "pool must be upgrade to support log removal"));
2766 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
2767 	}
2768 
2769 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2770 
2771 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2772 		return (0);
2773 
2774 	return (zpool_standard_error(hdl, errno, msg));
2775 }
2776 
2777 /*
2778  * Clear the errors for the pool, or the particular device if specified.
2779  */
2780 int
2781 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
2782 {
2783 	zfs_cmd_t zc = { 0 };
2784 	char msg[1024];
2785 	nvlist_t *tgt;
2786 	zpool_rewind_policy_t policy;
2787 	boolean_t avail_spare, l2cache;
2788 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2789 	nvlist_t *nvi = NULL;
2790 
2791 	if (path)
2792 		(void) snprintf(msg, sizeof (msg),
2793 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2794 		    path);
2795 	else
2796 		(void) snprintf(msg, sizeof (msg),
2797 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2798 		    zhp->zpool_name);
2799 
2800 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2801 	if (path) {
2802 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2803 		    &l2cache, NULL)) == 0)
2804 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
2805 
2806 		/*
2807 		 * Don't allow error clearing for hot spares.  Do allow
2808 		 * error clearing for l2cache devices.
2809 		 */
2810 		if (avail_spare)
2811 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
2812 
2813 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2814 		    &zc.zc_guid) == 0);
2815 	}
2816 
2817 	zpool_get_rewind_policy(rewindnvl, &policy);
2818 	zc.zc_cookie = policy.zrp_request;
2819 
2820 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 8192) != 0)
2821 		return (-1);
2822 
2823 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, rewindnvl) != 0)
2824 		return (-1);
2825 
2826 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0 ||
2827 	    ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
2828 	    errno != EPERM && errno != EACCES)) {
2829 		if (policy.zrp_request &
2830 		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2831 			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
2832 			zpool_rewind_exclaim(hdl, zc.zc_name,
2833 			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
2834 			    nvi);
2835 			nvlist_free(nvi);
2836 		}
2837 		zcmd_free_nvlists(&zc);
2838 		return (0);
2839 	}
2840 
2841 	zcmd_free_nvlists(&zc);
2842 	return (zpool_standard_error(hdl, errno, msg));
2843 }
2844 
2845 /*
2846  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2847  */
2848 int
2849 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2850 {
2851 	zfs_cmd_t zc = { 0 };
2852 	char msg[1024];
2853 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2854 
2855 	(void) snprintf(msg, sizeof (msg),
2856 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2857 	    guid);
2858 
2859 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2860 	zc.zc_guid = guid;
2861 
2862 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2863 		return (0);
2864 
2865 	return (zpool_standard_error(hdl, errno, msg));
2866 }
2867 
2868 /*
2869  * Convert from a devid string to a path.
2870  */
2871 static char *
2872 devid_to_path(char *devid_str)
2873 {
2874 	ddi_devid_t devid;
2875 	char *minor;
2876 	char *path;
2877 	devid_nmlist_t *list = NULL;
2878 	int ret;
2879 
2880 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2881 		return (NULL);
2882 
2883 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2884 
2885 	devid_str_free(minor);
2886 	devid_free(devid);
2887 
2888 	if (ret != 0)
2889 		return (NULL);
2890 
2891 	if ((path = strdup(list[0].devname)) == NULL)
2892 		return (NULL);
2893 
2894 	devid_free_nmlist(list);
2895 
2896 	return (path);
2897 }
2898 
2899 /*
2900  * Convert from a path to a devid string.
2901  */
2902 static char *
2903 path_to_devid(const char *path)
2904 {
2905 	int fd;
2906 	ddi_devid_t devid;
2907 	char *minor, *ret;
2908 
2909 	if ((fd = open(path, O_RDONLY)) < 0)
2910 		return (NULL);
2911 
2912 	minor = NULL;
2913 	ret = NULL;
2914 	if (devid_get(fd, &devid) == 0) {
2915 		if (devid_get_minor_name(fd, &minor) == 0)
2916 			ret = devid_str_encode(devid, minor);
2917 		if (minor != NULL)
2918 			devid_str_free(minor);
2919 		devid_free(devid);
2920 	}
2921 	(void) close(fd);
2922 
2923 	return (ret);
2924 }
2925 
2926 /*
2927  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2928  * ignore any failure here, since a common case is for an unprivileged user to
2929  * type 'zpool status', and we'll display the correct information anyway.
2930  */
2931 static void
2932 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2933 {
2934 	zfs_cmd_t zc = { 0 };
2935 
2936 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2937 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
2938 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2939 	    &zc.zc_guid) == 0);
2940 
2941 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2942 }
2943 
2944 /*
2945  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2946  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2947  * We also check if this is a whole disk, in which case we strip off the
2948  * trailing 's0' slice name.
2949  *
2950  * This routine is also responsible for identifying when disks have been
2951  * reconfigured in a new location.  The kernel will have opened the device by
2952  * devid, but the path will still refer to the old location.  To catch this, we
2953  * first do a path -> devid translation (which is fast for the common case).  If
2954  * the devid matches, we're done.  If not, we do a reverse devid -> path
2955  * translation and issue the appropriate ioctl() to update the path of the vdev.
2956  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2957  * of these checks.
2958  */
2959 char *
2960 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
2961     boolean_t verbose)
2962 {
2963 	char *path, *devid;
2964 	uint64_t value;
2965 	char buf[64];
2966 	vdev_stat_t *vs;
2967 	uint_t vsc;
2968 
2969 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2970 	    &value) == 0) {
2971 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2972 		    &value) == 0);
2973 		(void) snprintf(buf, sizeof (buf), "%llu",
2974 		    (u_longlong_t)value);
2975 		path = buf;
2976 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2977 
2978 		/*
2979 		 * If the device is dead (faulted, offline, etc) then don't
2980 		 * bother opening it.  Otherwise we may be forcing the user to
2981 		 * open a misbehaving device, which can have undesirable
2982 		 * effects.
2983 		 */
2984 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2985 		    (uint64_t **)&vs, &vsc) != 0 ||
2986 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2987 		    zhp != NULL &&
2988 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2989 			/*
2990 			 * Determine if the current path is correct.
2991 			 */
2992 			char *newdevid = path_to_devid(path);
2993 
2994 			if (newdevid == NULL ||
2995 			    strcmp(devid, newdevid) != 0) {
2996 				char *newpath;
2997 
2998 				if ((newpath = devid_to_path(devid)) != NULL) {
2999 					/*
3000 					 * Update the path appropriately.
3001 					 */
3002 					set_path(zhp, nv, newpath);
3003 					if (nvlist_add_string(nv,
3004 					    ZPOOL_CONFIG_PATH, newpath) == 0)
3005 						verify(nvlist_lookup_string(nv,
3006 						    ZPOOL_CONFIG_PATH,
3007 						    &path) == 0);
3008 					free(newpath);
3009 				}
3010 			}
3011 
3012 			if (newdevid)
3013 				devid_str_free(newdevid);
3014 		}
3015 
3016 		if (strncmp(path, "/dev/dsk/", 9) == 0)
3017 			path += 9;
3018 
3019 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3020 		    &value) == 0 && value) {
3021 			char *tmp = zfs_strdup(hdl, path);
3022 			if (tmp == NULL)
3023 				return (NULL);
3024 			tmp[strlen(path) - 2] = '\0';
3025 			return (tmp);
3026 		}
3027 	} else {
3028 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3029 
3030 		/*
3031 		 * If it's a raidz device, we need to stick in the parity level.
3032 		 */
3033 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3034 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3035 			    &value) == 0);
3036 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
3037 			    (u_longlong_t)value);
3038 			path = buf;
3039 		}
3040 
3041 		/*
3042 		 * We identify each top-level vdev by using a <type-id>
3043 		 * naming convention.
3044 		 */
3045 		if (verbose) {
3046 			uint64_t id;
3047 
3048 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3049 			    &id) == 0);
3050 			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3051 			    (u_longlong_t)id);
3052 			path = buf;
3053 		}
3054 	}
3055 
3056 	return (zfs_strdup(hdl, path));
3057 }
3058 
3059 static int
3060 zbookmark_compare(const void *a, const void *b)
3061 {
3062 	return (memcmp(a, b, sizeof (zbookmark_t)));
3063 }
3064 
3065 /*
3066  * Retrieve the persistent error log, uniquify the members, and return to the
3067  * caller.
3068  */
3069 int
3070 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3071 {
3072 	zfs_cmd_t zc = { 0 };
3073 	uint64_t count;
3074 	zbookmark_t *zb = NULL;
3075 	int i;
3076 
3077 	/*
3078 	 * Retrieve the raw error list from the kernel.  If the number of errors
3079 	 * has increased, allocate more space and continue until we get the
3080 	 * entire list.
3081 	 */
3082 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3083 	    &count) == 0);
3084 	if (count == 0)
3085 		return (0);
3086 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3087 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3088 		return (-1);
3089 	zc.zc_nvlist_dst_size = count;
3090 	(void) strcpy(zc.zc_name, zhp->zpool_name);
3091 	for (;;) {
3092 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3093 		    &zc) != 0) {
3094 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
3095 			if (errno == ENOMEM) {
3096 				count = zc.zc_nvlist_dst_size;
3097 				if ((zc.zc_nvlist_dst = (uintptr_t)
3098 				    zfs_alloc(zhp->zpool_hdl, count *
3099 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
3100 					return (-1);
3101 			} else {
3102 				return (-1);
3103 			}
3104 		} else {
3105 			break;
3106 		}
3107 	}
3108 
3109 	/*
3110 	 * Sort the resulting bookmarks.  This is a little confusing due to the
3111 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
3112 	 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
3113 	 * _not_ copied as part of the process.  So we point the start of our
3114 	 * array appropriate and decrement the total number of elements.
3115 	 */
3116 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3117 	    zc.zc_nvlist_dst_size;
3118 	count -= zc.zc_nvlist_dst_size;
3119 
3120 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3121 
3122 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3123 
3124 	/*
3125 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3126 	 */
3127 	for (i = 0; i < count; i++) {
3128 		nvlist_t *nv;
3129 
3130 		/* ignoring zb_blkid and zb_level for now */
3131 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3132 		    zb[i-1].zb_object == zb[i].zb_object)
3133 			continue;
3134 
3135 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3136 			goto nomem;
3137 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3138 		    zb[i].zb_objset) != 0) {
3139 			nvlist_free(nv);
3140 			goto nomem;
3141 		}
3142 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3143 		    zb[i].zb_object) != 0) {
3144 			nvlist_free(nv);
3145 			goto nomem;
3146 		}
3147 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3148 			nvlist_free(nv);
3149 			goto nomem;
3150 		}
3151 		nvlist_free(nv);
3152 	}
3153 
3154 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
3155 	return (0);
3156 
3157 nomem:
3158 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
3159 	return (no_memory(zhp->zpool_hdl));
3160 }
3161 
3162 /*
3163  * Upgrade a ZFS pool to the latest on-disk version.
3164  */
3165 int
3166 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3167 {
3168 	zfs_cmd_t zc = { 0 };
3169 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3170 
3171 	(void) strcpy(zc.zc_name, zhp->zpool_name);
3172 	zc.zc_cookie = new_version;
3173 
3174 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3175 		return (zpool_standard_error_fmt(hdl, errno,
3176 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3177 		    zhp->zpool_name));
3178 	return (0);
3179 }
3180 
3181 void
3182 zpool_set_history_str(const char *subcommand, int argc, char **argv,
3183     char *history_str)
3184 {
3185 	int i;
3186 
3187 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
3188 	for (i = 1; i < argc; i++) {
3189 		if (strlen(history_str) + 1 + strlen(argv[i]) >
3190 		    HIS_MAX_RECORD_LEN)
3191 			break;
3192 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
3193 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
3194 	}
3195 }
3196 
3197 /*
3198  * Stage command history for logging.
3199  */
3200 int
3201 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3202 {
3203 	if (history_str == NULL)
3204 		return (EINVAL);
3205 
3206 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3207 		return (EINVAL);
3208 
3209 	if (hdl->libzfs_log_str != NULL)
3210 		free(hdl->libzfs_log_str);
3211 
3212 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3213 		return (no_memory(hdl));
3214 
3215 	return (0);
3216 }
3217 
3218 /*
3219  * Perform ioctl to get some command history of a pool.
3220  *
3221  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
3222  * logical offset of the history buffer to start reading from.
3223  *
3224  * Upon return, 'off' is the next logical offset to read from and
3225  * 'len' is the actual amount of bytes read into 'buf'.
3226  */
3227 static int
3228 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3229 {
3230 	zfs_cmd_t zc = { 0 };
3231 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3232 
3233 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3234 
3235 	zc.zc_history = (uint64_t)(uintptr_t)buf;
3236 	zc.zc_history_len = *len;
3237 	zc.zc_history_offset = *off;
3238 
3239 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3240 		switch (errno) {
3241 		case EPERM:
3242 			return (zfs_error_fmt(hdl, EZFS_PERM,
3243 			    dgettext(TEXT_DOMAIN,
3244 			    "cannot show history for pool '%s'"),
3245 			    zhp->zpool_name));
3246 		case ENOENT:
3247 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3248 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
3249 			    "'%s'"), zhp->zpool_name));
3250 		case ENOTSUP:
3251 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3252 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
3253 			    "'%s', pool must be upgraded"), zhp->zpool_name));
3254 		default:
3255 			return (zpool_standard_error_fmt(hdl, errno,
3256 			    dgettext(TEXT_DOMAIN,
3257 			    "cannot get history for '%s'"), zhp->zpool_name));
3258 		}
3259 	}
3260 
3261 	*len = zc.zc_history_len;
3262 	*off = zc.zc_history_offset;
3263 
3264 	return (0);
3265 }
3266 
3267 /*
3268  * Process the buffer of nvlists, unpacking and storing each nvlist record
3269  * into 'records'.  'leftover' is set to the number of bytes that weren't
3270  * processed as there wasn't a complete record.
3271  */
3272 int
3273 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3274     nvlist_t ***records, uint_t *numrecords)
3275 {
3276 	uint64_t reclen;
3277 	nvlist_t *nv;
3278 	int i;
3279 
3280 	while (bytes_read > sizeof (reclen)) {
3281 
3282 		/* get length of packed record (stored as little endian) */
3283 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3284 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3285 
3286 		if (bytes_read < sizeof (reclen) + reclen)
3287 			break;
3288 
3289 		/* unpack record */
3290 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3291 			return (ENOMEM);
3292 		bytes_read -= sizeof (reclen) + reclen;
3293 		buf += sizeof (reclen) + reclen;
3294 
3295 		/* add record to nvlist array */
3296 		(*numrecords)++;
3297 		if (ISP2(*numrecords + 1)) {
3298 			*records = realloc(*records,
3299 			    *numrecords * 2 * sizeof (nvlist_t *));
3300 		}
3301 		(*records)[*numrecords - 1] = nv;
3302 	}
3303 
3304 	*leftover = bytes_read;
3305 	return (0);
3306 }
3307 
3308 #define	HIS_BUF_LEN	(128*1024)
3309 
3310 /*
3311  * Retrieve the command history of a pool.
3312  */
3313 int
3314 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3315 {
3316 	char buf[HIS_BUF_LEN];
3317 	uint64_t off = 0;
3318 	nvlist_t **records = NULL;
3319 	uint_t numrecords = 0;
3320 	int err, i;
3321 
3322 	do {
3323 		uint64_t bytes_read = sizeof (buf);
3324 		uint64_t leftover;
3325 
3326 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3327 			break;
3328 
3329 		/* if nothing else was read in, we're at EOF, just return */
3330 		if (!bytes_read)
3331 			break;
3332 
3333 		if ((err = zpool_history_unpack(buf, bytes_read,
3334 		    &leftover, &records, &numrecords)) != 0)
3335 			break;
3336 		off -= leftover;
3337 
3338 		/* CONSTCOND */
3339 	} while (1);
3340 
3341 	if (!err) {
3342 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3343 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3344 		    records, numrecords) == 0);
3345 	}
3346 	for (i = 0; i < numrecords; i++)
3347 		nvlist_free(records[i]);
3348 	free(records);
3349 
3350 	return (err);
3351 }
3352 
3353 void
3354 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3355     char *pathname, size_t len)
3356 {
3357 	zfs_cmd_t zc = { 0 };
3358 	boolean_t mounted = B_FALSE;
3359 	char *mntpnt = NULL;
3360 	char dsname[MAXNAMELEN];
3361 
3362 	if (dsobj == 0) {
3363 		/* special case for the MOS */
3364 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3365 		return;
3366 	}
3367 
3368 	/* get the dataset's name */
3369 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3370 	zc.zc_obj = dsobj;
3371 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
3372 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3373 		/* just write out a path of two object numbers */
3374 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3375 		    dsobj, obj);
3376 		return;
3377 	}
3378 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3379 
3380 	/* find out if the dataset is mounted */
3381 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3382 
3383 	/* get the corrupted object's path */
3384 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3385 	zc.zc_obj = obj;
3386 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3387 	    &zc) == 0) {
3388 		if (mounted) {
3389 			(void) snprintf(pathname, len, "%s%s", mntpnt,
3390 			    zc.zc_value);
3391 		} else {
3392 			(void) snprintf(pathname, len, "%s:%s",
3393 			    dsname, zc.zc_value);
3394 		}
3395 	} else {
3396 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3397 	}
3398 	free(mntpnt);
3399 }
3400 
3401 /*
3402  * Read the EFI label from the config, if a label does not exist then
3403  * pass back the error to the caller. If the caller has passed a non-NULL
3404  * diskaddr argument then we set it to the starting address of the EFI
3405  * partition.
3406  */
3407 static int
3408 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3409 {
3410 	char *path;
3411 	int fd;
3412 	char diskname[MAXPATHLEN];
3413 	int err = -1;
3414 
3415 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3416 		return (err);
3417 
3418 	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3419 	    strrchr(path, '/'));
3420 	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3421 		struct dk_gpt *vtoc;
3422 
3423 		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3424 			if (sb != NULL)
3425 				*sb = vtoc->efi_parts[0].p_start;
3426 			efi_free(vtoc);
3427 		}
3428 		(void) close(fd);
3429 	}
3430 	return (err);
3431 }
3432 
3433 /*
3434  * determine where a partition starts on a disk in the current
3435  * configuration
3436  */
3437 static diskaddr_t
3438 find_start_block(nvlist_t *config)
3439 {
3440 	nvlist_t **child;
3441 	uint_t c, children;
3442 	diskaddr_t sb = MAXOFFSET_T;
3443 	uint64_t wholedisk;
3444 
3445 	if (nvlist_lookup_nvlist_array(config,
3446 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3447 		if (nvlist_lookup_uint64(config,
3448 		    ZPOOL_CONFIG_WHOLE_DISK,
3449 		    &wholedisk) != 0 || !wholedisk) {
3450 			return (MAXOFFSET_T);
3451 		}
3452 		if (read_efi_label(config, &sb) < 0)
3453 			sb = MAXOFFSET_T;
3454 		return (sb);
3455 	}
3456 
3457 	for (c = 0; c < children; c++) {
3458 		sb = find_start_block(child[c]);
3459 		if (sb != MAXOFFSET_T) {
3460 			return (sb);
3461 		}
3462 	}
3463 	return (MAXOFFSET_T);
3464 }
3465 
3466 /*
3467  * Label an individual disk.  The name provided is the short name,
3468  * stripped of any leading /dev path.
3469  */
3470 int
3471 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3472 {
3473 	char path[MAXPATHLEN];
3474 	struct dk_gpt *vtoc;
3475 	int fd;
3476 	size_t resv = EFI_MIN_RESV_SIZE;
3477 	uint64_t slice_size;
3478 	diskaddr_t start_block;
3479 	char errbuf[1024];
3480 
3481 	/* prepare an error message just in case */
3482 	(void) snprintf(errbuf, sizeof (errbuf),
3483 	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3484 
3485 	if (zhp) {
3486 		nvlist_t *nvroot;
3487 
3488 		if (pool_is_bootable(zhp)) {
3489 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3490 			    "EFI labeled devices are not supported on root "
3491 			    "pools."));
3492 			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3493 		}
3494 
3495 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
3496 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3497 
3498 		if (zhp->zpool_start_block == 0)
3499 			start_block = find_start_block(nvroot);
3500 		else
3501 			start_block = zhp->zpool_start_block;
3502 		zhp->zpool_start_block = start_block;
3503 	} else {
3504 		/* new pool */
3505 		start_block = NEW_START_BLOCK;
3506 	}
3507 
3508 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3509 	    BACKUP_SLICE);
3510 
3511 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3512 		/*
3513 		 * This shouldn't happen.  We've long since verified that this
3514 		 * is a valid device.
3515 		 */
3516 		zfs_error_aux(hdl,
3517 		    dgettext(TEXT_DOMAIN, "unable to open device"));
3518 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3519 	}
3520 
3521 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3522 		/*
3523 		 * The only way this can fail is if we run out of memory, or we
3524 		 * were unable to read the disk's capacity
3525 		 */
3526 		if (errno == ENOMEM)
3527 			(void) no_memory(hdl);
3528 
3529 		(void) close(fd);
3530 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3531 		    "unable to read disk capacity"), name);
3532 
3533 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3534 	}
3535 
3536 	slice_size = vtoc->efi_last_u_lba + 1;
3537 	slice_size -= EFI_MIN_RESV_SIZE;
3538 	if (start_block == MAXOFFSET_T)
3539 		start_block = NEW_START_BLOCK;
3540 	slice_size -= start_block;
3541 
3542 	vtoc->efi_parts[0].p_start = start_block;
3543 	vtoc->efi_parts[0].p_size = slice_size;
3544 
3545 	/*
3546 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
3547 	 * disposable by some EFI utilities (since EFI doesn't have a backup
3548 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
3549 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
3550 	 * etc. were all pretty specific.  V_USR is as close to reality as we
3551 	 * can get, in the absence of V_OTHER.
3552 	 */
3553 	vtoc->efi_parts[0].p_tag = V_USR;
3554 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3555 
3556 	vtoc->efi_parts[8].p_start = slice_size + start_block;
3557 	vtoc->efi_parts[8].p_size = resv;
3558 	vtoc->efi_parts[8].p_tag = V_RESERVED;
3559 
3560 	if (efi_write(fd, vtoc) != 0) {
3561 		/*
3562 		 * Some block drivers (like pcata) may not support EFI
3563 		 * GPT labels.  Print out a helpful error message dir-
3564 		 * ecting the user to manually label the disk and give
3565 		 * a specific slice.
3566 		 */
3567 		(void) close(fd);
3568 		efi_free(vtoc);
3569 
3570 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3571 		    "try using fdisk(1M) and then provide a specific slice"));
3572 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3573 	}
3574 
3575 	(void) close(fd);
3576 	efi_free(vtoc);
3577 	return (0);
3578 }
3579 
3580 static boolean_t
3581 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3582 {
3583 	char *type;
3584 	nvlist_t **child;
3585 	uint_t children, c;
3586 
3587 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3588 	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3589 	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
3590 	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
3591 	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3592 	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
3593 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3594 		    "vdev type '%s' is not supported"), type);
3595 		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3596 		return (B_FALSE);
3597 	}
3598 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3599 	    &child, &children) == 0) {
3600 		for (c = 0; c < children; c++) {
3601 			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3602 				return (B_FALSE);
3603 		}
3604 	}
3605 	return (B_TRUE);
3606 }
3607 
3608 /*
3609  * check if this zvol is allowable for use as a dump device; zero if
3610  * it is, > 0 if it isn't, < 0 if it isn't a zvol
3611  */
3612 int
3613 zvol_check_dump_config(char *arg)
3614 {
3615 	zpool_handle_t *zhp = NULL;
3616 	nvlist_t *config, *nvroot;
3617 	char *p, *volname;
3618 	nvlist_t **top;
3619 	uint_t toplevels;
3620 	libzfs_handle_t *hdl;
3621 	char errbuf[1024];
3622 	char poolname[ZPOOL_MAXNAMELEN];
3623 	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3624 	int ret = 1;
3625 
3626 	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3627 		return (-1);
3628 	}
3629 
3630 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3631 	    "dump is not supported on device '%s'"), arg);
3632 
3633 	if ((hdl = libzfs_init()) == NULL)
3634 		return (1);
3635 	libzfs_print_on_error(hdl, B_TRUE);
3636 
3637 	volname = arg + pathlen;
3638 
3639 	/* check the configuration of the pool */
3640 	if ((p = strchr(volname, '/')) == NULL) {
3641 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3642 		    "malformed dataset name"));
3643 		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
3644 		return (1);
3645 	} else if (p - volname >= ZFS_MAXNAMELEN) {
3646 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3647 		    "dataset name is too long"));
3648 		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
3649 		return (1);
3650 	} else {
3651 		(void) strncpy(poolname, volname, p - volname);
3652 		poolname[p - volname] = '\0';
3653 	}
3654 
3655 	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
3656 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3657 		    "could not open pool '%s'"), poolname);
3658 		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
3659 		goto out;
3660 	}
3661 	config = zpool_get_config(zhp, NULL);
3662 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3663 	    &nvroot) != 0) {
3664 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3665 		    "could not obtain vdev configuration for  '%s'"), poolname);
3666 		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3667 		goto out;
3668 	}
3669 
3670 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3671 	    &top, &toplevels) == 0);
3672 	if (toplevels != 1) {
3673 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3674 		    "'%s' has multiple top level vdevs"), poolname);
3675 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3676 		goto out;
3677 	}
3678 
3679 	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
3680 		goto out;
3681 	}
3682 	ret = 0;
3683 
3684 out:
3685 	if (zhp)
3686 		zpool_close(zhp);
3687 	libzfs_fini(hdl);
3688 	return (ret);
3689 }
3690