xref: /titanic_51/usr/src/lib/libzfs/common/libzfs_pool.c (revision dedec472759b1a1a25044d504201ef59ccbffb56)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <ctype.h>
28 #include <errno.h>
29 #include <devid.h>
30 #include <fcntl.h>
31 #include <libintl.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <strings.h>
35 #include <unistd.h>
36 #include <sys/efi_partition.h>
37 #include <sys/vtoc.h>
38 #include <sys/zfs_ioctl.h>
39 #include <dlfcn.h>
40 
41 #include "zfs_namecheck.h"
42 #include "zfs_prop.h"
43 #include "libzfs_impl.h"
44 
45 const char *hist_event_table[LOG_END] = {
46 	"invalid event",
47 	"pool create",
48 	"vdev add",
49 	"pool remove",
50 	"pool destroy",
51 	"pool export",
52 	"pool import",
53 	"vdev attach",
54 	"vdev replace",
55 	"vdev detach",
56 	"vdev online",
57 	"vdev offline",
58 	"vdev upgrade",
59 	"pool clear",
60 	"pool scrub",
61 	"pool property set",
62 	"create",
63 	"clone",
64 	"destroy",
65 	"destroy_begin_sync",
66 	"inherit",
67 	"property set",
68 	"quota set",
69 	"permission update",
70 	"permission remove",
71 	"permission who remove",
72 	"promote",
73 	"receive",
74 	"rename",
75 	"reservation set",
76 	"replay_inc_sync",
77 	"replay_full_sync",
78 	"rollback",
79 	"snapshot",
80 	"filesystem version upgrade",
81 	"refquota set",
82 	"refreservation set",
83 	"pool scrub done",
84 	"user hold",
85 	"user release",
86 };
87 
88 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
89 
90 #if defined(__i386) || defined(__amd64)
91 #define	BOOTCMD	"installgrub(1M)"
92 #else
93 #define	BOOTCMD	"installboot(1M)"
94 #endif
95 
96 #define	DISK_ROOT	"/dev/dsk"
97 #define	RDISK_ROOT	"/dev/rdsk"
98 #define	BACKUP_SLICE	"s2"
99 
100 /*
101  * ====================================================================
102  *   zpool property functions
103  * ====================================================================
104  */
105 
106 static int
107 zpool_get_all_props(zpool_handle_t *zhp)
108 {
109 	zfs_cmd_t zc = { 0 };
110 	libzfs_handle_t *hdl = zhp->zpool_hdl;
111 
112 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
113 
114 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
115 		return (-1);
116 
117 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
118 		if (errno == ENOMEM) {
119 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
120 				zcmd_free_nvlists(&zc);
121 				return (-1);
122 			}
123 		} else {
124 			zcmd_free_nvlists(&zc);
125 			return (-1);
126 		}
127 	}
128 
129 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
130 		zcmd_free_nvlists(&zc);
131 		return (-1);
132 	}
133 
134 	zcmd_free_nvlists(&zc);
135 
136 	return (0);
137 }
138 
139 static int
140 zpool_props_refresh(zpool_handle_t *zhp)
141 {
142 	nvlist_t *old_props;
143 
144 	old_props = zhp->zpool_props;
145 
146 	if (zpool_get_all_props(zhp) != 0)
147 		return (-1);
148 
149 	nvlist_free(old_props);
150 	return (0);
151 }
152 
153 static char *
154 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
155     zprop_source_t *src)
156 {
157 	nvlist_t *nv, *nvl;
158 	uint64_t ival;
159 	char *value;
160 	zprop_source_t source;
161 
162 	nvl = zhp->zpool_props;
163 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
164 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
165 		source = ival;
166 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
167 	} else {
168 		source = ZPROP_SRC_DEFAULT;
169 		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
170 			value = "-";
171 	}
172 
173 	if (src)
174 		*src = source;
175 
176 	return (value);
177 }
178 
179 uint64_t
180 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
181 {
182 	nvlist_t *nv, *nvl;
183 	uint64_t value;
184 	zprop_source_t source;
185 
186 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
187 		/*
188 		 * zpool_get_all_props() has most likely failed because
189 		 * the pool is faulted, but if all we need is the top level
190 		 * vdev's guid then get it from the zhp config nvlist.
191 		 */
192 		if ((prop == ZPOOL_PROP_GUID) &&
193 		    (nvlist_lookup_nvlist(zhp->zpool_config,
194 		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
195 		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
196 		    == 0)) {
197 			return (value);
198 		}
199 		return (zpool_prop_default_numeric(prop));
200 	}
201 
202 	nvl = zhp->zpool_props;
203 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
204 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
205 		source = value;
206 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
207 	} else {
208 		source = ZPROP_SRC_DEFAULT;
209 		value = zpool_prop_default_numeric(prop);
210 	}
211 
212 	if (src)
213 		*src = source;
214 
215 	return (value);
216 }
217 
218 /*
219  * Map vdev states to printed strings.
220  */
221 char *
222 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
223 {
224 	switch (state) {
225 	case VDEV_STATE_CLOSED:
226 	case VDEV_STATE_OFFLINE:
227 		return (gettext("OFFLINE"));
228 	case VDEV_STATE_REMOVED:
229 		return (gettext("REMOVED"));
230 	case VDEV_STATE_CANT_OPEN:
231 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
232 			return (gettext("FAULTED"));
233 		else
234 			return (gettext("UNAVAIL"));
235 	case VDEV_STATE_FAULTED:
236 		return (gettext("FAULTED"));
237 	case VDEV_STATE_DEGRADED:
238 		return (gettext("DEGRADED"));
239 	case VDEV_STATE_HEALTHY:
240 		return (gettext("ONLINE"));
241 	}
242 
243 	return (gettext("UNKNOWN"));
244 }
245 
246 /*
247  * Get a zpool property value for 'prop' and return the value in
248  * a pre-allocated buffer.
249  */
250 int
251 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
252     zprop_source_t *srctype)
253 {
254 	uint64_t intval;
255 	const char *strval;
256 	zprop_source_t src = ZPROP_SRC_NONE;
257 	nvlist_t *nvroot;
258 	vdev_stat_t *vs;
259 	uint_t vsc;
260 
261 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
262 		switch (prop) {
263 		case ZPOOL_PROP_NAME:
264 			(void) strlcpy(buf, zpool_get_name(zhp), len);
265 			break;
266 
267 		case ZPOOL_PROP_HEALTH:
268 			(void) strlcpy(buf, "FAULTED", len);
269 			break;
270 
271 		case ZPOOL_PROP_GUID:
272 			intval = zpool_get_prop_int(zhp, prop, &src);
273 			(void) snprintf(buf, len, "%llu", intval);
274 			break;
275 
276 		case ZPOOL_PROP_ALTROOT:
277 		case ZPOOL_PROP_CACHEFILE:
278 			if (zhp->zpool_props != NULL ||
279 			    zpool_get_all_props(zhp) == 0) {
280 				(void) strlcpy(buf,
281 				    zpool_get_prop_string(zhp, prop, &src),
282 				    len);
283 				if (srctype != NULL)
284 					*srctype = src;
285 				return (0);
286 			}
287 			/* FALLTHROUGH */
288 		default:
289 			(void) strlcpy(buf, "-", len);
290 			break;
291 		}
292 
293 		if (srctype != NULL)
294 			*srctype = src;
295 		return (0);
296 	}
297 
298 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
299 	    prop != ZPOOL_PROP_NAME)
300 		return (-1);
301 
302 	switch (zpool_prop_get_type(prop)) {
303 	case PROP_TYPE_STRING:
304 		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
305 		    len);
306 		break;
307 
308 	case PROP_TYPE_NUMBER:
309 		intval = zpool_get_prop_int(zhp, prop, &src);
310 
311 		switch (prop) {
312 		case ZPOOL_PROP_SIZE:
313 		case ZPOOL_PROP_USED:
314 		case ZPOOL_PROP_AVAILABLE:
315 			(void) zfs_nicenum(intval, buf, len);
316 			break;
317 
318 		case ZPOOL_PROP_CAPACITY:
319 			(void) snprintf(buf, len, "%llu%%",
320 			    (u_longlong_t)intval);
321 			break;
322 
323 		case ZPOOL_PROP_HEALTH:
324 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
325 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
326 			verify(nvlist_lookup_uint64_array(nvroot,
327 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
328 
329 			(void) strlcpy(buf, zpool_state_to_name(intval,
330 			    vs->vs_aux), len);
331 			break;
332 		default:
333 			(void) snprintf(buf, len, "%llu", intval);
334 		}
335 		break;
336 
337 	case PROP_TYPE_INDEX:
338 		intval = zpool_get_prop_int(zhp, prop, &src);
339 		if (zpool_prop_index_to_string(prop, intval, &strval)
340 		    != 0)
341 			return (-1);
342 		(void) strlcpy(buf, strval, len);
343 		break;
344 
345 	default:
346 		abort();
347 	}
348 
349 	if (srctype)
350 		*srctype = src;
351 
352 	return (0);
353 }
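/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source; guarded with #if 0 so it is never compiled).  Shows how a
 * libzfs consumer might read pool properties with zpool_get_prop();
 * the pool name "tank" is an assumption for the example.
 */
#if 0
static void
example_print_props(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	char health[ZFS_MAXPROPLEN], cap[ZFS_MAXPROPLEN];

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;

	/* Each property is rendered into a caller-supplied buffer. */
	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
	    sizeof (health), NULL) == 0 &&
	    zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, cap,
	    sizeof (cap), NULL) == 0)
		(void) printf("%s: %s, %s full\n",
		    zpool_get_name(zhp), health, cap);

	zpool_close(zhp);
}
#endif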
354 
355 /*
356  * Check that the bootfs name is within the pool on which it is being set.
357  * Assumes bootfs is a valid dataset name.
358  */
359 static boolean_t
360 bootfs_name_valid(const char *pool, char *bootfs)
361 {
362 	int len = strlen(pool);
363 
364 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
365 		return (B_FALSE);
366 
367 	if (strncmp(pool, bootfs, len) == 0 &&
368 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
369 		return (B_TRUE);
370 
371 	return (B_FALSE);
372 }
373 
374 /*
375  * Inspect the configuration to determine if any of the devices contain
376  * an EFI label.
377  */
378 static boolean_t
379 pool_uses_efi(nvlist_t *config)
380 {
381 	nvlist_t **child;
382 	uint_t c, children;
383 
384 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
385 	    &child, &children) != 0)
386 		return (read_efi_label(config, NULL) >= 0);
387 
388 	for (c = 0; c < children; c++) {
389 		if (pool_uses_efi(child[c]))
390 			return (B_TRUE);
391 	}
392 	return (B_FALSE);
393 }
394 
395 static boolean_t
396 pool_is_bootable(zpool_handle_t *zhp)
397 {
398 	char bootfs[ZPOOL_MAXNAMELEN];
399 
400 	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
401 	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
402 	    sizeof (bootfs)) != 0);
403 }
404 
405 
406 /*
407  * Given an nvlist of zpool properties to be set, validate that they are
408  * correct, and parse any numeric properties (index, boolean, etc) if they are
409  * specified as strings.
410  */
411 static nvlist_t *
412 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
413     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
414 {
415 	nvpair_t *elem;
416 	nvlist_t *retprops;
417 	zpool_prop_t prop;
418 	char *strval;
419 	uint64_t intval;
420 	char *slash;
421 	struct stat64 statbuf;
422 	zpool_handle_t *zhp;
423 	nvlist_t *nvroot;
424 
425 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
426 		(void) no_memory(hdl);
427 		return (NULL);
428 	}
429 
430 	elem = NULL;
431 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
432 		const char *propname = nvpair_name(elem);
433 
434 		/*
435 		 * Make sure this property is valid and applies to this type.
436 		 */
437 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
438 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
439 			    "invalid property '%s'"), propname);
440 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
441 			goto error;
442 		}
443 
444 		if (zpool_prop_readonly(prop)) {
445 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
446 			    "is readonly"), propname);
447 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
448 			goto error;
449 		}
450 
451 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
452 		    &strval, &intval, errbuf) != 0)
453 			goto error;
454 
455 		/*
456 		 * Perform additional checking for specific properties.
457 		 */
458 		switch (prop) {
459 		case ZPOOL_PROP_VERSION:
460 			if (intval < version || intval > SPA_VERSION) {
461 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
462 				    "property '%s' number %d is invalid."),
463 				    propname, intval);
464 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
465 				goto error;
466 			}
467 			break;
468 
469 		case ZPOOL_PROP_BOOTFS:
470 			if (create_or_import) {
471 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
472 				    "property '%s' cannot be set at creation "
473 				    "or import time"), propname);
474 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
475 				goto error;
476 			}
477 
478 			if (version < SPA_VERSION_BOOTFS) {
479 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
480 				    "pool must be upgraded to support "
481 				    "'%s' property"), propname);
482 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
483 				goto error;
484 			}
485 
486 			/*
487 			 * The bootfs property value has to be a dataset name,
488 			 * and the dataset has to be in the pool it is set on.
489 			 */
490 			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
491 			    strval)) {
492 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
493 				    "is an invalid name"), strval);
494 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
495 				goto error;
496 			}
497 
498 			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
499 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
500 				    "could not open pool '%s'"), poolname);
501 				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
502 				goto error;
503 			}
504 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
505 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
506 
507 			/*
508 			 * bootfs property cannot be set on a disk which has
509 			 * been EFI labeled.
510 			 */
511 			if (pool_uses_efi(nvroot)) {
512 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
513 				    "property '%s' not supported on "
514 				    "EFI labeled devices"), propname);
515 				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
516 				zpool_close(zhp);
517 				goto error;
518 			}
519 			zpool_close(zhp);
520 			break;
521 
522 		case ZPOOL_PROP_ALTROOT:
523 			if (!create_or_import) {
524 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
525 				    "property '%s' can only be set during pool "
526 				    "creation or import"), propname);
527 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
528 				goto error;
529 			}
530 
531 			if (strval[0] != '/') {
532 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
533 				    "bad alternate root '%s'"), strval);
534 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
535 				goto error;
536 			}
537 			break;
538 
539 		case ZPOOL_PROP_CACHEFILE:
540 			if (strval[0] == '\0')
541 				break;
542 
543 			if (strcmp(strval, "none") == 0)
544 				break;
545 
546 			if (strval[0] != '/') {
547 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
548 				    "property '%s' must be empty, an "
549 				    "absolute path, or 'none'"), propname);
550 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
551 				goto error;
552 			}
553 
554 			slash = strrchr(strval, '/');
555 
556 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
557 			    strcmp(slash, "/..") == 0) {
558 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559 				    "'%s' is not a valid file"), strval);
560 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
561 				goto error;
562 			}
563 
564 			*slash = '\0';
565 
566 			if (strval[0] != '\0' &&
567 			    (stat64(strval, &statbuf) != 0 ||
568 			    !S_ISDIR(statbuf.st_mode))) {
569 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
570 				    "'%s' is not a valid directory"),
571 				    strval);
572 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
573 				goto error;
574 			}
575 
576 			*slash = '/';
577 			break;
578 		}
579 	}
580 
581 	return (retprops);
582 error:
583 	nvlist_free(retprops);
584 	return (NULL);
585 }
586 
587 /*
588  * Set zpool property : propname=propval.
589  */
590 int
591 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
592 {
593 	zfs_cmd_t zc = { 0 };
594 	int ret = -1;
595 	char errbuf[1024];
596 	nvlist_t *nvl = NULL;
597 	nvlist_t *realprops;
598 	uint64_t version;
599 
600 	(void) snprintf(errbuf, sizeof (errbuf),
601 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
602 	    zhp->zpool_name);
603 
604 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
605 		return (no_memory(zhp->zpool_hdl));
606 
607 	if (nvlist_add_string(nvl, propname, propval) != 0) {
608 		nvlist_free(nvl);
609 		return (no_memory(zhp->zpool_hdl));
610 	}
611 
612 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
613 	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
614 	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
615 		nvlist_free(nvl);
616 		return (-1);
617 	}
618 
619 	nvlist_free(nvl);
620 	nvl = realprops;
621 
622 	/*
623 	 * Execute the corresponding ioctl() to set this property.
624 	 */
625 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
626 
627 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
628 		nvlist_free(nvl);
629 		return (-1);
630 	}
631 
632 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
633 
634 	zcmd_free_nvlists(&zc);
635 	nvlist_free(nvl);
636 
637 	if (ret)
638 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
639 	else
640 		(void) zpool_props_refresh(zhp);
641 
642 	return (ret);
643 }
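/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source).  Properties are passed as name/value strings and run
 * through zpool_valid_proplist() before the ioctl is issued; the
 * pool name "tank" is an assumption for the example.
 */
#if 0
static int
example_enable_autoexpand(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	int err;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return (-1);
	/* Equivalent to "zpool set autoexpand=on tank". */
	err = zpool_set_prop(zhp, "autoexpand", "on");
	zpool_close(zhp);
	return (err);
}
#endif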
644 
645 int
646 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
647 {
648 	libzfs_handle_t *hdl = zhp->zpool_hdl;
649 	zprop_list_t *entry;
650 	char buf[ZFS_MAXPROPLEN];
651 
652 	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
653 		return (-1);
654 
655 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
656 
657 		if (entry->pl_fixed)
658 			continue;
659 
660 		if (entry->pl_prop != ZPROP_INVAL &&
661 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
662 		    NULL) == 0) {
663 			if (strlen(buf) > entry->pl_width)
664 				entry->pl_width = strlen(buf);
665 		}
666 	}
667 
668 	return (0);
669 }
670 
671 
672 /*
673  * Don't start the slice at the default block of 34; many storage
674  * devices will use a stripe width of 128k, so start there instead.
675  */
676 #define	NEW_START_BLOCK	256
677 
678 /*
679  * Validate the given pool name, optionally setting an extended error message
680  * on 'hdl'.
681  */
682 boolean_t
683 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
684 {
685 	namecheck_err_t why;
686 	char what;
687 	int ret;
688 
689 	ret = pool_namecheck(pool, &why, &what);
690 
691 	/*
692 	 * The rules for reserved pool names were extended at a later point.
693 	 * But we need to support users with existing pools that may now be
694 	 * invalid.  So we only check for this expanded set of names during a
695 	 * create (or import), and only in userland.
696 	 */
697 	if (ret == 0 && !isopen &&
698 	    (strncmp(pool, "mirror", 6) == 0 ||
699 	    strncmp(pool, "raidz", 5) == 0 ||
700 	    strncmp(pool, "spare", 5) == 0 ||
701 	    strcmp(pool, "log") == 0)) {
702 		if (hdl != NULL)
703 			zfs_error_aux(hdl,
704 			    dgettext(TEXT_DOMAIN, "name is reserved"));
705 		return (B_FALSE);
706 	}
707 
708 
709 	if (ret != 0) {
710 		if (hdl != NULL) {
711 			switch (why) {
712 			case NAME_ERR_TOOLONG:
713 				zfs_error_aux(hdl,
714 				    dgettext(TEXT_DOMAIN, "name is too long"));
715 				break;
716 
717 			case NAME_ERR_INVALCHAR:
718 				zfs_error_aux(hdl,
719 				    dgettext(TEXT_DOMAIN, "invalid character "
720 				    "'%c' in pool name"), what);
721 				break;
722 
723 			case NAME_ERR_NOLETTER:
724 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
725 				    "name must begin with a letter"));
726 				break;
727 
728 			case NAME_ERR_RESERVED:
729 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
730 				    "name is reserved"));
731 				break;
732 
733 			case NAME_ERR_DISKLIKE:
734 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
735 				    "pool name is reserved"));
736 				break;
737 
738 			case NAME_ERR_LEADING_SLASH:
739 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
740 				    "leading slash in name"));
741 				break;
742 
743 			case NAME_ERR_EMPTY_COMPONENT:
744 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
745 				    "empty component in name"));
746 				break;
747 
748 			case NAME_ERR_TRAILING_SLASH:
749 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
750 				    "trailing slash in name"));
751 				break;
752 
753 			case NAME_ERR_MULTIPLE_AT:
754 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
755 				    "multiple '@' delimiters in name"));
756 				break;
757 
758 			}
759 		}
760 		return (B_FALSE);
761 	}
762 
763 	return (B_TRUE);
764 }
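/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source).  During create or import (isopen == B_FALSE) the check
 * above also rejects reserved vdev-type prefixes such as "mirror"
 * and "raidz"; existing pools opened with isopen == B_TRUE are
 * exempt.
 */
#if 0
static void
example_check_names(libzfs_handle_t *hdl)
{
	/* Valid: begins with a letter, ordinary characters. */
	(void) printf("tank: %d\n",
	    zpool_name_valid(hdl, B_FALSE, "tank"));	/* B_TRUE */
	/* Invalid at create time: reserved vdev-type prefix. */
	(void) printf("mirrorpool: %d\n",
	    zpool_name_valid(hdl, B_FALSE, "mirrorpool"));	/* B_FALSE */
}
#endif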
765 
766 /*
767  * Open a handle to the given pool, even if the pool is currently in the FAULTED
768  * state.
769  */
770 zpool_handle_t *
771 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
772 {
773 	zpool_handle_t *zhp;
774 	boolean_t missing;
775 
776 	/*
777 	 * Make sure the pool name is valid.
778 	 */
779 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
780 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
781 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
782 		    pool);
783 		return (NULL);
784 	}
785 
786 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
787 		return (NULL);
788 
789 	zhp->zpool_hdl = hdl;
790 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
791 
792 	if (zpool_refresh_stats(zhp, &missing) != 0) {
793 		zpool_close(zhp);
794 		return (NULL);
795 	}
796 
797 	if (missing) {
798 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
799 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
800 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
801 		zpool_close(zhp);
802 		return (NULL);
803 	}
804 
805 	return (zhp);
806 }
807 
808 /*
809  * Like the above, but silent on error.  Used when iterating over pools (because
810  * the configuration cache may be out of date).
811  */
812 int
813 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
814 {
815 	zpool_handle_t *zhp;
816 	boolean_t missing;
817 
818 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
819 		return (-1);
820 
821 	zhp->zpool_hdl = hdl;
822 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
823 
824 	if (zpool_refresh_stats(zhp, &missing) != 0) {
825 		zpool_close(zhp);
826 		return (-1);
827 	}
828 
829 	if (missing) {
830 		zpool_close(zhp);
831 		*ret = NULL;
832 		return (0);
833 	}
834 
835 	*ret = zhp;
836 	return (0);
837 }
838 
839 /*
840  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
841  * state.
842  */
843 zpool_handle_t *
844 zpool_open(libzfs_handle_t *hdl, const char *pool)
845 {
846 	zpool_handle_t *zhp;
847 
848 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
849 		return (NULL);
850 
851 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
852 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
853 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
854 		zpool_close(zhp);
855 		return (NULL);
856 	}
857 
858 	return (zhp);
859 }
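/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source).  Typical open/close lifecycle: zpool_open() refuses
 * FAULTED pools, while zpool_open_canfail() still returns a handle
 * so the caller can inspect or repair the pool; "tank" is an
 * assumption for the example.
 */
#if 0
static void
example_open_lifecycle(void)
{
	libzfs_handle_t *hdl;
	zpool_handle_t *zhp;

	if ((hdl = libzfs_init()) == NULL)
		return;
	if ((zhp = zpool_open_canfail(hdl, "tank")) != NULL) {
		if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL)
			(void) printf("%s is faulted\n",
			    zpool_get_name(zhp));
		zpool_close(zhp);
	}
	libzfs_fini(hdl);
}
#endif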
860 
861 /*
862  * Close the handle.  Simply frees the memory associated with the handle.
863  */
864 void
865 zpool_close(zpool_handle_t *zhp)
866 {
867 	if (zhp->zpool_config)
868 		nvlist_free(zhp->zpool_config);
869 	if (zhp->zpool_old_config)
870 		nvlist_free(zhp->zpool_old_config);
871 	if (zhp->zpool_props)
872 		nvlist_free(zhp->zpool_props);
873 	free(zhp);
874 }
875 
876 /*
877  * Return the name of the pool.
878  */
879 const char *
880 zpool_get_name(zpool_handle_t *zhp)
881 {
882 	return (zhp->zpool_name);
883 }
884 
885 
886 /*
887  * Return the state of the pool (ACTIVE or UNAVAILABLE).
888  */
889 int
890 zpool_get_state(zpool_handle_t *zhp)
891 {
892 	return (zhp->zpool_state);
893 }
894 
895 /*
896  * Create the named pool, using the provided vdev list.  It is assumed
897  * that the consumer has already validated the contents of the nvlist, so we
898  * don't have to worry about error semantics.
899  */
900 int
901 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
902     nvlist_t *props, nvlist_t *fsprops)
903 {
904 	zfs_cmd_t zc = { 0 };
905 	nvlist_t *zc_fsprops = NULL;
906 	nvlist_t *zc_props = NULL;
907 	char msg[1024];
908 	char *altroot;
909 	int ret = -1;
910 
911 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
912 	    "cannot create '%s'"), pool);
913 
914 	if (!zpool_name_valid(hdl, B_FALSE, pool))
915 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
916 
917 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
918 		return (-1);
919 
920 	if (props) {
921 		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
922 		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
923 			goto create_failed;
924 		}
925 	}
926 
927 	if (fsprops) {
928 		uint64_t zoned;
929 		char *zonestr;
930 
931 		zoned = ((nvlist_lookup_string(fsprops,
932 		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
933 		    strcmp(zonestr, "on") == 0);
934 
935 		if ((zc_fsprops = zfs_valid_proplist(hdl,
936 		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
937 			goto create_failed;
938 		}
939 		if (!zc_props &&
940 		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
941 			goto create_failed;
942 		}
943 		if (nvlist_add_nvlist(zc_props,
944 		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
945 			goto create_failed;
946 		}
947 	}
948 
949 	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
950 		goto create_failed;
951 
952 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
953 
954 	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
955 
956 		zcmd_free_nvlists(&zc);
957 		nvlist_free(zc_props);
958 		nvlist_free(zc_fsprops);
959 
960 		switch (errno) {
961 		case EBUSY:
962 			/*
963 			 * This can happen if the user has specified the same
964 			 * device multiple times.  We can't reliably detect this
965 			 * until we try to add it and see we already have a
966 			 * label.
967 			 */
968 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
969 			    "one or more vdevs refer to the same device"));
970 			return (zfs_error(hdl, EZFS_BADDEV, msg));
971 
972 		case EOVERFLOW:
973 			/*
974 			 * This occurs when one of the devices is below
975 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
976 			 * device was the problem device since there's no
977 			 * reliable way to determine device size from userland.
978 			 */
979 			{
980 				char buf[64];
981 
982 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
983 
984 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
985 				    "one or more devices is less than the "
986 				    "minimum size (%s)"), buf);
987 			}
988 			return (zfs_error(hdl, EZFS_BADDEV, msg));
989 
990 		case ENOSPC:
991 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
992 			    "one or more devices is out of space"));
993 			return (zfs_error(hdl, EZFS_BADDEV, msg));
994 
995 		case ENOTBLK:
996 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
997 			    "cache device must be a disk or disk slice"));
998 			return (zfs_error(hdl, EZFS_BADDEV, msg));
999 
1000 		default:
1001 			return (zpool_standard_error(hdl, errno, msg));
1002 		}
1003 	}
1004 
1005 	/*
1006 	 * If this is an alternate root pool, then we automatically set the
1007 	 * mountpoint of the root dataset to be '/'.
1008 	 */
1009 	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
1010 	    &altroot) == 0) {
1011 		zfs_handle_t *zhp;
1012 
1013 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
1014 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1015 		    "/") == 0);
1016 
1017 		zfs_close(zhp);
1018 	}
1019 
1020 create_failed:
1021 	zcmd_free_nvlists(&zc);
1022 	nvlist_free(zc_props);
1023 	nvlist_free(zc_fsprops);
1024 	return (ret);
1025 }
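/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source).  Builds a minimal single-disk vdev tree by hand; in
 * practice consumers let zpool(1M)'s vdev-construction code build
 * and validate 'nvroot'.  The pool name and device path are
 * assumptions for the example.
 */
#if 0
static int
example_create_pool(libzfs_handle_t *hdl)
{
	nvlist_t *nvroot, *disk;
	int err = -1;

	if (nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) != 0)
		return (-1);
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(disk);
		return (-1);
	}

	/* One leaf disk vdev under the root vdev. */
	if (nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0 &&
	    nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c0t0d0s0") == 0 &&
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0 &&
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0)
		err = zpool_create(hdl, "tank", nvroot, NULL, NULL);

	nvlist_free(disk);
	nvlist_free(nvroot);
	return (err);
}
#endif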
1026 
1027 /*
1028  * Destroy the given pool.  It is up to the caller to ensure that there are no
1029  * datasets left in the pool.
1030  */
1031 int
1032 zpool_destroy(zpool_handle_t *zhp)
1033 {
1034 	zfs_cmd_t zc = { 0 };
1035 	zfs_handle_t *zfp = NULL;
1036 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1037 	char msg[1024];
1038 
1039 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1040 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1041 	    ZFS_TYPE_FILESYSTEM)) == NULL)
1042 		return (-1);
1043 
1044 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1045 
1046 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1047 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1048 		    "cannot destroy '%s'"), zhp->zpool_name);
1049 
1050 		if (errno == EROFS) {
1051 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1052 			    "one or more devices is read only"));
1053 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1054 		} else {
1055 			(void) zpool_standard_error(hdl, errno, msg);
1056 		}
1057 
1058 		if (zfp)
1059 			zfs_close(zfp);
1060 		return (-1);
1061 	}
1062 
1063 	if (zfp) {
1064 		remove_mountpoint(zfp);
1065 		zfs_close(zfp);
1066 	}
1067 
1068 	return (0);
1069 }
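/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source).  The caller is responsible for destroying or unmounting
 * any datasets before calling zpool_destroy(); zpool_close() only
 * frees the handle, so it is still safe after a destroy.
 */
#if 0
static int
example_destroy_pool(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	int err;

	if ((zhp = zpool_open_canfail(hdl, "tank")) == NULL)
		return (-1);
	err = zpool_destroy(zhp);
	zpool_close(zhp);
	return (err);
}
#endif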
1070 
1071 /*
1072  * Add the given vdevs to the pool.  The caller must have already performed the
1073  * necessary verification to ensure that the vdev specification is well-formed.
1074  */
1075 int
1076 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1077 {
1078 	zfs_cmd_t zc = { 0 };
1079 	int ret;
1080 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1081 	char msg[1024];
1082 	nvlist_t **spares, **l2cache;
1083 	uint_t nspares, nl2cache;
1084 
1085 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1086 	    "cannot add to '%s'"), zhp->zpool_name);
1087 
1088 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1089 	    SPA_VERSION_SPARES &&
1090 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1091 	    &spares, &nspares) == 0) {
1092 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1093 		    "upgraded to add hot spares"));
1094 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1095 	}
1096 
1097 	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1098 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1099 		uint64_t s;
1100 
1101 		for (s = 0; s < nspares; s++) {
1102 			char *path;
1103 
1104 			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1105 			    &path) == 0 && pool_uses_efi(spares[s])) {
1106 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1107 				    "device '%s' contains an EFI label and "
1108 				    "cannot be used on root pools."),
1109 				    zpool_vdev_name(hdl, NULL, spares[s],
1110 				    B_FALSE));
1111 				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1112 			}
1113 		}
1114 	}
1115 
1116 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1117 	    SPA_VERSION_L2CACHE &&
1118 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1119 	    &l2cache, &nl2cache) == 0) {
1120 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1121 		    "upgraded to add cache devices"));
1122 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1123 	}
1124 
1125 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1126 		return (-1);
1127 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1128 
1129 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1130 		switch (errno) {
1131 		case EBUSY:
1132 			/*
1133 			 * This can happen if the user has specified the same
1134 			 * device multiple times.  We can't reliably detect this
1135 			 * until we try to add it and see we already have a
1136 			 * label.
1137 			 */
1138 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1139 			    "one or more vdevs refer to the same device"));
1140 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1141 			break;
1142 
1143 		case EOVERFLOW:
1144 			/*
1145 			 * This occurs when one of the devices is below
1146 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1147 			 * device was the problem device since there's no
1148 			 * reliable way to determine device size from userland.
1149 			 */
1150 			{
1151 				char buf[64];
1152 
1153 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1154 
1155 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1156 				    "device is less than the minimum "
1157 				    "size (%s)"), buf);
1158 			}
1159 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1160 			break;
1161 
1162 		case ENOTSUP:
1163 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1164 			    "pool must be upgraded to add these vdevs"));
1165 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
1166 			break;
1167 
1168 		case EDOM:
1169 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1170 			    "root pool can not have multiple vdevs"
1171 			    " or separate logs"));
1172 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1173 			break;
1174 
1175 		case ENOTBLK:
1176 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1177 			    "cache device must be a disk or disk slice"));
1178 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1179 			break;
1180 
1181 		default:
1182 			(void) zpool_standard_error(hdl, errno, msg);
1183 		}
1184 
1185 		ret = -1;
1186 	} else {
1187 		ret = 0;
1188 	}
1189 
1190 	zcmd_free_nvlists(&zc);
1191 
1192 	return (ret);
1193 }
1194 
1195 /*
1196  * Exports the pool from the system.  The caller must ensure that there are no
1197  * mounted datasets in the pool.
1198  */
1199 int
1200 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1201 {
1202 	zfs_cmd_t zc = { 0 };
1203 	char msg[1024];
1204 
1205 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1206 	    "cannot export '%s'"), zhp->zpool_name);
1207 
1208 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1209 	zc.zc_cookie = force;
1210 	zc.zc_guid = hardforce;
1211 
1212 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1213 		switch (errno) {
1214 		case EXDEV:
1215 			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1216 			    "use '-f' to override the following errors:\n"
1217 			    "'%s' has an active shared spare which could be"
1218 			    " used by other pools once '%s' is exported."),
1219 			    zhp->zpool_name, zhp->zpool_name);
1220 			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1221 			    msg));
1222 		default:
1223 			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1224 			    msg));
1225 		}
1226 	}
1227 
1228 	return (0);
1229 }
1230 
1231 int
1232 zpool_export(zpool_handle_t *zhp, boolean_t force)
1233 {
1234 	return (zpool_export_common(zhp, force, B_FALSE));
1235 }
1236 
1237 int
1238 zpool_export_force(zpool_handle_t *zhp)
1239 {
1240 	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1241 }
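/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source).  A plain export can fail with EXDEV when a shared spare
 * is in use; zpool_export(zhp, B_TRUE) corresponds to
 * "zpool export -f".
 */
#if 0
static int
example_export_pool(zpool_handle_t *zhp)
{
	/* Try a normal export first, then fall back to forcing it. */
	if (zpool_export(zhp, B_FALSE) == 0)
		return (0);
	return (zpool_export(zhp, B_TRUE));
}
#endif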
1242 
1243 /*
1244  * zpool_import() is a contracted interface; it should be kept the same
1245  * if possible.
1246  *
1247  * Applications should use zpool_import_props() to import a pool with
1248  * new property values to be set.
1249  */
1250 int
1251 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1252     char *altroot)
1253 {
1254 	nvlist_t *props = NULL;
1255 	int ret;
1256 
1257 	if (altroot != NULL) {
1258 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1259 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1260 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1261 			    newname));
1262 		}
1263 
1264 		if (nvlist_add_string(props,
1265 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1266 		    nvlist_add_string(props,
1267 		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1268 			nvlist_free(props);
1269 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1270 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1271 			    newname));
1272 		}
1273 	}
1274 
1275 	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1276 	if (props)
1277 		nvlist_free(props);
1278 	return (ret);
1279 }
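/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source).  'config' would normally come from a discovery routine
 * such as zpool_find_import(); importing with an altroot also sets
 * cachefile=none, as the wrapper above shows.  The altroot path is
 * an assumption for the example.
 */
#if 0
static int
example_import_pool(libzfs_handle_t *hdl, nvlist_t *config)
{
	/* Keep the pool's own name, mount it under /mnt. */
	return (zpool_import(hdl, config, NULL, "/mnt"));
}
#endif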
1280 
1281 /*
1282  * Import the given pool using the known configuration and a list of
1283  * properties to be set. The configuration should have come from
1284  * zpool_find_import(). The 'newname' parameter controls whether the pool
1285  * is imported with a different name.
1286  */
1287 int
1288 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1289     nvlist_t *props, boolean_t importfaulted)
1290 {
1291 	zfs_cmd_t zc = { 0 };
1292 	char *thename;
1293 	char *origname;
1294 	int ret;
1295 	char errbuf[1024];
1296 
1297 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1298 	    &origname) == 0);
1299 
1300 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1301 	    "cannot import pool '%s'"), origname);
1302 
1303 	if (newname != NULL) {
1304 		if (!zpool_name_valid(hdl, B_FALSE, newname))
1305 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1306 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1307 			    newname));
1308 		thename = (char *)newname;
1309 	} else {
1310 		thename = origname;
1311 	}
1312 
1313 	if (props) {
1314 		uint64_t version;
1315 
1316 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1317 		    &version) == 0);
1318 
1319 		if ((props = zpool_valid_proplist(hdl, origname,
1320 		    props, version, B_TRUE, errbuf)) == NULL) {
1321 			return (-1);
1322 		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1323 			nvlist_free(props);
1324 			return (-1);
1325 		}
1326 	}
1327 
1328 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1329 
1330 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1331 	    &zc.zc_guid) == 0);
1332 
1333 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1334 		nvlist_free(props);
1335 		return (-1);
1336 	}
1337 
1338 	zc.zc_cookie = (uint64_t)importfaulted;
1339 	ret = 0;
1340 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1341 		char desc[1024];
1342 		if (newname == NULL)
1343 			(void) snprintf(desc, sizeof (desc),
1344 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1345 			    thename);
1346 		else
1347 			(void) snprintf(desc, sizeof (desc),
1348 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1349 			    origname, thename);
1350 
1351 		switch (errno) {
1352 		case ENOTSUP:
1353 			/*
1354 			 * Unsupported version.
1355 			 */
1356 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
1357 			break;
1358 
1359 		case EINVAL:
1360 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1361 			break;
1362 
1363 		default:
1364 			(void) zpool_standard_error(hdl, errno, desc);
1365 		}
1366 
1367 		ret = -1;
1368 	} else {
1369 		zpool_handle_t *zhp;
1370 
1371 		/*
1372 		 * This should never fail, but play it safe anyway.
1373 		 */
1374 		if (zpool_open_silent(hdl, thename, &zhp) != 0)
1375 			ret = -1;
1376 		else if (zhp != NULL)
1377 			zpool_close(zhp);
1378 	}
1379 
1380 	zcmd_free_nvlists(&zc);
1381 	nvlist_free(props);
1382 
1383 	return (ret);
1384 }
1385 
1386 /*
1387  * Scrub the pool.
1388  */
1389 int
1390 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1391 {
1392 	zfs_cmd_t zc = { 0 };
1393 	char msg[1024];
1394 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1395 
1396 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1397 	zc.zc_cookie = type;
1398 
1399 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1400 		return (0);
1401 
1402 	(void) snprintf(msg, sizeof (msg),
1403 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1404 
1405 	if (errno == EBUSY)
1406 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
1407 	else
1408 		return (zpool_standard_error(hdl, errno, msg));
1409 }
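/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source).  The same entry point starts and stops a scrub; EBUSY is
 * mapped to EZFS_RESILVERING when a resilver is already in progress.
 */
#if 0
static void
example_scrub(zpool_handle_t *zhp, boolean_t start)
{
	/* Equivalent to "zpool scrub [-s] <pool>". */
	(void) zpool_scrub(zhp,
	    start ? POOL_SCRUB_EVERYTHING : POOL_SCRUB_NONE);
}
#endif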
1410 
1411 /*
1412  * Find a vdev that matches the search criteria specified.  We use the
1413  * nvpair name to determine how we should look for the device.
1414  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1415  * spare; it is FALSE if it's an INUSE spare.
1416  */
1417 static nvlist_t *
1418 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1419     boolean_t *l2cache, boolean_t *log)
1420 {
1421 	uint_t c, children;
1422 	nvlist_t **child;
1423 	nvlist_t *ret;
1424 	uint64_t is_log;
1425 	char *srchkey;
1426 	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1427 
1428 	/* Nothing to look for */
1429 	if (search == NULL || pair == NULL)
1430 		return (NULL);
1431 
1432 	/* Obtain the key we will use to search */
1433 	srchkey = nvpair_name(pair);
1434 
1435 	switch (nvpair_type(pair)) {
1436 	case DATA_TYPE_UINT64: {
1437 		uint64_t srchval, theguid, present;
1438 
1439 		verify(nvpair_value_uint64(pair, &srchval) == 0);
1440 		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1441 			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1442 			    &present) == 0) {
1443 				/*
1444 				 * If the device has never been present since
1445 				 * import, the only reliable way to match the
1446 				 * vdev is by GUID.
1447 				 */
1448 				verify(nvlist_lookup_uint64(nv,
1449 				    ZPOOL_CONFIG_GUID, &theguid) == 0);
1450 				if (theguid == srchval)
1451 					return (nv);
1452 			}
1453 		}
1454 		break;
1455 	}
1456 
1457 	case DATA_TYPE_STRING: {
1458 		char *srchval, *val;
1459 
1460 		verify(nvpair_value_string(pair, &srchval) == 0);
1461 		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1462 			break;
1463 
1464 		/*
1465 		 * Search for the requested value.  We special case the search
1466 		 * for ZPOOL_CONFIG_PATH when it's a whole disk and when
1467 		 * looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1468 		 * All other searches are simple string compares.
1469 		 */
1470 		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && val) {
1471 			uint64_t wholedisk = 0;
1472 
1473 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1474 			    &wholedisk);
1475 			if (wholedisk) {
1476 				/*
1477 				 * For whole disks, the internal path has 's0',
1478 				 * but the path passed in by the user doesn't.
1479 				 */
1480 				if (strlen(srchval) == strlen(val) - 2 &&
1481 				    strncmp(srchval, val, strlen(srchval)) == 0)
1482 					return (nv);
1483 				break;
1484 			}
1485 		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
1486 			char *type, *idx, *end, *p;
1487 			uint64_t id, vdev_id;
1488 
1489 			/*
1490 			 * Determine our vdev type, keeping in mind
1491 			 * that the srchval is composed of a type and
1492 			 * vdev id pair (i.e. mirror-4).
1493 			 */
1494 			if ((type = strdup(srchval)) == NULL)
1495 				return (NULL);
1496 
1497 			if ((p = strrchr(type, '-')) == NULL) {
1498 				free(type);
1499 				break;
1500 			}
1501 			idx = p + 1;
1502 			*p = '\0';
1503 
1504 			/*
1505 			 * If the types don't match then keep looking.
1506 			 */
1507 			if (strncmp(val, type, strlen(val)) != 0) {
1508 				free(type);
1509 				break;
1510 			}
1511 
1512 			verify(strncmp(type, VDEV_TYPE_RAIDZ,
1513 			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1514 			    strncmp(type, VDEV_TYPE_MIRROR,
1515 			    strlen(VDEV_TYPE_MIRROR)) == 0);
1516 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
1517 			    &id) == 0);
1518 
1519 			errno = 0;
1520 			vdev_id = strtoull(idx, &end, 10);
1521 
1522 			free(type);
1523 			if (errno != 0)
1524 				return (NULL);
1525 
1526 			/*
1527 			 * Now verify that we have the correct vdev id.
1528 			 */
1529 			if (vdev_id == id)
1530 				return (nv);
1531 		}
1532 
1533 		/*
1534 		 * Common case
1535 		 */
1536 		if (strcmp(srchval, val) == 0)
1537 			return (nv);
1538 		break;
1539 	}
1540 
1541 	default:
1542 		break;
1543 	}
1544 
1545 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1546 	    &child, &children) != 0)
1547 		return (NULL);
1548 
1549 	for (c = 0; c < children; c++) {
1550 		if ((ret = vdev_to_nvlist_iter(child[c], search,
1551 		    avail_spare, l2cache, NULL)) != NULL) {
1552 			/*
1553 			 * The 'is_log' value is only set for the toplevel
1554 			 * vdev, not the leaf vdevs.  So we always lookup the
1555 			 * log device from the root of the vdev tree (where
1556 			 * 'log' is non-NULL).
1557 			 */
1558 			if (log != NULL &&
1559 			    nvlist_lookup_uint64(child[c],
1560 			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1561 			    is_log) {
1562 				*log = B_TRUE;
1563 			}
1564 			return (ret);
1565 		}
1566 	}
1567 
1568 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1569 	    &child, &children) == 0) {
1570 		for (c = 0; c < children; c++) {
1571 			if ((ret = vdev_to_nvlist_iter(child[c], search,
1572 			    avail_spare, l2cache, NULL)) != NULL) {
1573 				*avail_spare = B_TRUE;
1574 				return (ret);
1575 			}
1576 		}
1577 	}
1578 
1579 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1580 	    &child, &children) == 0) {
1581 		for (c = 0; c < children; c++) {
1582 			if ((ret = vdev_to_nvlist_iter(child[c], search,
1583 			    avail_spare, l2cache, NULL)) != NULL) {
1584 				*l2cache = B_TRUE;
1585 				return (ret);
1586 			}
1587 		}
1588 	}
1589 
1590 	return (NULL);
1591 }
1592 
1593 /*
1594  * Given a physical path (minus the "/devices" prefix), find the
1595  * associated vdev.
1596  */
1597 nvlist_t *
1598 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
1599     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1600 {
1601 	nvlist_t *search, *nvroot, *ret;
1602 
1603 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1604 	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
1605 
1606 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1607 	    &nvroot) == 0);
1608 
1609 	*avail_spare = B_FALSE;
1610 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1611 	nvlist_free(search);
1612 
1613 	return (ret);
1614 }
1615 
1616 /*
1617  * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
1618  */
1619 boolean_t
1620 zpool_vdev_is_interior(const char *name)
1621 {
1622 	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1623 	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
1624 		return (B_TRUE);
1625 	return (B_FALSE);
1626 }
1627 
1628 nvlist_t *
1629 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1630     boolean_t *l2cache, boolean_t *log)
1631 {
1632 	char buf[MAXPATHLEN];
1633 	char *end;
1634 	nvlist_t *nvroot, *search, *ret;
1635 	uint64_t guid;
1636 
1637 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1638 
1639 	guid = strtoull(path, &end, 10);
1640 	if (guid != 0 && *end == '\0') {
1641 		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
1642 	} else if (zpool_vdev_is_interior(path)) {
1643 		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
1644 	} else if (path[0] != '/') {
1645 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1646 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
1647 	} else {
1648 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
1649 	}
1650 
1651 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1652 	    &nvroot) == 0);
1653 
1654 	*avail_spare = B_FALSE;
1655 	*l2cache = B_FALSE;
1656 	if (log != NULL)
1657 		*log = B_FALSE;
1658 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1659 	nvlist_free(search);
1660 
1661 	return (ret);
1662 }
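/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source).  A device may be named by GUID, by an interior vdev name
 * such as "mirror-0", by a short device name, or by a full path; the
 * booleans report where in the tree the match was found.  The device
 * name is an assumption for the example.
 */
#if 0
static void
example_find_vdev(zpool_handle_t *zhp)
{
	boolean_t spare, l2cache, log;
	nvlist_t *tgt;

	if ((tgt = zpool_find_vdev(zhp, "c0t0d0", &spare, &l2cache,
	    &log)) != NULL)
		(void) printf("found (spare=%d l2cache=%d log=%d)\n",
		    spare, l2cache, log);
}
#endif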
1663 
1664 static int
1665 vdev_online(nvlist_t *nv)
1666 {
1667 	uint64_t ival;
1668 
1669 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1670 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1671 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1672 		return (0);
1673 
1674 	return (1);
1675 }
1676 
1677 /*
1678  * Helper function for zpool_get_physpath().
1679  */
1680 static int
1681 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1682     size_t *bytes_written)
1683 {
1684 	size_t bytes_left, pos, rsz;
1685 	char *tmppath;
1686 	const char *format;
1687 
1688 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1689 	    &tmppath) != 0)
1690 		return (EZFS_NODEVICE);
1691 
1692 	pos = *bytes_written;
1693 	bytes_left = physpath_size - pos;
1694 	format = (pos == 0) ? "%s" : " %s";
1695 
1696 	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1697 	*bytes_written += rsz;
1698 
1699 	if (rsz >= bytes_left) {
1700 		/* if physpath was not copied properly, clear it */
1701 		if (bytes_left != 0) {
1702 			physpath[pos] = 0;
1703 		}
1704 		return (EZFS_NOSPC);
1705 	}
1706 	return (0);
1707 }
1708 
1709 static int
1710 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
1711     size_t *rsz, boolean_t is_spare)
1712 {
1713 	char *type;
1714 	int ret;
1715 
1716 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
1717 		return (EZFS_INVALCONFIG);
1718 
1719 	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1720 		/*
1721 		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
1722 		 * For a spare vdev, we only want to boot from the active
1723 		 * spare device.
1724 		 */
1725 		if (is_spare) {
1726 			uint64_t spare = 0;
1727 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
1728 			    &spare);
1729 			if (!spare)
1730 				return (EZFS_INVALCONFIG);
1731 		}
1732 
1733 		if (vdev_online(nv)) {
1734 			if ((ret = vdev_get_one_physpath(nv, physpath,
1735 			    phypath_size, rsz)) != 0)
1736 				return (ret);
1737 		}
1738 	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
1739 	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
1740 	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
1741 		nvlist_t **child;
1742 		uint_t count;
1743 		int i, ret;
1744 
1745 		if (nvlist_lookup_nvlist_array(nv,
1746 		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
1747 			return (EZFS_INVALCONFIG);
1748 
1749 		for (i = 0; i < count; i++) {
1750 			ret = vdev_get_physpaths(child[i], physpath,
1751 			    phypath_size, rsz, is_spare);
1752 			if (ret == EZFS_NOSPC)
1753 				return (ret);
1754 		}
1755 	}
1756 
1757 	return (EZFS_POOL_INVALARG);
1758 }
1759 
1760 /*
1761  * Get phys_path for a root pool config.
1762  * Return 0 on success; non-zero on failure.
1763  */
1764 static int
1765 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
1766 {
1767 	size_t rsz;
1768 	nvlist_t *vdev_root;
1769 	nvlist_t **child;
1770 	uint_t count;
1771 	char *type;
1772 
1773 	rsz = 0;
1774 
1775 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1776 	    &vdev_root) != 0)
1777 		return (EZFS_INVALCONFIG);
1778 
1779 	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
1780 	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1781 	    &child, &count) != 0)
1782 		return (EZFS_INVALCONFIG);
1783 
1784 	/*
1785 	 * A root pool cannot have EFI-labeled disks and can only have
1786 	 * a single top-level vdev.
1787 	 */
1788 	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
1789 	    pool_uses_efi(vdev_root))
1790 		return (EZFS_POOL_INVALARG);
1791 
1792 	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
1793 	    B_FALSE);
1794 
1795 	/* No online devices */
1796 	if (rsz == 0)
1797 		return (EZFS_NODEVICE);
1798 
1799 	return (0);
1800 }
1801 
1802 /*
1803  * Get phys_path for a root pool
1804  * Return 0 on success; non-zero on failure.
1805  */
1806 int
1807 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
1808 {
1809 	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
1810 	    phypath_size));
1811 }
1812 
1813 /*
1814  * If the device has been dynamically expanded then we need to relabel
1815  * the disk to use the new unallocated space.
1816  */
1817 static int
1818 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
1819 {
1820 	char path[MAXPATHLEN];
1821 	char errbuf[1024];
1822 	int fd, error;
1823 	int (*_efi_use_whole_disk)(int);
1824 
1825 	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
1826 	    "efi_use_whole_disk")) == NULL)
1827 		return (-1);
1828 
1829 	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
1830 
1831 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
1832 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
1833 		    "relabel '%s': unable to open device"), name);
1834 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
1835 	}
1836 
1837 	/*
1838 	 * It's possible that we might encounter an error if the device
1839 	 * does not have any unallocated space left. If so, we simply
1840 	 * ignore that error and continue on.
1841 	 */
1842 	error = _efi_use_whole_disk(fd);
1843 	(void) close(fd);
1844 	if (error && error != VT_ENOSPC) {
1845 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
1846 		    "relabel '%s': unable to read disk capacity"), name);
1847 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
1848 	}
1849 	return (0);
1850 }
1851 
1852 /*
1853  * Bring the specified vdev online.  The 'flags' parameter is a set of the
1854  * ZFS_ONLINE_* flags.
1855  */
1856 int
1857 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1858     vdev_state_t *newstate)
1859 {
1860 	zfs_cmd_t zc = { 0 };
1861 	char msg[1024];
1862 	nvlist_t *tgt;
1863 	boolean_t avail_spare, l2cache, islog;
1864 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1865 
1866 	if (flags & ZFS_ONLINE_EXPAND) {
1867 		(void) snprintf(msg, sizeof (msg),
1868 		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
1869 	} else {
1870 		(void) snprintf(msg, sizeof (msg),
1871 		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1872 	}
1873 
1874 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1875 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1876 	    &islog)) == NULL)
1877 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1878 
1879 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1880 
1881 	if (avail_spare)
1882 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1883 
1884 	if (flags & ZFS_ONLINE_EXPAND ||
1885 	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
1886 		char *pathname = NULL;
1887 		uint64_t wholedisk = 0;
1888 
1889 		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
1890 		    &wholedisk);
1891 		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
1892 		    &pathname) == 0);
1893 
1894 		/*
1895 		 * XXX - L2ARC 1.0 devices can't support expansion.
1896 		 */
1897 		if (l2cache) {
1898 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1899 			    "cannot expand cache devices"));
1900 			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
1901 		}
1902 
1903 		if (wholedisk) {
1904 			pathname += strlen(DISK_ROOT) + 1;
1905 			(void) zpool_relabel_disk(zhp->zpool_hdl, pathname);
1906 		}
1907 	}
1908 
1909 	zc.zc_cookie = VDEV_STATE_ONLINE;
1910 	zc.zc_obj = flags;
1911 
1912 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1913 		return (zpool_standard_error(hdl, errno, msg));
1914 
1915 	*newstate = zc.zc_cookie;
1916 	return (0);
1917 }
1918 
1919 /*
1920  * Take the specified vdev offline.
1921  */
1922 int
1923 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1924 {
1925 	zfs_cmd_t zc = { 0 };
1926 	char msg[1024];
1927 	nvlist_t *tgt;
1928 	boolean_t avail_spare, l2cache;
1929 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1930 
1931 	(void) snprintf(msg, sizeof (msg),
1932 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1933 
1934 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1935 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1936 	    NULL)) == NULL)
1937 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1938 
1939 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1940 
1941 	if (avail_spare)
1942 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1943 
1944 	zc.zc_cookie = VDEV_STATE_OFFLINE;
1945 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1946 
1947 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1948 		return (0);
1949 
1950 	switch (errno) {
1951 	case EBUSY:
1952 
1953 		/*
1954 		 * There are no other replicas of this device.
1955 		 */
1956 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1957 
1958 	case EEXIST:
1959 		/*
1960 		 * The log device has unplayed logs
1961 		 */
1962 		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
1963 
1964 	default:
1965 		return (zpool_standard_error(hdl, errno, msg));
1966 	}
1967 }
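/*
 * Hypothetical usage sketch (illustrative, not part of the original
 * source).  Offlines a device temporarily (istmp == B_TRUE, so the
 * state does not persist across reboot) and brings it back online;
 * the device name is an assumption for the example.
 */
#if 0
static int
example_cycle_device(zpool_handle_t *zhp)
{
	vdev_state_t newstate;

	if (zpool_vdev_offline(zhp, "c0t0d0", B_TRUE) != 0)
		return (-1);
	return (zpool_vdev_online(zhp, "c0t0d0", 0, &newstate));
}
#endif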
1968 
1969 /*
1970  * Mark the given vdev faulted.
1971  */
1972 int
1973 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
1974 {
1975 	zfs_cmd_t zc = { 0 };
1976 	char msg[1024];
1977 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1978 
1979 	(void) snprintf(msg, sizeof (msg),
1980 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
1981 
1982 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1983 	zc.zc_guid = guid;
1984 	zc.zc_cookie = VDEV_STATE_FAULTED;
1985 	zc.zc_obj = aux;
1986 
1987 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1988 		return (0);
1989 
1990 	switch (errno) {
	case EBUSY:
1993 		/*
1994 		 * There are no other replicas of this device.
1995 		 */
1996 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1997 
1998 	default:
1999 		return (zpool_standard_error(hdl, errno, msg));
2000 	}
2002 }
2003 
2004 /*
2005  * Mark the given vdev degraded.
2006  */
2007 int
2008 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2009 {
2010 	zfs_cmd_t zc = { 0 };
2011 	char msg[1024];
2012 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2013 
2014 	(void) snprintf(msg, sizeof (msg),
2015 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2016 
2017 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2018 	zc.zc_guid = guid;
2019 	zc.zc_cookie = VDEV_STATE_DEGRADED;
2020 	zc.zc_obj = aux;
2021 
2022 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2023 		return (0);
2024 
2025 	return (zpool_standard_error(hdl, errno, msg));
2026 }
2027 
/*
 * Returns TRUE if 'tgt' appears as child 'which' of a spare vdev anywhere in
 * the tree rooted at 'search'.  Child 1 of a spare vdev is the hot spare that
 * was swapped in; child 0 is the original device being spared.
 */
2032 static boolean_t
2033 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2034 {
2035 	nvlist_t **child;
2036 	uint_t c, children;
2037 	char *type;
2038 
2039 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2040 	    &children) == 0) {
2041 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2042 		    &type) == 0);
2043 
2044 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2045 		    children == 2 && child[which] == tgt)
2046 			return (B_TRUE);
2047 
2048 		for (c = 0; c < children; c++)
2049 			if (is_replacing_spare(child[c], tgt, which))
2050 				return (B_TRUE);
2051 	}
2052 
2053 	return (B_FALSE);
2054 }
2055 
2056 /*
2057  * Attach new_disk (fully described by nvroot) to old_disk.
2058  * If 'replacing' is specified, the new disk will replace the old one.
2059  */
2060 int
2061 zpool_vdev_attach(zpool_handle_t *zhp,
2062     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2063 {
2064 	zfs_cmd_t zc = { 0 };
2065 	char msg[1024];
2066 	int ret;
2067 	nvlist_t *tgt;
2068 	boolean_t avail_spare, l2cache, islog;
2069 	uint64_t val;
2070 	char *path, *newname;
2071 	nvlist_t **child;
2072 	uint_t children;
2073 	nvlist_t *config_root;
2074 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2075 	boolean_t rootpool = pool_is_bootable(zhp);
2076 
2077 	if (replacing)
2078 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2079 		    "cannot replace %s with %s"), old_disk, new_disk);
2080 	else
2081 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2082 		    "cannot attach %s to %s"), new_disk, old_disk);
2083 
2084 	/*
2085 	 * If this is a root pool, make sure that we're not attaching an
2086 	 * EFI labeled device.
2087 	 */
2088 	if (rootpool && pool_uses_efi(nvroot)) {
2089 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2090 		    "EFI labeled devices are not supported on root pools."));
2091 		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2092 	}
2093 
2094 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2095 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == NULL)
2097 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2098 
2099 	if (avail_spare)
2100 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2101 
2102 	if (l2cache)
2103 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2104 
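	/*
	 * zc_guid identifies the device being attached to; a nonzero
	 * zc_cookie tells the kernel this is a replace rather than a
	 * plain attach.
	 */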
2105 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2106 	zc.zc_cookie = replacing;
2107 
2108 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2109 	    &child, &children) != 0 || children != 1) {
2110 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2111 		    "new device must be a single disk"));
2112 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2113 	}
2114 
2115 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2116 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2117 
2118 	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2119 		return (-1);
2120 
2121 	/*
2122 	 * If the target is a hot spare that has been swapped in, we can only
2123 	 * replace it with another hot spare.
2124 	 */
2125 	if (replacing &&
2126 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2127 	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2128 	    NULL) == NULL || !avail_spare) &&
2129 	    is_replacing_spare(config_root, tgt, 1)) {
2130 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2131 		    "can only be replaced by another hot spare"));
2132 		free(newname);
2133 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2134 	}
2135 
2136 	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
2138 	 * already spared device.
2139 	 */
2140 	if (replacing &&
2141 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
2142 	    zpool_find_vdev(zhp, newname, &avail_spare,
2143 	    &l2cache, NULL) != NULL && avail_spare &&
2144 	    is_replacing_spare(config_root, tgt, 0)) {
2145 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2146 		    "device has already been replaced with a spare"));
2147 		free(newname);
2148 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2149 	}
2150 
2151 	free(newname);
2152 
2153 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2154 		return (-1);
2155 
2156 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2157 
2158 	zcmd_free_nvlists(&zc);
2159 
2160 	if (ret == 0) {
2161 		if (rootpool) {
2162 			/*
2163 			 * XXX - This should be removed once we can
2164 			 * automatically install the bootblocks on the
2165 			 * newly attached disk.
2166 			 */
2167 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
2168 			    "be sure to invoke %s to make '%s' bootable.\n"),
2169 			    BOOTCMD, new_disk);
2170 
2171 			/*
2172 			 * XXX need a better way to prevent user from
2173 			 * booting up a half-baked vdev.
2174 			 */
2175 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2176 			    "sure to wait until resilver is done "
2177 			    "before rebooting.\n"));
2178 		}
2179 		return (0);
2180 	}
2181 
2182 	switch (errno) {
2183 	case ENOTSUP:
2184 		/*
2185 		 * Can't attach to or replace this type of vdev.
2186 		 */
2187 		if (replacing) {
2188 			if (islog)
2189 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2190 				    "cannot replace a log with a spare"));
2191 			else
2192 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2193 				    "cannot replace a replacing device"));
2194 		} else {
2195 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2196 			    "can only attach to mirrors and top-level "
2197 			    "disks"));
2198 		}
2199 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
2200 		break;
2201 
2202 	case EINVAL:
2203 		/*
2204 		 * The new device must be a single disk.
2205 		 */
2206 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2207 		    "new device must be a single disk"));
2208 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2209 		break;
2210 
2211 	case EBUSY:
2212 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2213 		    new_disk);
2214 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2215 		break;
2216 
2217 	case EOVERFLOW:
2218 		/*
2219 		 * The new device is too small.
2220 		 */
2221 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2222 		    "device is too small"));
2223 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2224 		break;
2225 
2226 	case EDOM:
2227 		/*
2228 		 * The new device has a different alignment requirement.
2229 		 */
2230 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2231 		    "devices have different sector alignment"));
2232 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2233 		break;
2234 
2235 	case ENAMETOOLONG:
2236 		/*
2237 		 * The resulting top-level vdev spec won't fit in the label.
2238 		 */
2239 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2240 		break;
2241 
2242 	default:
2243 		(void) zpool_standard_error(hdl, errno, msg);
2244 	}
2245 
2246 	return (-1);
2247 }
2248 
2249 /*
2250  * Detach the specified device.
2251  */
2252 int
2253 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2254 {
2255 	zfs_cmd_t zc = { 0 };
2256 	char msg[1024];
2257 	nvlist_t *tgt;
2258 	boolean_t avail_spare, l2cache;
2259 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2260 
2261 	(void) snprintf(msg, sizeof (msg),
2262 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2263 
2264 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2265 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
2267 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2268 
2269 	if (avail_spare)
2270 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2271 
2272 	if (l2cache)
2273 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2274 
2275 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2276 
2277 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2278 		return (0);
2279 
	switch (errno) {
2282 	case ENOTSUP:
2283 		/*
2284 		 * Can't detach from this type of vdev.
2285 		 */
2286 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2287 		    "applicable to mirror and replacing vdevs"));
2288 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
2289 		break;
2290 
2291 	case EBUSY:
2292 		/*
2293 		 * There are no other replicas of this device.
2294 		 */
2295 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2296 		break;
2297 
2298 	default:
2299 		(void) zpool_standard_error(hdl, errno, msg);
2300 	}
2301 
2302 	return (-1);
2303 }
2304 
2305 /*
 * Remove the given device.  Currently, this is supported only for hot
 * spares, level 2 cache devices, and log devices.
2308  */
2309 int
2310 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2311 {
2312 	zfs_cmd_t zc = { 0 };
2313 	char msg[1024];
2314 	nvlist_t *tgt;
2315 	boolean_t avail_spare, l2cache, islog;
2316 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2317 	uint64_t version;
2318 
2319 	(void) snprintf(msg, sizeof (msg),
2320 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2321 
2322 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2323 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
2325 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2326 	/*
2327 	 * XXX - this should just go away.
2328 	 */
2329 	if (!avail_spare && !l2cache && !islog) {
2330 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, "
2332 		    "or log devices can be removed"));
2333 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2334 	}
2335 
2336 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2337 	if (islog && version < SPA_VERSION_HOLES) {
2338 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
2340 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
2341 	}
2342 
2343 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2344 
2345 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2346 		return (0);
2347 
2348 	return (zpool_standard_error(hdl, errno, msg));
2349 }
2350 
2351 /*
2352  * Clear the errors for the pool, or the particular device if specified.
2353  */
2354 int
2355 zpool_clear(zpool_handle_t *zhp, const char *path)
2356 {
2357 	zfs_cmd_t zc = { 0 };
2358 	char msg[1024];
2359 	nvlist_t *tgt;
2360 	boolean_t avail_spare, l2cache;
2361 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2362 
2363 	if (path)
2364 		(void) snprintf(msg, sizeof (msg),
2365 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2366 		    path);
2367 	else
2368 		(void) snprintf(msg, sizeof (msg),
2369 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2370 		    zhp->zpool_name);
2371 
2372 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
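	/*
	 * If no device is specified, zc_guid remains zero and the kernel
	 * clears the error counts for the entire pool.
	 */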
2373 	if (path) {
2374 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == NULL)
2376 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
2377 
2378 		/*
2379 		 * Don't allow error clearing for hot spares.  Do allow
2380 		 * error clearing for l2cache devices.
2381 		 */
2382 		if (avail_spare)
2383 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
2384 
2385 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2386 		    &zc.zc_guid) == 0);
2387 	}
2388 
2389 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
2390 		return (0);
2391 
2392 	return (zpool_standard_error(hdl, errno, msg));
2393 }
2394 
2395 /*
2396  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2397  */
2398 int
2399 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2400 {
2401 	zfs_cmd_t zc = { 0 };
2402 	char msg[1024];
2403 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2404 
2405 	(void) snprintf(msg, sizeof (msg),
2406 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2407 	    guid);
2408 
2409 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2410 	zc.zc_guid = guid;
2411 
2412 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2413 		return (0);
2414 
2415 	return (zpool_standard_error(hdl, errno, msg));
2416 }
2417 
2418 /*
2419  * Convert from a devid string to a path.
2420  */
2421 static char *
2422 devid_to_path(char *devid_str)
2423 {
2424 	ddi_devid_t devid;
2425 	char *minor;
2426 	char *path;
2427 	devid_nmlist_t *list = NULL;
2428 	int ret;
2429 
2430 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2431 		return (NULL);
2432 
2433 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2434 
2435 	devid_str_free(minor);
2436 	devid_free(devid);
2437 
2438 	if (ret != 0)
2439 		return (NULL);
2440 
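	/*
	 * A devid can map to multiple /dev links; arbitrarily take the
	 * first entry in the returned namelist.
	 */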
	if ((path = strdup(list[0].devname)) == NULL) {
		devid_free_nmlist(list);
		return (NULL);
	}
2443 
2444 	devid_free_nmlist(list);
2445 
2446 	return (path);
2447 }
2448 
2449 /*
2450  * Convert from a path to a devid string.
2451  */
2452 static char *
2453 path_to_devid(const char *path)
2454 {
2455 	int fd;
2456 	ddi_devid_t devid;
2457 	char *minor, *ret;
2458 
2459 	if ((fd = open(path, O_RDONLY)) < 0)
2460 		return (NULL);
2461 
2462 	minor = NULL;
2463 	ret = NULL;
2464 	if (devid_get(fd, &devid) == 0) {
2465 		if (devid_get_minor_name(fd, &minor) == 0)
2466 			ret = devid_str_encode(devid, minor);
2467 		if (minor != NULL)
2468 			devid_str_free(minor);
2469 		devid_free(devid);
2470 	}
2471 	(void) close(fd);
2472 
2473 	return (ret);
2474 }
2475 
2476 /*
2477  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2478  * ignore any failure here, since a common case is for an unprivileged user to
2479  * type 'zpool status', and we'll display the correct information anyway.
2480  */
2481 static void
2482 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2483 {
2484 	zfs_cmd_t zc = { 0 };
2485 
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
2488 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2489 	    &zc.zc_guid) == 0);
2490 
2491 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2492 }
2493 
2494 /*
2495  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2496  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2497  * We also check if this is a whole disk, in which case we strip off the
2498  * trailing 's0' slice name.
2499  *
2500  * This routine is also responsible for identifying when disks have been
2501  * reconfigured in a new location.  The kernel will have opened the device by
2502  * devid, but the path will still refer to the old location.  To catch this, we
2503  * first do a path -> devid translation (which is fast for the common case).  If
2504  * the devid matches, we're done.  If not, we do a reverse devid -> path
2505  * translation and issue the appropriate ioctl() to update the path of the vdev.
2506  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2507  * of these checks.
2508  */
2509 char *
2510 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
2511     boolean_t verbose)
2512 {
2513 	char *path, *devid;
2514 	uint64_t value;
2515 	char buf[64];
2516 	vdev_stat_t *vs;
2517 	uint_t vsc;
2518 
2519 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2520 	    &value) == 0) {
2521 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2522 		    &value) == 0);
2523 		(void) snprintf(buf, sizeof (buf), "%llu",
2524 		    (u_longlong_t)value);
2525 		path = buf;
2526 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2527 
2528 		/*
2529 		 * If the device is dead (faulted, offline, etc) then don't
2530 		 * bother opening it.  Otherwise we may be forcing the user to
2531 		 * open a misbehaving device, which can have undesirable
2532 		 * effects.
2533 		 */
2534 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2535 		    (uint64_t **)&vs, &vsc) != 0 ||
2536 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2537 		    zhp != NULL &&
2538 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2539 			/*
2540 			 * Determine if the current path is correct.
2541 			 */
2542 			char *newdevid = path_to_devid(path);
2543 
2544 			if (newdevid == NULL ||
2545 			    strcmp(devid, newdevid) != 0) {
2546 				char *newpath;
2547 
2548 				if ((newpath = devid_to_path(devid)) != NULL) {
2549 					/*
2550 					 * Update the path appropriately.
2551 					 */
2552 					set_path(zhp, nv, newpath);
2553 					if (nvlist_add_string(nv,
2554 					    ZPOOL_CONFIG_PATH, newpath) == 0)
2555 						verify(nvlist_lookup_string(nv,
2556 						    ZPOOL_CONFIG_PATH,
2557 						    &path) == 0);
2558 					free(newpath);
2559 				}
2560 			}
2561 
2562 			if (newdevid)
2563 				devid_str_free(newdevid);
2564 		}
2565 
2566 		if (strncmp(path, "/dev/dsk/", 9) == 0)
2567 			path += 9;
2568 
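		/*
		 * Whole-disk paths stored in the label include the "s0"
		 * slice suffix; chop off those final two characters so we
		 * display just the disk name.
		 */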
2569 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2570 		    &value) == 0 && value) {
2571 			char *tmp = zfs_strdup(hdl, path);
2572 			if (tmp == NULL)
2573 				return (NULL);
2574 			tmp[strlen(path) - 2] = '\0';
2575 			return (tmp);
2576 		}
2577 	} else {
2578 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2579 
2580 		/*
2581 		 * If it's a raidz device, we need to stick in the parity level.
2582 		 */
2583 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2584 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2585 			    &value) == 0);
2586 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
2587 			    (u_longlong_t)value);
2588 			path = buf;
2589 		}
2590 
2591 		/*
2592 		 * We identify each top-level vdev by using a <type-id>
2593 		 * naming convention.
2594 		 */
2595 		if (verbose) {
2596 			uint64_t id;
2597 
2598 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2599 			    &id) == 0);
2600 			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
2601 			    (u_longlong_t)id);
2602 			path = buf;
2603 		}
2604 	}
2605 
2606 	return (zfs_strdup(hdl, path));
2607 }
2608 
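/*
 * qsort(3C) comparator for zbookmarks.  A raw memcmp() suffices here; we
 * only need a consistent total order so that duplicate <objset, object>
 * pairs end up adjacent for the uniquify pass in zpool_get_errlog().
 */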
2609 static int
2610 zbookmark_compare(const void *a, const void *b)
2611 {
2612 	return (memcmp(a, b, sizeof (zbookmark_t)));
2613 }
2614 
2615 /*
2616  * Retrieve the persistent error log, uniquify the members, and return to the
2617  * caller.
2618  */
2619 int
2620 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2621 {
2622 	zfs_cmd_t zc = { 0 };
2623 	uint64_t count;
2624 	zbookmark_t *zb = NULL;
2625 	int i;
2626 
2627 	/*
2628 	 * Retrieve the raw error list from the kernel.  If the number of errors
2629 	 * has increased, allocate more space and continue until we get the
2630 	 * entire list.
2631 	 */
2632 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2633 	    &count) == 0);
2634 	if (count == 0)
2635 		return (0);
2636 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2637 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2638 		return (-1);
2639 	zc.zc_nvlist_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2641 	for (;;) {
2642 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2643 		    &zc) != 0) {
2644 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
2645 			if (errno == ENOMEM) {
2646 				count = zc.zc_nvlist_dst_size;
2647 				if ((zc.zc_nvlist_dst = (uintptr_t)
2648 				    zfs_alloc(zhp->zpool_hdl, count *
2649 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
2650 					return (-1);
2651 			} else {
2652 				return (-1);
2653 			}
2654 		} else {
2655 			break;
2656 		}
2657 	}
2658 
2659 	/*
2660 	 * Sort the resulting bookmarks.  This is a little confusing due to the
2661 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
 * _not_ copied as part of the process.  So we point the start of our
 * array appropriately and decrement the total number of elements.
2665 	 */
2666 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2667 	    zc.zc_nvlist_dst_size;
2668 	count -= zc.zc_nvlist_dst_size;
2669 
2670 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2671 
2672 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2673 
2674 	/*
2675 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2676 	 */
2677 	for (i = 0; i < count; i++) {
2678 		nvlist_t *nv;
2679 
2680 		/* ignoring zb_blkid and zb_level for now */
2681 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2682 		    zb[i-1].zb_object == zb[i].zb_object)
2683 			continue;
2684 
2685 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2686 			goto nomem;
2687 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2688 		    zb[i].zb_objset) != 0) {
2689 			nvlist_free(nv);
2690 			goto nomem;
2691 		}
2692 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2693 		    zb[i].zb_object) != 0) {
2694 			nvlist_free(nv);
2695 			goto nomem;
2696 		}
2697 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2698 			nvlist_free(nv);
2699 			goto nomem;
2700 		}
2701 		nvlist_free(nv);
2702 	}
2703 
2704 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2705 	return (0);
2706 
2707 nomem:
2708 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2709 	return (no_memory(zhp->zpool_hdl));
2710 }
2711 
2712 /*
2713  * Upgrade a ZFS pool to the latest on-disk version.
2714  */
2715 int
2716 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2717 {
2718 	zfs_cmd_t zc = { 0 };
2719 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2720 
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2722 	zc.zc_cookie = new_version;
2723 
2724 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2725 		return (zpool_standard_error_fmt(hdl, errno,
2726 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2727 		    zhp->zpool_name));
2728 	return (0);
2729 }
2730 
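/*
 * Build the history string for a command: the subcommand name followed by
 * as many of its arguments as fit within HIS_MAX_RECORD_LEN bytes.
 */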
2731 void
2732 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2733     char *history_str)
2734 {
2735 	int i;
2736 
2737 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2738 	for (i = 1; i < argc; i++) {
2739 		if (strlen(history_str) + 1 + strlen(argv[i]) >
2740 		    HIS_MAX_RECORD_LEN)
2741 			break;
2742 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2743 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2744 	}
2745 }
2746 
2747 /*
2748  * Stage command history for logging.
2749  */
2750 int
2751 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2752 {
2753 	if (history_str == NULL)
2754 		return (EINVAL);
2755 
2756 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2757 		return (EINVAL);
2758 
2759 	if (hdl->libzfs_log_str != NULL)
2760 		free(hdl->libzfs_log_str);
2761 
2762 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2763 		return (no_memory(hdl));
2764 
2765 	return (0);
2766 }
2767 
2768 /*
2769  * Perform ioctl to get some command history of a pool.
2770  *
2771  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2772  * logical offset of the history buffer to start reading from.
2773  *
2774  * Upon return, 'off' is the next logical offset to read from and
2775  * 'len' is the actual amount of bytes read into 'buf'.
2776  */
2777 static int
2778 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2779 {
2780 	zfs_cmd_t zc = { 0 };
2781 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2782 
2783 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2784 
2785 	zc.zc_history = (uint64_t)(uintptr_t)buf;
2786 	zc.zc_history_len = *len;
2787 	zc.zc_history_offset = *off;
2788 
2789 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2790 		switch (errno) {
2791 		case EPERM:
2792 			return (zfs_error_fmt(hdl, EZFS_PERM,
2793 			    dgettext(TEXT_DOMAIN,
2794 			    "cannot show history for pool '%s'"),
2795 			    zhp->zpool_name));
2796 		case ENOENT:
2797 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2798 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2799 			    "'%s'"), zhp->zpool_name));
2800 		case ENOTSUP:
2801 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2802 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2803 			    "'%s', pool must be upgraded"), zhp->zpool_name));
2804 		default:
2805 			return (zpool_standard_error_fmt(hdl, errno,
2806 			    dgettext(TEXT_DOMAIN,
2807 			    "cannot get history for '%s'"), zhp->zpool_name));
2808 		}
2809 	}
2810 
2811 	*len = zc.zc_history_len;
2812 	*off = zc.zc_history_offset;
2813 
2814 	return (0);
2815 }
2816 
2817 /*
2818  * Process the buffer of nvlists, unpacking and storing each nvlist record
2819  * into 'records'.  'leftover' is set to the number of bytes that weren't
2820  * processed as there wasn't a complete record.
2821  */
2822 int
2823 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2824     nvlist_t ***records, uint_t *numrecords)
2825 {
2826 	uint64_t reclen;
2827 	nvlist_t *nv;
2828 	int i;
2829 
2830 	while (bytes_read > sizeof (reclen)) {
2831 
2832 		/* get length of packed record (stored as little endian) */
2833 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2834 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2835 
2836 		if (bytes_read < sizeof (reclen) + reclen)
2837 			break;
2838 
2839 		/* unpack record */
2840 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2841 			return (ENOMEM);
2842 		bytes_read -= sizeof (reclen) + reclen;
2843 		buf += sizeof (reclen) + reclen;
2844 
2845 		/* add record to nvlist array */
2846 		(*numrecords)++;
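		/*
		 * Grow the records array geometrically: each time the count
		 * reaches one less than a power of two, double the
		 * allocation.  On the first record 'records' is NULL, so
		 * realloc() behaves like malloc().
		 */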
2847 		if (ISP2(*numrecords + 1)) {
2848 			*records = realloc(*records,
2849 			    *numrecords * 2 * sizeof (nvlist_t *));
2850 		}
2851 		(*records)[*numrecords - 1] = nv;
2852 	}
2853 
2854 	*leftover = bytes_read;
2855 	return (0);
2856 }
2857 
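/*
 * Size of the buffer used for each ZFS_IOC_POOL_GET_HISTORY request; the
 * history is fetched in chunks of up to this many bytes.
 */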
2858 #define	HIS_BUF_LEN	(128*1024)
2859 
2860 /*
2861  * Retrieve the command history of a pool.
2862  */
2863 int
2864 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2865 {
2866 	char buf[HIS_BUF_LEN];
2867 	uint64_t off = 0;
2868 	nvlist_t **records = NULL;
2869 	uint_t numrecords = 0;
2870 	int err, i;
2871 
2872 	do {
2873 		uint64_t bytes_read = sizeof (buf);
2874 		uint64_t leftover;
2875 
2876 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2877 			break;
2878 
2879 		/* if nothing else was read in, we're at EOF, just return */
2880 		if (!bytes_read)
2881 			break;
2882 
2883 		if ((err = zpool_history_unpack(buf, bytes_read,
2884 		    &leftover, &records, &numrecords)) != 0)
2885 			break;
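		/*
		 * Any leftover bytes form an incomplete record; back the
		 * offset up so the next pass re-reads them at the start of
		 * the buffer.
		 */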
2886 		off -= leftover;
2887 
2888 		/* CONSTCOND */
2889 	} while (1);
2890 
2891 	if (!err) {
2892 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2893 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2894 		    records, numrecords) == 0);
2895 	}
2896 	for (i = 0; i < numrecords; i++)
2897 		nvlist_free(records[i]);
2898 	free(records);
2899 
2900 	return (err);
2901 }
2902 
2903 void
2904 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2905     char *pathname, size_t len)
2906 {
2907 	zfs_cmd_t zc = { 0 };
2908 	boolean_t mounted = B_FALSE;
2909 	char *mntpnt = NULL;
2910 	char dsname[MAXNAMELEN];
2911 
2912 	if (dsobj == 0) {
2913 		/* special case for the MOS */
2914 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
2915 		return;
2916 	}
2917 
2918 	/* get the dataset's name */
2919 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2920 	zc.zc_obj = dsobj;
2921 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
2922 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2923 		/* just write out a path of two object numbers */
2924 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
2925 		    dsobj, obj);
2926 		return;
2927 	}
2928 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2929 
2930 	/* find out if the dataset is mounted */
2931 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2932 
2933 	/* get the corrupted object's path */
2934 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2935 	zc.zc_obj = obj;
2936 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2937 	    &zc) == 0) {
2938 		if (mounted) {
2939 			(void) snprintf(pathname, len, "%s%s", mntpnt,
2940 			    zc.zc_value);
2941 		} else {
2942 			(void) snprintf(pathname, len, "%s:%s",
2943 			    dsname, zc.zc_value);
2944 		}
2945 	} else {
2946 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
2947 	}
2948 	free(mntpnt);
2949 }
2950 
2951 /*
 * Read the EFI label from the config; if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
2954  * diskaddr argument then we set it to the starting address of the EFI
2955  * partition.
2956  */
2957 static int
2958 read_efi_label(nvlist_t *config, diskaddr_t *sb)
2959 {
2960 	char *path;
2961 	int fd;
2962 	char diskname[MAXPATHLEN];
2963 	int err = -1;
2964 
2965 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
2966 		return (err);
2967 
2968 	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
2969 	    strrchr(path, '/'));
2970 	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2971 		struct dk_gpt *vtoc;
2972 
2973 		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
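		/*
		 * Slice 0 is the "zfs" data partition (see
		 * zpool_label_disk()), so its start address is what we
		 * report back.
		 */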
2974 			if (sb != NULL)
2975 				*sb = vtoc->efi_parts[0].p_start;
2976 			efi_free(vtoc);
2977 		}
2978 		(void) close(fd);
2979 	}
2980 	return (err);
2981 }
2982 
2983 /*
 * Determine where a partition starts on a disk in the current
 * configuration.
2986  */
2987 static diskaddr_t
2988 find_start_block(nvlist_t *config)
2989 {
2990 	nvlist_t **child;
2991 	uint_t c, children;
2992 	diskaddr_t sb = MAXOFFSET_T;
2993 	uint64_t wholedisk;
2994 
2995 	if (nvlist_lookup_nvlist_array(config,
2996 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2997 		if (nvlist_lookup_uint64(config,
2998 		    ZPOOL_CONFIG_WHOLE_DISK,
2999 		    &wholedisk) != 0 || !wholedisk) {
3000 			return (MAXOFFSET_T);
3001 		}
3002 		if (read_efi_label(config, &sb) < 0)
3003 			sb = MAXOFFSET_T;
3004 		return (sb);
3005 	}
3006 
3007 	for (c = 0; c < children; c++) {
3008 		sb = find_start_block(child[c]);
3009 		if (sb != MAXOFFSET_T) {
3010 			return (sb);
3011 		}
3012 	}
3013 	return (MAXOFFSET_T);
3014 }
3015 
3016 /*
3017  * Label an individual disk.  The name provided is the short name,
3018  * stripped of any leading /dev path.
3019  */
3020 int
3021 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3022 {
3023 	char path[MAXPATHLEN];
3024 	struct dk_gpt *vtoc;
3025 	int fd;
3026 	size_t resv = EFI_MIN_RESV_SIZE;
3027 	uint64_t slice_size;
3028 	diskaddr_t start_block;
3029 	char errbuf[1024];
3030 
3031 	/* prepare an error message just in case */
3032 	(void) snprintf(errbuf, sizeof (errbuf),
3033 	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3034 
3035 	if (zhp) {
3036 		nvlist_t *nvroot;
3037 
3038 		if (pool_is_bootable(zhp)) {
3039 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3040 			    "EFI labeled devices are not supported on root "
3041 			    "pools."));
3042 			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3043 		}
3044 
3045 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
3046 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3047 
3048 		if (zhp->zpool_start_block == 0)
3049 			start_block = find_start_block(nvroot);
3050 		else
3051 			start_block = zhp->zpool_start_block;
3052 		zhp->zpool_start_block = start_block;
3053 	} else {
3054 		/* new pool */
3055 		start_block = NEW_START_BLOCK;
3056 	}
3057 
3058 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3059 	    BACKUP_SLICE);
3060 
3061 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3062 		/*
3063 		 * This shouldn't happen.  We've long since verified that this
3064 		 * is a valid device.
3065 		 */
3066 		zfs_error_aux(hdl,
3067 		    dgettext(TEXT_DOMAIN, "unable to open device"));
3068 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3069 	}
3070 
3071 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3072 		/*
3073 		 * The only way this can fail is if we run out of memory, or we
3074 		 * were unable to read the disk's capacity
3075 		 */
3076 		if (errno == ENOMEM)
3077 			(void) no_memory(hdl);
3078 
3079 		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));
3082 
3083 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3084 	}
3085 
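	/*
	 * Lay out the label: slice 0 spans everything from the start block
	 * up to the EFI reserved area, and slice 8 holds the
	 * EFI_MIN_RESV_SIZE reserved sectors at the end of the disk.
	 */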
3086 	slice_size = vtoc->efi_last_u_lba + 1;
3087 	slice_size -= EFI_MIN_RESV_SIZE;
3088 	if (start_block == MAXOFFSET_T)
3089 		start_block = NEW_START_BLOCK;
3090 	slice_size -= start_block;
3091 
3092 	vtoc->efi_parts[0].p_start = start_block;
3093 	vtoc->efi_parts[0].p_size = slice_size;
3094 
3095 	/*
3096 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
3097 	 * disposable by some EFI utilities (since EFI doesn't have a backup
3098 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
3099 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
3100 	 * etc. were all pretty specific.  V_USR is as close to reality as we
3101 	 * can get, in the absence of V_OTHER.
3102 	 */
3103 	vtoc->efi_parts[0].p_tag = V_USR;
3104 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3105 
3106 	vtoc->efi_parts[8].p_start = slice_size + start_block;
3107 	vtoc->efi_parts[8].p_size = resv;
3108 	vtoc->efi_parts[8].p_tag = V_RESERVED;
3109 
3110 	if (efi_write(fd, vtoc) != 0) {
3111 		/*
3112 		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a specific
		 * slice.
3116 		 */
3117 		(void) close(fd);
3118 		efi_free(vtoc);
3119 
3120 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3121 		    "try using fdisk(1M) and then provide a specific slice"));
3122 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3123 	}
3124 
3125 	(void) close(fd);
3126 	efi_free(vtoc);
3127 	return (0);
3128 }
3129 
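/*
 * Check whether every vdev in the given config tree is of a type that can
 * back a dump device.  raidz, file, log, hole, and missing vdevs are
 * rejected; the check recurses into any children.
 */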
3130 static boolean_t
3131 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3132 {
3133 	char *type;
3134 	nvlist_t **child;
3135 	uint_t children, c;
3136 
3137 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3138 	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3139 	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
3140 	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
3141 	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3142 	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
3143 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3144 		    "vdev type '%s' is not supported"), type);
3145 		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3146 		return (B_FALSE);
3147 	}
3148 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3149 	    &child, &children) == 0) {
3150 		for (c = 0; c < children; c++) {
3151 			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3152 				return (B_FALSE);
3153 		}
3154 	}
3155 	return (B_TRUE);
3156 }
3157 
3158 /*
 * Check whether this zvol is allowable for use as a dump device; returns
 * zero if it is, > 0 if it isn't, and < 0 if it isn't a zvol.
3161  */
3162 int
3163 zvol_check_dump_config(char *arg)
3164 {
3165 	zpool_handle_t *zhp = NULL;
3166 	nvlist_t *config, *nvroot;
3167 	char *p, *volname;
3168 	nvlist_t **top;
3169 	uint_t toplevels;
3170 	libzfs_handle_t *hdl;
3171 	char errbuf[1024];
3172 	char poolname[ZPOOL_MAXNAMELEN];
3173 	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3174 	int ret = 1;
3175 
3176 	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3177 		return (-1);
3178 	}
3179 
3180 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3181 	    "dump is not supported on device '%s'"), arg);
3182 
3183 	if ((hdl = libzfs_init()) == NULL)
3184 		return (1);
3185 	libzfs_print_on_error(hdl, B_TRUE);
3186 
3187 	volname = arg + pathlen;
3188 
	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;	/* clean up the libzfs handle on the way out */
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}
3204 
3205 	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
3206 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3207 		    "could not open pool '%s'"), poolname);
3208 		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
3209 		goto out;
3210 	}
3211 	config = zpool_get_config(zhp, NULL);
3212 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3213 	    &nvroot) != 0) {
3214 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
3216 		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3217 		goto out;
3218 	}
3219 
3220 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3221 	    &top, &toplevels) == 0);
3222 	if (toplevels != 1) {
3223 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3224 		    "'%s' has multiple top level vdevs"), poolname);
3225 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3226 		goto out;
3227 	}
3228 
3229 	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
3230 		goto out;
3231 	}
3232 	ret = 0;
3233 
3234 out:
3235 	if (zhp)
3236 		zpool_close(zhp);
3237 	libzfs_fini(hdl);
3238 	return (ret);
3239 }
3240