/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"

const char *hist_event_table[LOG_END] = {
	"invalid event",
	"pool create",
	"vdev add",
	"pool remove",
	"pool destroy",
	"pool export",
	"pool import",
	"vdev attach",
	"vdev replace",
	"vdev detach",
	"vdev online",
	"vdev offline",
	"vdev upgrade",
	"pool clear",
	"pool scrub",
	"pool property set",
	"create",
	"clone",
	"destroy",
	"destroy_begin_sync",
	"inherit",
	"property set",
	"quota set",
	"permission update",
	"permission remove",
	"permission who remove",
	"promote",
	"receive",
	"rename",
	"reservation set",
	"replay_inc_sync",
	"replay_full_sync",
	"rollback",
	"snapshot",
	"filesystem version upgrade",
	"refquota set",
	"refreservation set",
	"pool scrub done",
	"user hold",
	"user release",
};

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#if defined(__i386) || defined(__amd64)
#define	BOOTCMD	"installgrub(1M)"
#else
#define	BOOTCMD	"installboot(1M)"
#endif

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

/*
 * ====================================================================
 *   zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
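
/*
 * Example (illustrative sketch, not part of libzfs, kept out of the build
 * by the guard): reading a numeric pool property with
 * zpool_get_prop_int().  The function name is hypothetical; only the
 * libzfs calls are real.
 */
#if 0
static void
example_print_pool_version(zpool_handle_t *zhp)
{
	uint64_t vers;
	zprop_source_t src;

	/* Falls back to the property default if the pool is faulted. */
	vers = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
	(void) printf("%s: version %llu (%s)\n", zpool_get_name(zhp),
	    (u_longlong_t)vers,
	    src == ZPROP_SRC_DEFAULT ? "default" : "local");
}
#endif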

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}
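
/*
 * Example (illustrative sketch, not built): mapping a vdev's state/aux
 * pair to the string zpool(1M) prints.  'vs' would come from the
 * ZPOOL_CONFIG_STATS array of a vdev nvlist, as done in zpool_get_prop()
 * below.
 */
#if 0
static void
example_print_vdev_state(vdev_stat_t *vs)
{
	(void) printf("state: %s\n",
	    zpool_state_to_name(vs->vs_state, vs->vs_aux));
}
#endif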

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
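
/*
 * Example (illustrative sketch, not built): fetching a formatted
 * property value.  zpool_get_prop() fills a caller-supplied buffer, so
 * no memory management is needed; the function name is hypothetical.
 */
#if 0
static void
example_print_health(zpool_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];

	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
	    sizeof (buf), NULL) == 0)
		(void) printf("%s: %s\n", zpool_get_name(zhp), buf);
}
#endif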

/*
 * Check that the bootfs name carries the same pool name as the pool it
 * is being set on.  Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
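
/*
 * Example (illustrative values, derived from the check above): for a pool
 * named "rpool", bootfs_name_valid() accepts "rpool" and
 * "rpool/ROOT/snv", but rejects "tank/ROOT" (wrong pool) and "rpoolx"
 * (prefix match without a '/' boundary).
 */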

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}

static boolean_t
pool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to be in the pool whose bootfs
			 * property is being set.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
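
/*
 * Example (illustrative sketch, not built): setting a pool property by
 * name.  The pool name "tank" is hypothetical.
 */
#if 0
static int
example_enable_autoexpand(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	int err;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return (-1);
	/* Validates the value, issues the ioctl, refreshes cached props. */
	err = zpool_set_prop(zhp, "autoexpand", "on");
	zpool_close(zhp);
	return (err);
}
#endif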

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally reporting an extended error
 * message via 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
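
/*
 * Example (illustrative sketch, not built): the usual open/use/close
 * lifecycle for a pool handle.  libzfs_init() is the standard library
 * entry point; the pool name "tank" is hypothetical.
 */
#if 0
static void
example_pool_lifecycle(void)
{
	libzfs_handle_t *hdl;
	zpool_handle_t *zhp;

	if ((hdl = libzfs_init()) == NULL)
		return;
	/* zpool_open() refuses FAULTED pools; see zpool_open_canfail(). */
	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
		(void) printf("%s is %s\n", zpool_get_name(zhp),
		    zpool_get_state(zhp) == POOL_STATE_ACTIVE ?
		    "active" : "unavailable");
		zpool_close(zhp);
	}
	libzfs_fini(hdl);
}
#endif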

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
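
/*
 * Example (illustrative sketch, not built): creating a pool.  Building
 * the vdev tree ('nvroot') is non-trivial and is normally done the way
 * zpool(1M) does it; here it is assumed to have been constructed
 * already.  The pool name and altroot value are hypothetical.
 */
#if 0
static int
example_create_pool(libzfs_handle_t *hdl, nvlist_t *nvroot)
{
	nvlist_t *props = NULL;
	int err;

	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
		return (-1);
	if (nvlist_add_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), "/mnt") != 0) {
		nvlist_free(props);
		return (-1);
	}
	err = zpool_create(hdl, "tank", nvroot, props, NULL);
	nvlist_free(props);
	return (err);
}
#endif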

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
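
/*
 * Example (illustrative sketch, not built): destroying a pool.  The
 * caller is responsible for cleaning up datasets first; names here are
 * hypothetical.  zpool_open_canfail() is used so a FAULTED pool can
 * still be destroyed.
 */
#if 0
static int
example_destroy_pool(libzfs_handle_t *hdl, const char *name)
{
	zpool_handle_t *zhp;
	int err;

	if ((zhp = zpool_open_canfail(hdl, name)) == NULL)
		return (-1);
	err = zpool_destroy(zhp);
	zpool_close(zhp);
	return (err);
}
#endif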

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool cannot have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}
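
/*
 * Example (illustrative sketch, not built): exporting a pool, escalating
 * to zpool_export_force() if a forced export fails.  This escalation
 * policy is hypothetical, not something zpool(1M) does by itself.
 */
#if 0
static int
example_export_pool(zpool_handle_t *zhp)
{
	if (zpool_export(zhp, B_TRUE) == 0)
		return (0);
	/* zpool_export_force() requests a hard-forced export. */
	return (zpool_export_force(zhp));
}
#endif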

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *rbi)
{
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || rbi == NULL)
		return;

	if (nvlist_lookup_uint64(rbi, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(rbi, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_uint64(config,
	    ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(config, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "), loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
	if (props)
		nvlist_free(props);
	return (ret);
}
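
/*
 * Example (illustrative sketch, not built): importing a discovered pool.
 * 'config' is assumed to come from the zpool_find_import() discovery
 * code, as the comment above zpool_import_props() describes.
 */
#if 0
static int
example_import_pool(libzfs_handle_t *hdl, nvlist_t *config)
{
	/* Keep the original name, no altroot. */
	return (zpool_import(hdl, config, NULL, NULL));
}
#endif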

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, boolean_t importfaulted)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nvi = NULL;
	char *thename;
	char *origname;
	uint64_t returned_size;
	int ret;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, B_TRUE, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	returned_size = zc.zc_nvlist_conf_size + 512;
	if (zcmd_alloc_dst_nvlist(hdl, &zc, returned_size) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = (uint64_t)importfaulted;
	ret = 0;
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];

		(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
		zpool_get_rewind_policy(config, &policy);
		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if ((policy.zrp_request & ZPOOL_TRY_REWIND) && nvi) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nvi);
			nvlist_free(nvi);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			(void) zpool_standard_error(hdl, errno, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -errno, nvi);
			nvlist_free(nvi);
			break;
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
		zpool_get_rewind_policy(config, &policy);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
		}
		nvlist_free(nvi);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}
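
/*
 * Example (illustrative sketch, not built): kicking off a full scrub.
 * POOL_SCRUB_EVERYTHING is assumed to be the pool_scrub_type_t value for
 * a full scrub in this era of the interface; POOL_SCRUB_NONE stops one
 * in progress.
 */
#if 0
static int
example_scrub_pool(zpool_handle_t *zhp)
{
	return (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING));
}
#endif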

/*
 * Find a vdev that matches the search criteria specified.  We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64: {
		uint64_t srchval, theguid, present;

		verify(nvpair_value_uint64(pair, &srchval) == 0);
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
			    &present) == 0) {
				/*
				 * If the device has never been present since
				 * import, the only reliable way to match the
				 * vdev is by GUID.
				 */
				verify(nvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_GUID, &theguid) == 0);
				if (theguid == srchval)
					return (nv);
			}
		}
		break;
	}

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value.  We special case the search
		 * for ZPOOL_CONFIG_PATH when it's a wholedisk and when
		 * looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && val) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				/*
				 * For whole disks, the internal path has 's0',
				 * but the path passed in by the user doesn't.
				 */
				if (strlen(srchval) == strlen(val) - 2 &&
				    strncmp(srchval, val, strlen(srchval)) == 0)
					return (nv);
				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
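
/*
 * Example (illustrative sketch, not built): looking up a vdev by any of
 * the forms zpool_find_vdev() accepts -- a guid, an interior vdev name
 * such as "mirror-0", a short device name, or a full path.
 */
#if 0
static void
example_find_vdev(zpool_handle_t *zhp, const char *dev)
{
	nvlist_t *tgt;
	boolean_t spare, l2cache, log;

	if ((tgt = zpool_find_vdev(zhp, dev, &spare, &l2cache,
	    &log)) != NULL) {
		(void) printf("%s: %s%s%s\n", dev,
		    spare ? "available spare" : "vdev",
		    l2cache ? " (l2cache)" : "",
		    log ? " (log)" : "");
	}
	/* 'tgt' points into the cached config; do not free it. */
}
#endif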

static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}

/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}

static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * A root pool cannot have EFI-labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}
1966 
1967 /*
1968  * Get phys_path for a root pool.
1969  * Return 0 on success; non-zero on failure.
1970  */
1971 int
1972 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t physpath_size)
1973 {
1974 	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
1975 	    physpath_size));
1976 }
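
/*
 * Usage sketch (illustrative only; this hypothetical helper is kept
 * under #if 0 and is not part of the library): fetch the physical
 * path of the root pool into a caller-supplied buffer.
 */
#if 0
static void
example_print_root_physpath(zpool_handle_t *zhp)
{
	char physpath[MAXPATHLEN];

	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
		(void) printf("root pool phys_path: %s\n", physpath);
}
#endif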
1977 
1978 /*
1979  * If the device has been dynamically expanded then we need to relabel
1980  * the disk to use the new unallocated space.
1981  */
1982 static int
1983 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
1984 {
1985 	char path[MAXPATHLEN];
1986 	char errbuf[1024];
1987 	int fd, error;
1988 	int (*_efi_use_whole_disk)(int);
1989 
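	/*
	 * Resolve efi_use_whole_disk() at run time; if the symbol is
	 * unavailable we simply fail, and the caller treats relabeling
	 * as best-effort.
	 */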
1990 	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
1991 	    "efi_use_whole_disk")) == NULL)
1992 		return (-1);
1993 
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot relabel '%s'"), name);
1994 	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
1995 
1996 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
1997 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
1998 		    "relabel '%s': unable to open device"), name);
1999 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2000 	}
2001 
2002 	/*
2003 	 * It's possible that we might encounter an error if the device
2004 	 * does not have any unallocated space left. If so, we simply
2005 	 * ignore that error and continue on.
2006 	 */
2007 	error = _efi_use_whole_disk(fd);
2008 	(void) close(fd);
2009 	if (error && error != VT_ENOSPC) {
2010 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2011 		    "relabel '%s': unable to read disk capacity"), name);
2012 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2013 	}
2014 	return (0);
2015 }
2016 
2017 /*
2018  * Bring the specified vdev online.  The 'flags' parameter is a set of the
2019  * ZFS_ONLINE_* flags.
2020  */
2021 int
2022 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2023     vdev_state_t *newstate)
2024 {
2025 	zfs_cmd_t zc = { 0 };
2026 	char msg[1024];
2027 	nvlist_t *tgt;
2028 	boolean_t avail_spare, l2cache, islog;
2029 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2030 
2031 	if (flags & ZFS_ONLINE_EXPAND) {
2032 		(void) snprintf(msg, sizeof (msg),
2033 		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2034 	} else {
2035 		(void) snprintf(msg, sizeof (msg),
2036 		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2037 	}
2038 
2039 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2040 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2041 	    &islog)) == NULL)
2042 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2043 
2044 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2045 
2046 	if (avail_spare)
2047 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2048 
2049 	if (flags & ZFS_ONLINE_EXPAND ||
2050 	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2051 		char *pathname = NULL;
2052 		uint64_t wholedisk = 0;
2053 
2054 		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2055 		    &wholedisk);
2056 		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2057 		    &pathname) == 0);
2058 
2059 		/*
2060 		 * XXX - L2ARC 1.0 devices can't support expansion.
2061 		 */
2062 		if (l2cache) {
2063 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2064 			    "cannot expand cache devices"));
2065 			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2066 		}
2067 
2068 		if (wholedisk) {
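			/* skip past "/dev/dsk/" to get the /dev/rdsk name */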
2069 			pathname += strlen(DISK_ROOT) + 1;
2070 			(void) zpool_relabel_disk(zhp->zpool_hdl, pathname);
2071 		}
2072 	}
2073 
2074 	zc.zc_cookie = VDEV_STATE_ONLINE;
2075 	zc.zc_obj = flags;
2076 
2077 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
2078 		return (zpool_standard_error(hdl, errno, msg));
2079 
2080 	*newstate = zc.zc_cookie;
2081 	return (0);
2082 }
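
/*
 * Usage sketch (hypothetical helper, kept under #if 0 and not compiled):
 * after a LUN has grown, bring the device online with expansion enabled.
 */
#if 0
static int
example_expand_vdev(zpool_handle_t *zhp, const char *dev)
{
	vdev_state_t newstate;

	return (zpool_vdev_online(zhp, dev, ZFS_ONLINE_EXPAND, &newstate));
}
#endif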
2083 
2084 /*
2085  * Take the specified vdev offline.
2086  */
2087 int
2088 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2089 {
2090 	zfs_cmd_t zc = { 0 };
2091 	char msg[1024];
2092 	nvlist_t *tgt;
2093 	boolean_t avail_spare, l2cache;
2094 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2095 
2096 	(void) snprintf(msg, sizeof (msg),
2097 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2098 
2099 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2100 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2101 	    NULL)) == NULL)
2102 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2103 
2104 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2105 
2106 	if (avail_spare)
2107 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2108 
2109 	zc.zc_cookie = VDEV_STATE_OFFLINE;
2110 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2111 
2112 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2113 		return (0);
2114 
2115 	switch (errno) {
2116 	case EBUSY:
2118 		/*
2119 		 * There are no other replicas of this device.
2120 		 */
2121 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2122 
2123 	case EEXIST:
2124 		/*
2125 		 * The log device has unplayed logs.
2126 		 */
2127 		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2128 
2129 	default:
2130 		return (zpool_standard_error(hdl, errno, msg));
2131 	}
2132 }
2133 
2134 /*
2135  * Mark the given vdev faulted.
2136  */
2137 int
2138 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2139 {
2140 	zfs_cmd_t zc = { 0 };
2141 	char msg[1024];
2142 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2143 
2144 	(void) snprintf(msg, sizeof (msg),
2145 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2146 
2147 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2148 	zc.zc_guid = guid;
2149 	zc.zc_cookie = VDEV_STATE_FAULTED;
2150 	zc.zc_obj = aux;
2151 
2152 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2153 		return (0);
2154 
2155 	switch (errno) {
2156 	case EBUSY:
2158 		/*
2159 		 * There are no other replicas of this device.
2160 		 */
2161 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2162 
2163 	default:
2164 		return (zpool_standard_error(hdl, errno, msg));
2165 	}
2167 }
2168 
2169 /*
2170  * Mark the given vdev degraded.
2171  */
2172 int
2173 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2174 {
2175 	zfs_cmd_t zc = { 0 };
2176 	char msg[1024];
2177 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2178 
2179 	(void) snprintf(msg, sizeof (msg),
2180 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2181 
2182 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2183 	zc.zc_guid = guid;
2184 	zc.zc_cookie = VDEV_STATE_DEGRADED;
2185 	zc.zc_obj = aux;
2186 
2187 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2188 		return (0);
2189 
2190 	return (zpool_standard_error(hdl, errno, msg));
2191 }
2192 
2193 /*
2194  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2195  * a hot spare.
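 * 'which' selects the child of the interior spare vdev that must match
 * 'tgt': 0 for the original device, 1 for the active spare.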
2196  */
2197 static boolean_t
2198 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2199 {
2200 	nvlist_t **child;
2201 	uint_t c, children;
2202 	char *type;
2203 
2204 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2205 	    &children) == 0) {
2206 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2207 		    &type) == 0);
2208 
2209 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2210 		    children == 2 && child[which] == tgt)
2211 			return (B_TRUE);
2212 
2213 		for (c = 0; c < children; c++)
2214 			if (is_replacing_spare(child[c], tgt, which))
2215 				return (B_TRUE);
2216 	}
2217 
2218 	return (B_FALSE);
2219 }
2220 
2221 /*
2222  * Attach new_disk (fully described by nvroot) to old_disk.
2223  * If 'replacing' is specified, the new disk will replace the old one.
2224  */
2225 int
2226 zpool_vdev_attach(zpool_handle_t *zhp,
2227     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2228 {
2229 	zfs_cmd_t zc = { 0 };
2230 	char msg[1024];
2231 	int ret;
2232 	nvlist_t *tgt;
2233 	boolean_t avail_spare, l2cache, islog;
2234 	uint64_t val;
2235 	char *path, *newname;
2236 	nvlist_t **child;
2237 	uint_t children;
2238 	nvlist_t *config_root;
2239 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2240 	boolean_t rootpool = pool_is_bootable(zhp);
2241 
2242 	if (replacing)
2243 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2244 		    "cannot replace %s with %s"), old_disk, new_disk);
2245 	else
2246 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2247 		    "cannot attach %s to %s"), new_disk, old_disk);
2248 
2249 	/*
2250 	 * If this is a root pool, make sure that we're not attaching an
2251 	 * EFI labeled device.
2252 	 */
2253 	if (rootpool && pool_uses_efi(nvroot)) {
2254 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2255 		    "EFI labeled devices are not supported on root pools."));
2256 		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2257 	}
2258 
2259 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2260 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2261 	    &islog)) == NULL)
2262 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2263 
2264 	if (avail_spare)
2265 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2266 
2267 	if (l2cache)
2268 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2269 
2270 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2271 	zc.zc_cookie = replacing;
2272 
2273 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2274 	    &child, &children) != 0 || children != 1) {
2275 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2276 		    "new device must be a single disk"));
2277 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2278 	}
2279 
2280 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2281 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2282 
2283 	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2284 		return (-1);
2285 
2286 	/*
2287 	 * If the target is a hot spare that has been swapped in, we can only
2288 	 * replace it with another hot spare.
2289 	 */
2290 	if (replacing &&
2291 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2292 	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2293 	    NULL) == NULL || !avail_spare) &&
2294 	    is_replacing_spare(config_root, tgt, 1)) {
2295 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2296 		    "can only be replaced by another hot spare"));
2297 		free(newname);
2298 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2299 	}
2300 
2301 	/*
2302 	 * If we are attempting to replace a spare, it cannot be applied to an
2303 	 * already spared device.
2304 	 */
2305 	if (replacing &&
2306 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
2307 	    zpool_find_vdev(zhp, newname, &avail_spare,
2308 	    &l2cache, NULL) != NULL && avail_spare &&
2309 	    is_replacing_spare(config_root, tgt, 0)) {
2310 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2311 		    "device has already been replaced with a spare"));
2312 		free(newname);
2313 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2314 	}
2315 
2316 	free(newname);
2317 
2318 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2319 		return (-1);
2320 
2321 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2322 
2323 	zcmd_free_nvlists(&zc);
2324 
2325 	if (ret == 0) {
2326 		if (rootpool) {
2327 			/*
2328 			 * XXX - This should be removed once we can
2329 			 * automatically install the bootblocks on the
2330 			 * newly attached disk.
2331 			 */
2332 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
2333 			    "be sure to invoke %s to make '%s' bootable.\n"),
2334 			    BOOTCMD, new_disk);
2335 
2336 			/*
2337 			 * XXX need a better way to prevent user from
2338 			 * booting up a half-baked vdev.
2339 			 */
2340 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2341 			    "sure to wait until resilver is done "
2342 			    "before rebooting.\n"));
2343 		}
2344 		return (0);
2345 	}
2346 
2347 	switch (errno) {
2348 	case ENOTSUP:
2349 		/*
2350 		 * Can't attach to or replace this type of vdev.
2351 		 */
2352 		if (replacing) {
2353 			if (islog)
2354 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2355 				    "cannot replace a log with a spare"));
2356 			else
2357 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2358 				    "cannot replace a replacing device"));
2359 		} else {
2360 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2361 			    "can only attach to mirrors and top-level "
2362 			    "disks"));
2363 		}
2364 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
2365 		break;
2366 
2367 	case EINVAL:
2368 		/*
2369 		 * The new device must be a single disk.
2370 		 */
2371 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2372 		    "new device must be a single disk"));
2373 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2374 		break;
2375 
2376 	case EBUSY:
2377 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2378 		    new_disk);
2379 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2380 		break;
2381 
2382 	case EOVERFLOW:
2383 		/*
2384 		 * The new device is too small.
2385 		 */
2386 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2387 		    "device is too small"));
2388 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2389 		break;
2390 
2391 	case EDOM:
2392 		/*
2393 		 * The new device has a different alignment requirement.
2394 		 */
2395 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2396 		    "devices have different sector alignment"));
2397 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2398 		break;
2399 
2400 	case ENAMETOOLONG:
2401 		/*
2402 		 * The resulting top-level vdev spec won't fit in the label.
2403 		 */
2404 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2405 		break;
2406 
2407 	default:
2408 		(void) zpool_standard_error(hdl, errno, msg);
2409 	}
2410 
2411 	return (-1);
2412 }
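
/*
 * Usage sketch (hypothetical, kept under #if 0 and not compiled): attach
 * new_disk as a mirror of old_disk.  A real caller (e.g. zpool(1M))
 * builds a fuller nvroot; only the minimal fields are shown here.
 */
#if 0
static int
example_attach_mirror(zpool_handle_t *zhp, const char *old_disk,
    const char *new_disk)
{
	nvlist_t *nvroot, *vdev;
	int err;

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, new_disk) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &vdev, 1) == 0);

	err = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, B_FALSE);

	nvlist_free(vdev);
	nvlist_free(nvroot);
	return (err);
}
#endif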
2413 
2414 /*
2415  * Detach the specified device.
2416  */
2417 int
2418 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2419 {
2420 	zfs_cmd_t zc = { 0 };
2421 	char msg[1024];
2422 	nvlist_t *tgt;
2423 	boolean_t avail_spare, l2cache;
2424 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2425 
2426 	(void) snprintf(msg, sizeof (msg),
2427 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2428 
2429 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2430 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2431 	    NULL)) == NULL)
2432 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2433 
2434 	if (avail_spare)
2435 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2436 
2437 	if (l2cache)
2438 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2439 
2440 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2441 
2442 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2443 		return (0);
2444 
2445 	switch (errno) {
2447 	case ENOTSUP:
2448 		/*
2449 		 * Can't detach from this type of vdev.
2450 		 */
2451 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2452 		    "applicable to mirror and replacing vdevs"));
2453 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
2454 		break;
2455 
2456 	case EBUSY:
2457 		/*
2458 		 * There are no other replicas of this device.
2459 		 */
2460 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2461 		break;
2462 
2463 	default:
2464 		(void) zpool_standard_error(hdl, errno, msg);
2465 	}
2466 
2467 	return (-1);
2468 }
2469 
2470 /*
2471  * Remove the given device.  Currently, this is supported only for hot spares
2472  * and level 2 cache devices.
2473  */
2474 int
2475 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2476 {
2477 	zfs_cmd_t zc = { 0 };
2478 	char msg[1024];
2479 	nvlist_t *tgt;
2480 	boolean_t avail_spare, l2cache, islog;
2481 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2482 	uint64_t version;
2483 
2484 	(void) snprintf(msg, sizeof (msg),
2485 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2486 
2487 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2488 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2489 	    &islog)) == NULL)
2490 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2491 	/*
2492 	 * XXX - this should just go away.
2493 	 */
2494 	if (!avail_spare && !l2cache && !islog) {
2495 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2496 		    "only inactive hot spares, cache, top-level, "
2497 		    "or log devices can be removed"));
2498 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2499 	}
2500 
2501 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2502 	if (islog && version < SPA_VERSION_HOLES) {
2503 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2504 		    "pool must be upgraded to support log removal"));
2505 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
2506 	}
2507 
2508 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2509 
2510 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2511 		return (0);
2512 
2513 	return (zpool_standard_error(hdl, errno, msg));
2514 }
2515 
2516 /*
2517  * Clear the errors for the pool, or the particular device if specified.
2518  */
2519 int
2520 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
2521 {
2522 	zfs_cmd_t zc = { 0 };
2523 	char msg[1024];
2524 	nvlist_t *tgt;
2525 	zpool_rewind_policy_t policy;
2526 	boolean_t avail_spare, l2cache;
2527 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2528 	nvlist_t *nvi = NULL;
2529 
2530 	if (path)
2531 		(void) snprintf(msg, sizeof (msg),
2532 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2533 		    path);
2534 	else
2535 		(void) snprintf(msg, sizeof (msg),
2536 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2537 		    zhp->zpool_name);
2538 
2539 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2540 	if (path) {
2541 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2542 		    &l2cache, NULL)) == NULL)
2543 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
2544 
2545 		/*
2546 		 * Don't allow error clearing for hot spares.  Do allow
2547 		 * error clearing for l2cache devices.
2548 		 */
2549 		if (avail_spare)
2550 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
2551 
2552 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2553 		    &zc.zc_guid) == 0);
2554 	}
2555 
2556 	zpool_get_rewind_policy(rewindnvl, &policy);
2557 	zc.zc_cookie = policy.zrp_request;
2558 
2559 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 8192) != 0)
2560 		return (-1);
2561 
2562 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, rewindnvl) != 0)
2563 		return (-1);
2564 
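	/*
	 * If only a rewind check was requested (ZPOOL_TRY_REWIND), the
	 * ioctl is expected to fail; any errno other than EPERM or
	 * EACCES still lets us report what a rewind would do.
	 */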
2565 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0 ||
2566 	    ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
2567 	    errno != EPERM && errno != EACCES)) {
2568 		if (policy.zrp_request &
2569 		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2570 			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
2571 			zpool_rewind_exclaim(hdl, zc.zc_name,
2572 			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
2573 			    nvi);
2574 			nvlist_free(nvi);
2575 		}
2576 		zcmd_free_nvlists(&zc);
2577 		return (0);
2578 	}
2579 
2580 	zcmd_free_nvlists(&zc);
2581 	return (zpool_standard_error(hdl, errno, msg));
2582 }
2583 
2584 /*
2585  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2586  */
2587 int
2588 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2589 {
2590 	zfs_cmd_t zc = { 0 };
2591 	char msg[1024];
2592 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2593 
2594 	(void) snprintf(msg, sizeof (msg),
2595 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2596 	    (u_longlong_t)guid);
2597 
2598 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2599 	zc.zc_guid = guid;
2600 
2601 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2602 		return (0);
2603 
2604 	return (zpool_standard_error(hdl, errno, msg));
2605 }
2606 
2607 /*
2608  * Convert from a devid string to a path.
2609  */
2610 static char *
2611 devid_to_path(char *devid_str)
2612 {
2613 	ddi_devid_t devid;
2614 	char *minor;
2615 	char *path;
2616 	devid_nmlist_t *list = NULL;
2617 	int ret;
2618 
2619 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2620 		return (NULL);
2621 
2622 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2623 
2624 	devid_str_free(minor);
2625 	devid_free(devid);
2626 
2627 	if (ret != 0)
2628 		return (NULL);
2629 
2630 	if ((path = strdup(list[0].devname)) == NULL)
2631 		return (NULL);
2632 
2633 	devid_free_nmlist(list);
2634 
2635 	return (path);
2636 }
2637 
2638 /*
2639  * Convert from a path to a devid string.
2640  */
2641 static char *
2642 path_to_devid(const char *path)
2643 {
2644 	int fd;
2645 	ddi_devid_t devid;
2646 	char *minor, *ret;
2647 
2648 	if ((fd = open(path, O_RDONLY)) < 0)
2649 		return (NULL);
2650 
2651 	minor = NULL;
2652 	ret = NULL;
2653 	if (devid_get(fd, &devid) == 0) {
2654 		if (devid_get_minor_name(fd, &minor) == 0)
2655 			ret = devid_str_encode(devid, minor);
2656 		if (minor != NULL)
2657 			devid_str_free(minor);
2658 		devid_free(devid);
2659 	}
2660 	(void) close(fd);
2661 
2662 	return (ret);
2663 }
2664 
2665 /*
2666  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2667  * ignore any failure here, since a common case is for an unprivileged user to
2668  * type 'zpool status', and we'll display the correct information anyway.
2669  */
2670 static void
2671 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2672 {
2673 	zfs_cmd_t zc = { 0 };
2674 
2675 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2676 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
2677 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2678 	    &zc.zc_guid) == 0);
2679 
2680 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2681 }
2682 
2683 /*
2684  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2685  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2686  * We also check if this is a whole disk, in which case we strip off the
2687  * trailing 's0' slice name.
2688  *
2689  * This routine is also responsible for identifying when disks have been
2690  * reconfigured in a new location.  The kernel will have opened the device by
2691  * devid, but the path will still refer to the old location.  To catch this, we
2692  * first do a path -> devid translation (which is fast for the common case).  If
2693  * the devid matches, we're done.  If not, we do a reverse devid -> path
2694  * translation and issue the appropriate ioctl() to update the path of the vdev.
2695  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2696  * of these checks.
2697  */
2698 char *
2699 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
2700     boolean_t verbose)
2701 {
2702 	char *path, *devid;
2703 	uint64_t value;
2704 	char buf[64];
2705 	vdev_stat_t *vs;
2706 	uint_t vsc;
2707 
2708 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2709 	    &value) == 0) {
2710 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2711 		    &value) == 0);
2712 		(void) snprintf(buf, sizeof (buf), "%llu",
2713 		    (u_longlong_t)value);
2714 		path = buf;
2715 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2716 
2717 		/*
2718 		 * If the device is dead (faulted, offline, etc) then don't
2719 		 * bother opening it.  Otherwise we may be forcing the user to
2720 		 * open a misbehaving device, which can have undesirable
2721 		 * effects.
2722 		 */
2723 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2724 		    (uint64_t **)&vs, &vsc) != 0 ||
2725 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2726 		    zhp != NULL &&
2727 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2728 			/*
2729 			 * Determine if the current path is correct.
2730 			 */
2731 			char *newdevid = path_to_devid(path);
2732 
2733 			if (newdevid == NULL ||
2734 			    strcmp(devid, newdevid) != 0) {
2735 				char *newpath;
2736 
2737 				if ((newpath = devid_to_path(devid)) != NULL) {
2738 					/*
2739 					 * Update the path appropriately.
2740 					 */
2741 					set_path(zhp, nv, newpath);
2742 					if (nvlist_add_string(nv,
2743 					    ZPOOL_CONFIG_PATH, newpath) == 0)
2744 						verify(nvlist_lookup_string(nv,
2745 						    ZPOOL_CONFIG_PATH,
2746 						    &path) == 0);
2747 					free(newpath);
2748 				}
2749 			}
2750 
2751 			if (newdevid)
2752 				devid_str_free(newdevid);
2753 		}
2754 
2755 		if (strncmp(path, "/dev/dsk/", 9) == 0)
2756 			path += 9;
2757 
2758 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2759 		    &value) == 0 && value) {
2760 			char *tmp = zfs_strdup(hdl, path);
2761 			if (tmp == NULL)
2762 				return (NULL);
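			/* strip the trailing "s0" slice from a whole disk */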
2763 			tmp[strlen(path) - 2] = '\0';
2764 			return (tmp);
2765 		}
2766 	} else {
2767 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2768 
2769 		/*
2770 		 * If it's a raidz device, we need to stick in the parity level.
2771 		 */
2772 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2773 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2774 			    &value) == 0);
2775 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
2776 			    (u_longlong_t)value);
2777 			path = buf;
2778 		}
2779 
2780 		/*
2781 		 * We identify each top-level vdev by using a <type-id>
2782 		 * naming convention.
2783 		 */
2784 		if (verbose) {
2785 			uint64_t id;
2786 
2787 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2788 			    &id) == 0);
2789 			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
2790 			    (u_longlong_t)id);
2791 			path = buf;
2792 		}
2793 	}
2794 
2795 	return (zfs_strdup(hdl, path));
2796 }
2797 
2798 static int
2799 zbookmark_compare(const void *a, const void *b)
2800 {
2801 	return (memcmp(a, b, sizeof (zbookmark_t)));
2802 }
2803 
2804 /*
2805  * Retrieve the persistent error log, uniquify the members, and return to the
2806  * caller.
2807  */
2808 int
2809 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2810 {
2811 	zfs_cmd_t zc = { 0 };
2812 	uint64_t count;
2813 	zbookmark_t *zb = NULL;
2814 	int i;
2815 
2816 	/*
2817 	 * Retrieve the raw error list from the kernel.  If the number of errors
2818 	 * has increased, allocate more space and continue until we get the
2819 	 * entire list.
2820 	 */
2821 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2822 	    &count) == 0);
2823 	if (count == 0)
2824 		return (0);
2825 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2826 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2827 		return (-1);
2828 	zc.zc_nvlist_dst_size = count;
2829 	(void) strcpy(zc.zc_name, zhp->zpool_name);
2830 	for (;;) {
2831 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2832 		    &zc) != 0) {
2833 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
2834 			if (errno == ENOMEM) {
2835 				count = zc.zc_nvlist_dst_size;
2836 				if ((zc.zc_nvlist_dst = (uintptr_t)
2837 				    zfs_alloc(zhp->zpool_hdl, count *
2838 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
2839 					return (-1);
2840 			} else {
2841 				return (-1);
2842 			}
2843 		} else {
2844 			break;
2845 		}
2846 	}
2847 
2848 	/*
2849 	 * Sort the resulting bookmarks.  This is a little confusing due to the
2850 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
2851 	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
2852 	 * _not_ copied as part of the process.  So we point the start of our
2853 	 * array appropriately and decrement the total number of elements.
2854 	 */
2855 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2856 	    zc.zc_nvlist_dst_size;
2857 	count -= zc.zc_nvlist_dst_size;
2858 
2859 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2860 
2861 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2862 
2863 	/*
2864 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2865 	 */
2866 	for (i = 0; i < count; i++) {
2867 		nvlist_t *nv;
2868 
2869 		/* ignoring zb_blkid and zb_level for now */
2870 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2871 		    zb[i-1].zb_object == zb[i].zb_object)
2872 			continue;
2873 
2874 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2875 			goto nomem;
2876 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2877 		    zb[i].zb_objset) != 0) {
2878 			nvlist_free(nv);
2879 			goto nomem;
2880 		}
2881 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2882 		    zb[i].zb_object) != 0) {
2883 			nvlist_free(nv);
2884 			goto nomem;
2885 		}
2886 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2887 			nvlist_free(nv);
2888 			goto nomem;
2889 		}
2890 		nvlist_free(nv);
2891 	}
2892 
2893 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2894 	return (0);
2895 
2896 nomem:
2897 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2898 	return (no_memory(zhp->zpool_hdl));
2899 }
2900 
2901 /*
2902  * Upgrade a ZFS pool to the latest on-disk version.
2903  */
2904 int
2905 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2906 {
2907 	zfs_cmd_t zc = { 0 };
2908 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2909 
2910 	(void) strcpy(zc.zc_name, zhp->zpool_name);
2911 	zc.zc_cookie = new_version;
2912 
2913 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2914 		return (zpool_standard_error_fmt(hdl, errno,
2915 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2916 		    zhp->zpool_name));
2917 	return (0);
2918 }
2919 
2920 void
2921 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2922     char *history_str)
2923 {
2924 	int i;
2925 
2926 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2927 	for (i = 1; i < argc; i++) {
2928 		if (strlen(history_str) + 1 + strlen(argv[i]) >
2929 		    HIS_MAX_RECORD_LEN)
2930 			break;
2931 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2932 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2933 	}
2934 }
2935 
2936 /*
2937  * Stage command history for logging.
2938  */
2939 int
2940 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2941 {
2942 	if (history_str == NULL)
2943 		return (EINVAL);
2944 
2945 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2946 		return (EINVAL);
2947 
2948 	if (hdl->libzfs_log_str != NULL)
2949 		free(hdl->libzfs_log_str);
2950 
2951 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2952 		return (no_memory(hdl));
2953 
2954 	return (0);
2955 }
2956 
2957 /*
2958  * Perform ioctl to get some command history of a pool.
2959  *
2960  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2961  * logical offset of the history buffer to start reading from.
2962  *
2963  * Upon return, 'off' is the next logical offset to read from and
2964  * 'len' is the actual number of bytes read into 'buf'.
2965  */
2966 static int
2967 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2968 {
2969 	zfs_cmd_t zc = { 0 };
2970 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2971 
2972 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2973 
2974 	zc.zc_history = (uint64_t)(uintptr_t)buf;
2975 	zc.zc_history_len = *len;
2976 	zc.zc_history_offset = *off;
2977 
2978 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2979 		switch (errno) {
2980 		case EPERM:
2981 			return (zfs_error_fmt(hdl, EZFS_PERM,
2982 			    dgettext(TEXT_DOMAIN,
2983 			    "cannot show history for pool '%s'"),
2984 			    zhp->zpool_name));
2985 		case ENOENT:
2986 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2987 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2988 			    "'%s'"), zhp->zpool_name));
2989 		case ENOTSUP:
2990 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2991 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2992 			    "'%s', pool must be upgraded"), zhp->zpool_name));
2993 		default:
2994 			return (zpool_standard_error_fmt(hdl, errno,
2995 			    dgettext(TEXT_DOMAIN,
2996 			    "cannot get history for '%s'"), zhp->zpool_name));
2997 		}
2998 	}
2999 
3000 	*len = zc.zc_history_len;
3001 	*off = zc.zc_history_offset;
3002 
3003 	return (0);
3004 }
3005 
3006 /*
3007  * Process the buffer of nvlists, unpacking and storing each nvlist record
3008  * into 'records'.  'leftover' is set to the number of bytes that weren't
3009  * processed as there wasn't a complete record.
3010  */
3011 int
3012 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3013     nvlist_t ***records, uint_t *numrecords)
3014 {
3015 	uint64_t reclen;
3016 	nvlist_t *nv;
3017 	int i;
3018 
3019 	while (bytes_read > sizeof (reclen)) {
3020 
3021 		/* get length of packed record (stored as little endian) */
3022 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3023 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3024 
3025 		if (bytes_read < sizeof (reclen) + reclen)
3026 			break;
3027 
3028 		/* unpack record */
3029 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3030 			return (ENOMEM);
3031 		bytes_read -= sizeof (reclen) + reclen;
3032 		buf += sizeof (reclen) + reclen;
3033 
3034 		/* add record to nvlist array */
3035 		(*numrecords)++;
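		/*
		 * Grow the records array geometrically: double it whenever
		 * the record count crosses a power of two.
		 */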
3036 		if (ISP2(*numrecords + 1)) {
3037 			nvlist_t **tmp = realloc(*records,
3038 			    *numrecords * 2 * sizeof (nvlist_t *));

			if (tmp == NULL) {
				nvlist_free(nv);
				(*numrecords)--;
				return (ENOMEM);
			}
			*records = tmp;
3039 		}
3040 		(*records)[*numrecords - 1] = nv;
3041 	}
3042 
3043 	*leftover = bytes_read;
3044 	return (0);
3045 }
3046 
3047 #define	HIS_BUF_LEN	(128*1024)
3048 
3049 /*
3050  * Retrieve the command history of a pool.
3051  */
3052 int
3053 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3054 {
3055 	char buf[HIS_BUF_LEN];
3056 	uint64_t off = 0;
3057 	nvlist_t **records = NULL;
3058 	uint_t numrecords = 0;
3059 	int err, i;
3060 
3061 	do {
3062 		uint64_t bytes_read = sizeof (buf);
3063 		uint64_t leftover;
3064 
3065 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3066 			break;
3067 
3068 		/* if nothing else was read in, we're at EOF, just return */
3069 		if (!bytes_read)
3070 			break;
3071 
3072 		if ((err = zpool_history_unpack(buf, bytes_read,
3073 		    &leftover, &records, &numrecords)) != 0)
3074 			break;
3075 		off -= leftover;
3076 
3077 		/* CONSTCOND */
3078 	} while (1);
3079 
3080 	if (!err) {
3081 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3082 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3083 		    records, numrecords) == 0);
3084 	}
3085 	for (i = 0; i < numrecords; i++)
3086 		nvlist_free(records[i]);
3087 	free(records);
3088 
3089 	return (err);
3090 }
3091 
3092 void
3093 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3094     char *pathname, size_t len)
3095 {
3096 	zfs_cmd_t zc = { 0 };
3097 	boolean_t mounted = B_FALSE;
3098 	char *mntpnt = NULL;
3099 	char dsname[MAXNAMELEN];
3100 
3101 	if (dsobj == 0) {
3102 		/* special case for the MOS */
3103 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (u_longlong_t)obj);
3104 		return;
3105 	}
3106 
3107 	/* get the dataset's name */
3108 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3109 	zc.zc_obj = dsobj;
3110 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
3111 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3112 		/* just write out a path of two object numbers */
3113 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3114 		    (u_longlong_t)dsobj, (u_longlong_t)obj);
3115 		return;
3116 	}
3117 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3118 
3119 	/* find out if the dataset is mounted */
3120 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3121 
3122 	/* get the corrupted object's path */
3123 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3124 	zc.zc_obj = obj;
3125 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3126 	    &zc) == 0) {
3127 		if (mounted) {
3128 			(void) snprintf(pathname, len, "%s%s", mntpnt,
3129 			    zc.zc_value);
3130 		} else {
3131 			(void) snprintf(pathname, len, "%s:%s",
3132 			    dsname, zc.zc_value);
3133 		}
3134 	} else {
3135 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (u_longlong_t)obj);
3136 	}
3137 	free(mntpnt);
3138 }
3139 
3140 /*
3141  * Read the EFI label from the config; if a label does not exist then
3142  * pass back the error to the caller. If the caller has passed a non-NULL
3143  * diskaddr argument then we set it to the starting address of the EFI
3144  * partition.
3145  */
3146 static int
3147 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3148 {
3149 	char *path;
3150 	int fd;
3151 	char diskname[MAXPATHLEN];
3152 	int err = -1;
3153 
3154 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3155 		return (err);
3156 
3157 	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3158 	    strrchr(path, '/'));
3159 	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3160 		struct dk_gpt *vtoc;
3161 
3162 		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3163 			if (sb != NULL)
3164 				*sb = vtoc->efi_parts[0].p_start;
3165 			efi_free(vtoc);
3166 		}
3167 		(void) close(fd);
3168 	}
3169 	return (err);
3170 }
3171 
3172 /*
3173  * determine where a partition starts on a disk in the current
3174  * configuration
3175  */
3176 static diskaddr_t
3177 find_start_block(nvlist_t *config)
3178 {
3179 	nvlist_t **child;
3180 	uint_t c, children;
3181 	diskaddr_t sb = MAXOFFSET_T;
3182 	uint64_t wholedisk;
3183 
3184 	if (nvlist_lookup_nvlist_array(config,
3185 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3186 		if (nvlist_lookup_uint64(config,
3187 		    ZPOOL_CONFIG_WHOLE_DISK,
3188 		    &wholedisk) != 0 || !wholedisk) {
3189 			return (MAXOFFSET_T);
3190 		}
3191 		if (read_efi_label(config, &sb) < 0)
3192 			sb = MAXOFFSET_T;
3193 		return (sb);
3194 	}
3195 
3196 	for (c = 0; c < children; c++) {
3197 		sb = find_start_block(child[c]);
3198 		if (sb != MAXOFFSET_T) {
3199 			return (sb);
3200 		}
3201 	}
3202 	return (MAXOFFSET_T);
3203 }
3204 
3205 /*
3206  * Label an individual disk.  The name provided is the short name,
3207  * stripped of any leading /dev path.
3208  */
3209 int
3210 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3211 {
3212 	char path[MAXPATHLEN];
3213 	struct dk_gpt *vtoc;
3214 	int fd;
3215 	size_t resv = EFI_MIN_RESV_SIZE;
3216 	uint64_t slice_size;
3217 	diskaddr_t start_block;
3218 	char errbuf[1024];
3219 
3220 	/* prepare an error message just in case */
3221 	(void) snprintf(errbuf, sizeof (errbuf),
3222 	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3223 
3224 	if (zhp) {
3225 		nvlist_t *nvroot;
3226 
3227 		if (pool_is_bootable(zhp)) {
3228 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3229 			    "EFI labeled devices are not supported on root "
3230 			    "pools."));
3231 			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3232 		}
3233 
3234 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
3235 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3236 
3237 		if (zhp->zpool_start_block == 0)
3238 			start_block = find_start_block(nvroot);
3239 		else
3240 			start_block = zhp->zpool_start_block;
3241 		zhp->zpool_start_block = start_block;
3242 	} else {
3243 		/* new pool */
3244 		start_block = NEW_START_BLOCK;
3245 	}
3246 
3247 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3248 	    BACKUP_SLICE);
3249 
3250 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3251 		/*
3252 		 * This shouldn't happen.  We've long since verified that this
3253 		 * is a valid device.
3254 		 */
3255 		zfs_error_aux(hdl,
3256 		    dgettext(TEXT_DOMAIN, "unable to open device"));
3257 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3258 	}
3259 
3260 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3261 		/*
3262 		 * The only way this can fail is if we run out of memory, or we
3263 		 * were unable to read the disk's capacity.
3264 		 */
3265 		if (errno == ENOMEM)
3266 			(void) no_memory(hdl);
3267 
3268 		(void) close(fd);
3269 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3270 		    "unable to read disk capacity"));
3271 
3272 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3273 	}
3274 
3275 	slice_size = vtoc->efi_last_u_lba + 1;
3276 	slice_size -= EFI_MIN_RESV_SIZE;
3277 	if (start_block == MAXOFFSET_T)
3278 		start_block = NEW_START_BLOCK;
3279 	slice_size -= start_block;
3280 
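	/*
	 * Slice 0 holds all usable space between start_block and the
	 * EFI reserved area; slice 8 holds the reserve itself.
	 */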
3281 	vtoc->efi_parts[0].p_start = start_block;
3282 	vtoc->efi_parts[0].p_size = slice_size;
3283 
3284 	/*
3285 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
3286 	 * disposable by some EFI utilities (since EFI doesn't have a backup
3287 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
3288 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
3289 	 * etc. were all pretty specific.  V_USR is as close to reality as we
3290 	 * can get, in the absence of V_OTHER.
3291 	 */
3292 	vtoc->efi_parts[0].p_tag = V_USR;
3293 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3294 
3295 	vtoc->efi_parts[8].p_start = slice_size + start_block;
3296 	vtoc->efi_parts[8].p_size = resv;
3297 	vtoc->efi_parts[8].p_tag = V_RESERVED;
3298 
3299 	if (efi_write(fd, vtoc) != 0) {
3300 		/*
3301 		 * Some block drivers (like pcata) may not support EFI
3302 		 * GPT labels.  Print out a helpful error message directing
3303 		 * the user to manually label the disk and give a specific
3304 		 * slice.
3305 		 */
3306 		(void) close(fd);
3307 		efi_free(vtoc);
3308 
3309 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3310 		    "try using fdisk(1M) and then provide a specific slice"));
3311 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3312 	}
3313 
3314 	(void) close(fd);
3315 	efi_free(vtoc);
3316 	return (0);
3317 }
3318 
3319 static boolean_t
3320 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3321 {
3322 	char *type;
3323 	nvlist_t **child;
3324 	uint_t children, c;
3325 
3326 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3327 	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3328 	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
3329 	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
3330 	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3331 	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
3332 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3333 		    "vdev type '%s' is not supported"), type);
3334 		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3335 		return (B_FALSE);
3336 	}
3337 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3338 	    &child, &children) == 0) {
3339 		for (c = 0; c < children; c++) {
3340 			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3341 				return (B_FALSE);
3342 		}
3343 	}
3344 	return (B_TRUE);
3345 }
3346 
3347 /*
3348  * check if this zvol is allowable for use as a dump device; zero if
3349  * it is, > 0 if it isn't, < 0 if it isn't a zvol
3350  */
3351 int
3352 zvol_check_dump_config(char *arg)
3353 {
3354 	zpool_handle_t *zhp = NULL;
3355 	nvlist_t *config, *nvroot;
3356 	char *p, *volname;
3357 	nvlist_t **top;
3358 	uint_t toplevels;
3359 	libzfs_handle_t *hdl;
3360 	char errbuf[1024];
3361 	char poolname[ZPOOL_MAXNAMELEN];
3362 	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3363 	int ret = 1;
3364 
3365 	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3366 		return (-1);
3367 	}
3368 
3369 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3370 	    "dump is not supported on device '%s'"), arg);
3371 
3372 	if ((hdl = libzfs_init()) == NULL)
3373 		return (1);
3374 	libzfs_print_on_error(hdl, B_TRUE);
3375 
3376 	volname = arg + pathlen;
3377 
3378 	/* check the configuration of the pool */
3379 	if ((p = strchr(volname, '/')) == NULL) {
3380 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3381 		    "malformed dataset name"));
3382 		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
3383 		return (1);
3384 	} else if (p - volname >= ZFS_MAXNAMELEN) {
3385 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3386 		    "dataset name is too long"));
3387 		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
3388 		return (1);
3389 	} else {
3390 		(void) strncpy(poolname, volname, p - volname);
3391 		poolname[p - volname] = '\0';
3392 	}
3393 
3394 	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
3395 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3396 		    "could not open pool '%s'"), poolname);
3397 		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
3398 		goto out;
3399 	}
3400 	config = zpool_get_config(zhp, NULL);
3401 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3402 	    &nvroot) != 0) {
3403 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3404 		    "could not obtain vdev configuration for '%s'"), poolname);
3405 		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3406 		goto out;
3407 	}
3408 
3409 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3410 	    &top, &toplevels) == 0);
3411 	if (toplevels != 1) {
3412 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3413 		    "'%s' has multiple top level vdevs"), poolname);
3414 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3415 		goto out;
3416 	}
3417 
3418 	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
3419 		goto out;
3420 	}
3421 	ret = 0;
3422 
3423 out:
3424 	if (zhp)
3425 		zpool_close(zhp);
3426 	libzfs_fini(hdl);
3427 	return (ret);
3428 }
3429