xref: /titanic_52/usr/src/lib/libzfs/common/libzfs_pool.c (revision 9a70fc3be3b1e966bf78825cdb8d509963a6f0a1)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <zone.h>
42 #include <sys/efi_partition.h>
43 #include <sys/vtoc.h>
44 #include <sys/zfs_ioctl.h>
45 #include <sys/zio.h>
47 
48 #include "zfs_namecheck.h"
49 #include "zfs_prop.h"
50 #include "libzfs_impl.h"
51 
52 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
53 
54 /*
55  * ====================================================================
56  *   zpool property functions
57  * ====================================================================
58  */
59 
60 static int
61 zpool_get_all_props(zpool_handle_t *zhp)
62 {
63 	zfs_cmd_t zc = { 0 };
64 	libzfs_handle_t *hdl = zhp->zpool_hdl;
65 
66 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
67 
68 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
69 		return (-1);
70 
71 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
72 		if (errno == ENOMEM) {
73 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
74 				zcmd_free_nvlists(&zc);
75 				return (-1);
76 			}
77 		} else {
78 			zcmd_free_nvlists(&zc);
79 			return (-1);
80 		}
81 	}
82 
83 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
84 		zcmd_free_nvlists(&zc);
85 		return (-1);
86 	}
87 
88 	zcmd_free_nvlists(&zc);
89 
90 	return (0);
91 }
92 
93 static int
94 zpool_props_refresh(zpool_handle_t *zhp)
95 {
96 	nvlist_t *old_props;
97 
98 	old_props = zhp->zpool_props;
99 
100 	if (zpool_get_all_props(zhp) != 0)
101 		return (-1);
102 
103 	nvlist_free(old_props);
104 	return (0);
105 }
106 
107 static char *
108 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
109     zprop_source_t *src)
110 {
111 	nvlist_t *nv, *nvl;
112 	uint64_t ival;
113 	char *value;
114 	zprop_source_t source;
115 
116 	nvl = zhp->zpool_props;
117 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
118 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
119 		source = ival;
120 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
121 	} else {
122 		source = ZPROP_SRC_DEFAULT;
123 		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
124 			value = "-";
125 	}
126 
127 	if (src)
128 		*src = source;
129 
130 	return (value);
131 }
132 
133 uint64_t
134 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
135 {
136 	nvlist_t *nv, *nvl;
137 	uint64_t value;
138 	zprop_source_t source;
139 
140 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
141 		/*
142 		 * zpool_get_all_props() has most likely failed because
143 		 * the pool is faulted, but if all we need is the top level
144 		 * vdev's guid then get it from the zhp config nvlist.
145 		 */
146 		if ((prop == ZPOOL_PROP_GUID) &&
147 		    (nvlist_lookup_nvlist(zhp->zpool_config,
148 		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
149 		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
150 		    == 0)) {
151 			return (value);
152 		}
153 		return (zpool_prop_default_numeric(prop));
154 	}
155 
156 	nvl = zhp->zpool_props;
157 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
158 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
159 		source = value;
160 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
161 	} else {
162 		source = ZPROP_SRC_DEFAULT;
163 		value = zpool_prop_default_numeric(prop);
164 	}
165 
166 	if (src)
167 		*src = source;
168 
169 	return (value);
170 }
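
/*
 * Usage sketch (an illustrative addition, not part of the original file):
 * callers fetch numeric properties directly, passing NULL when the
 * property source is not of interest, just as zpool_set_prop() below
 * does for the pool version.
 */
static uint64_t
example_pool_version(zpool_handle_t *zhp)
{
	return (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL));
}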
171 
172 /*
173  * Map vdev states to printed strings.
174  */
175 char *
176 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
177 {
178 	switch (state) {
179 	case VDEV_STATE_CLOSED:
180 	case VDEV_STATE_OFFLINE:
181 		return (gettext("OFFLINE"));
182 	case VDEV_STATE_REMOVED:
183 		return (gettext("REMOVED"));
184 	case VDEV_STATE_CANT_OPEN:
185 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
186 			return (gettext("FAULTED"));
187 		else
188 			return (gettext("UNAVAIL"));
189 	case VDEV_STATE_FAULTED:
190 		return (gettext("FAULTED"));
191 	case VDEV_STATE_DEGRADED:
192 		return (gettext("DEGRADED"));
193 	case VDEV_STATE_HEALTHY:
194 		return (gettext("ONLINE"));
195 	}
196 
197 	return (gettext("UNKNOWN"));
198 }
199 
200 /*
201  * Get a zpool property value for 'prop' and return the value in
202  * a pre-allocated buffer.
203  */
204 int
205 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
206     zprop_source_t *srctype)
207 {
208 	uint64_t intval;
209 	const char *strval;
210 	zprop_source_t src = ZPROP_SRC_NONE;
211 	nvlist_t *nvroot;
212 	vdev_stat_t *vs;
213 	uint_t vsc;
214 
215 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
216 		if (prop == ZPOOL_PROP_NAME)
217 			(void) strlcpy(buf, zpool_get_name(zhp), len);
218 		else if (prop == ZPOOL_PROP_HEALTH)
219 			(void) strlcpy(buf, "FAULTED", len);
220 		else
221 			(void) strlcpy(buf, "-", len);
222 		return (0);
223 	}
224 
225 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
226 	    prop != ZPOOL_PROP_NAME)
227 		return (-1);
228 
229 	switch (zpool_prop_get_type(prop)) {
230 	case PROP_TYPE_STRING:
231 		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
232 		    len);
233 		break;
234 
235 	case PROP_TYPE_NUMBER:
236 		intval = zpool_get_prop_int(zhp, prop, &src);
237 
238 		switch (prop) {
239 		case ZPOOL_PROP_SIZE:
240 		case ZPOOL_PROP_USED:
241 		case ZPOOL_PROP_AVAILABLE:
242 			(void) zfs_nicenum(intval, buf, len);
243 			break;
244 
245 		case ZPOOL_PROP_CAPACITY:
246 			(void) snprintf(buf, len, "%llu%%",
247 			    (u_longlong_t)intval);
248 			break;
249 
250 		case ZPOOL_PROP_HEALTH:
251 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
252 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
253 			verify(nvlist_lookup_uint64_array(nvroot,
254 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
255 
256 			(void) strlcpy(buf, zpool_state_to_name(intval,
257 			    vs->vs_aux), len);
258 			break;
259 		default:
260 			(void) snprintf(buf, len, "%llu", intval);
261 		}
262 		break;
263 
264 	case PROP_TYPE_INDEX:
265 		intval = zpool_get_prop_int(zhp, prop, &src);
266 		if (zpool_prop_index_to_string(prop, intval, &strval)
267 		    != 0)
268 			return (-1);
269 		(void) strlcpy(buf, strval, len);
270 		break;
271 
272 	default:
273 		abort();
274 	}
275 
276 	if (srctype)
277 		*srctype = src;
278 
279 	return (0);
280 }
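
/*
 * Usage sketch (an illustrative addition, not part of the original file):
 * fetch the pool health string into a caller-supplied buffer; 'srctype'
 * may be NULL when the property source is not needed.
 */
static int
example_pool_health(zpool_handle_t *zhp, char *buf, size_t len)
{
	return (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, len, NULL));
}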
281 
282 /*
283  * Check that the bootfs name names a dataset in the pool it is being
284  * set on.  Assumes bootfs is a valid dataset name.
285  */
286 static boolean_t
287 bootfs_name_valid(const char *pool, char *bootfs)
288 {
289 	int len = strlen(pool);
290 
291 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM))
292 		return (B_FALSE);
293 
294 	if (strncmp(pool, bootfs, len) == 0 &&
295 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
296 		return (B_TRUE);
297 
298 	return (B_FALSE);
299 }
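
/*
 * Example (an illustrative addition, not part of the original file): for
 * pool "tank", bootfs_name_valid() accepts "tank" and "tank/boot" but
 * rejects "tanker/boot", since the character following the pool name
 * must be '/' or '\0'.
 */
static boolean_t
example_bootfs_check(void)
{
	char bootfs[] = "tank/boot";

	return (bootfs_name_valid("tank", bootfs));	/* B_TRUE */
}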
300 
301 /*
302  * Inspect the configuration to determine if any of the devices contain
303  * an EFI label.
304  */
305 static boolean_t
306 pool_uses_efi(nvlist_t *config)
307 {
308 	nvlist_t **child;
309 	uint_t c, children;
310 
311 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
312 	    &child, &children) != 0)
313 		return (read_efi_label(config, NULL) >= 0);
314 
315 	for (c = 0; c < children; c++) {
316 		if (pool_uses_efi(child[c]))
317 			return (B_TRUE);
318 	}
319 	return (B_FALSE);
320 }
321 
322 /*
323  * Given an nvlist of zpool properties to be set, validate that they are
324  * correct, and parse any numeric properties (index, boolean, etc) if they are
325  * specified as strings.
326  */
327 static nvlist_t *
328 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
329     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
330 {
331 	nvpair_t *elem;
332 	nvlist_t *retprops;
333 	zpool_prop_t prop;
334 	char *strval;
335 	uint64_t intval;
336 	char *slash;
337 	struct stat64 statbuf;
338 	zpool_handle_t *zhp;
339 	nvlist_t *nvroot;
340 
341 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
342 		(void) no_memory(hdl);
343 		return (NULL);
344 	}
345 
346 	elem = NULL;
347 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
348 		const char *propname = nvpair_name(elem);
349 
350 		/*
351 		 * Make sure this property is valid and applies to this type.
352 		 */
353 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
354 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
355 			    "invalid property '%s'"), propname);
356 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
357 			goto error;
358 		}
359 
360 		if (zpool_prop_readonly(prop)) {
361 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
362 			    "is readonly"), propname);
363 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
364 			goto error;
365 		}
366 
367 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
368 		    &strval, &intval, errbuf) != 0)
369 			goto error;
370 
371 		/*
372 		 * Perform additional checking for specific properties.
373 		 */
374 		switch (prop) {
375 		case ZPOOL_PROP_VERSION:
376 			if (intval < version || intval > SPA_VERSION) {
377 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
378 				    "property '%s' number %llu is invalid."),
379 				    propname, (u_longlong_t)intval);
380 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
381 				goto error;
382 			}
383 			break;
384 
385 		case ZPOOL_PROP_BOOTFS:
386 			if (create_or_import) {
387 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
388 				    "property '%s' cannot be set at creation "
389 				    "or import time"), propname);
390 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
391 				goto error;
392 			}
393 
394 			if (version < SPA_VERSION_BOOTFS) {
395 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
396 				    "pool must be upgraded to support "
397 				    "'%s' property"), propname);
398 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
399 				goto error;
400 			}
401 
402 			/*
403 			 * The bootfs property value has to be a dataset name
404 			 * and the dataset has to be in the pool it is set on.
405 			 */
406 			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
407 			    strval)) {
408 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
409 				    "is an invalid name"), strval);
410 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
411 				goto error;
412 			}
413 
414 			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
415 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
416 				    "could not open pool '%s'"), poolname);
417 				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
418 				goto error;
419 			}
420 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
421 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
422 
423 			/*
424 			 * The bootfs property cannot be set if any device in
425 			 * the pool has an EFI label.
426 			 */
427 			if (pool_uses_efi(nvroot)) {
428 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
429 				    "property '%s' not supported on "
430 				    "EFI labeled devices"), propname);
431 				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
432 				zpool_close(zhp);
433 				goto error;
434 			}
435 			zpool_close(zhp);
436 			break;
437 
438 		case ZPOOL_PROP_ALTROOT:
439 			if (!create_or_import) {
440 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
441 				    "property '%s' can only be set during pool "
442 				    "creation or import"), propname);
443 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
444 				goto error;
445 			}
446 
447 			if (strval[0] != '/') {
448 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
449 				    "bad alternate root '%s'"), strval);
450 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
451 				goto error;
452 			}
453 			break;
454 
455 		case ZPOOL_PROP_CACHEFILE:
456 			if (strval[0] == '\0')
457 				break;
458 
459 			if (strcmp(strval, "none") == 0)
460 				break;
461 
462 			if (strval[0] != '/') {
463 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
464 				    "property '%s' must be empty, an "
465 				    "absolute path, or 'none'"), propname);
466 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
467 				goto error;
468 			}
469 
470 			slash = strrchr(strval, '/');
471 
472 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
473 			    strcmp(slash, "/..") == 0) {
474 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
475 				    "'%s' is not a valid file"), strval);
476 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
477 				goto error;
478 			}
479 
480 			*slash = '\0';
481 
482 			if (strval[0] != '\0' &&
483 			    (stat64(strval, &statbuf) != 0 ||
484 			    !S_ISDIR(statbuf.st_mode))) {
485 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
486 				    "'%s' is not a valid directory"),
487 				    strval);
488 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
489 				goto error;
490 			}
491 
492 			*slash = '/';
493 			break;
494 		}
495 	}
496 
497 	return (retprops);
498 error:
499 	nvlist_free(retprops);
500 	return (NULL);
501 }
502 
503 /*
504  * Set a zpool property: propname=propval.
505  */
506 int
507 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
508 {
509 	zfs_cmd_t zc = { 0 };
510 	int ret = -1;
511 	char errbuf[1024];
512 	nvlist_t *nvl = NULL;
513 	nvlist_t *realprops;
514 	uint64_t version;
515 
516 	(void) snprintf(errbuf, sizeof (errbuf),
517 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
518 	    zhp->zpool_name);
519 
520 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
521 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
522 
523 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
524 		return (no_memory(zhp->zpool_hdl));
525 
526 	if (nvlist_add_string(nvl, propname, propval) != 0) {
527 		nvlist_free(nvl);
528 		return (no_memory(zhp->zpool_hdl));
529 	}
530 
531 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
532 	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
533 	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
534 		nvlist_free(nvl);
535 		return (-1);
536 	}
537 
538 	nvlist_free(nvl);
539 	nvl = realprops;
540 
541 	/*
542 	 * Execute the corresponding ioctl() to set this property.
543 	 */
544 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
545 
546 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
547 		nvlist_free(nvl);
548 		return (-1);
549 	}
550 
551 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
552 
553 	zcmd_free_nvlists(&zc);
554 	nvlist_free(nvl);
555 
556 	if (ret)
557 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
558 	else
559 		(void) zpool_props_refresh(zhp);
560 
561 	return (ret);
562 }
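
/*
 * Usage sketch (an illustrative addition, not part of the original file):
 * disable the cache file for a pool.  "none" is one of the three forms
 * zpool_valid_proplist() above accepts for the 'cachefile' property.
 */
static int
example_disable_cachefile(zpool_handle_t *zhp)
{
	return (zpool_set_prop(zhp, "cachefile", "none"));
}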
563 
564 int
565 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
566 {
567 	libzfs_handle_t *hdl = zhp->zpool_hdl;
568 	zprop_list_t *entry;
569 	char buf[ZFS_MAXPROPLEN];
570 
571 	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
572 		return (-1);
573 
574 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
575 
576 		if (entry->pl_fixed)
577 			continue;
578 
579 		if (entry->pl_prop != ZPROP_INVAL &&
580 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
581 		    NULL) == 0) {
582 			if (strlen(buf) > entry->pl_width)
583 				entry->pl_width = strlen(buf);
584 		}
585 	}
586 
587 	return (0);
588 }
589 
591 /*
592  * Validate the given pool name, setting an extended error message on the
593  * handle when 'hdl' is non-NULL.
594  */
595 boolean_t
596 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
597 {
598 	namecheck_err_t why;
599 	char what;
600 	int ret;
601 
602 	ret = pool_namecheck(pool, &why, &what);
603 
604 	/*
605 	 * The rules for reserved pool names were extended at a later point.
606 	 * But we need to support users with existing pools that may now be
607 	 * invalid.  So we only check for this expanded set of names during a
608 	 * create (or import), and only in userland.
609 	 */
610 	if (ret == 0 && !isopen &&
611 	    (strncmp(pool, "mirror", 6) == 0 ||
612 	    strncmp(pool, "raidz", 5) == 0 ||
613 	    strncmp(pool, "spare", 5) == 0 ||
614 	    strcmp(pool, "log") == 0)) {
615 		if (hdl != NULL)
616 			zfs_error_aux(hdl,
617 			    dgettext(TEXT_DOMAIN, "name is reserved"));
618 		return (B_FALSE);
619 	}
620 
622 	if (ret != 0) {
623 		if (hdl != NULL) {
624 			switch (why) {
625 			case NAME_ERR_TOOLONG:
626 				zfs_error_aux(hdl,
627 				    dgettext(TEXT_DOMAIN, "name is too long"));
628 				break;
629 
630 			case NAME_ERR_INVALCHAR:
631 				zfs_error_aux(hdl,
632 				    dgettext(TEXT_DOMAIN, "invalid character "
633 				    "'%c' in pool name"), what);
634 				break;
635 
636 			case NAME_ERR_NOLETTER:
637 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
638 				    "name must begin with a letter"));
639 				break;
640 
641 			case NAME_ERR_RESERVED:
642 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
643 				    "name is reserved"));
644 				break;
645 
646 			case NAME_ERR_DISKLIKE:
647 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
648 				    "pool name is reserved"));
649 				break;
650 
651 			case NAME_ERR_LEADING_SLASH:
652 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
653 				    "leading slash in name"));
654 				break;
655 
656 			case NAME_ERR_EMPTY_COMPONENT:
657 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
658 				    "empty component in name"));
659 				break;
660 
661 			case NAME_ERR_TRAILING_SLASH:
662 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
663 				    "trailing slash in name"));
664 				break;
665 
666 			case NAME_ERR_MULTIPLE_AT:
667 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
668 				    "multiple '@' delimiters in name"));
669 				break;
670 
671 			}
672 		}
673 		return (B_FALSE);
674 	}
675 
676 	return (B_TRUE);
677 }
678 
679 /*
680  * Open a handle to the given pool, even if the pool is currently in the FAULTED
681  * state.
682  */
683 zpool_handle_t *
684 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
685 {
686 	zpool_handle_t *zhp;
687 	boolean_t missing;
688 
689 	/*
690 	 * Make sure the pool name is valid.
691 	 */
692 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
693 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
694 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
695 		    pool);
696 		return (NULL);
697 	}
698 
699 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
700 		return (NULL);
701 
702 	zhp->zpool_hdl = hdl;
703 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
704 
705 	if (zpool_refresh_stats(zhp, &missing) != 0) {
706 		zpool_close(zhp);
707 		return (NULL);
708 	}
709 
710 	if (missing) {
711 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
712 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
713 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
714 		zpool_close(zhp);
715 		return (NULL);
716 	}
717 
718 	return (zhp);
719 }
720 
721 /*
722  * Like the above, but silent on error.  Used when iterating over pools (because
723  * the configuration cache may be out of date).
724  */
725 int
726 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
727 {
728 	zpool_handle_t *zhp;
729 	boolean_t missing;
730 
731 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
732 		return (-1);
733 
734 	zhp->zpool_hdl = hdl;
735 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
736 
737 	if (zpool_refresh_stats(zhp, &missing) != 0) {
738 		zpool_close(zhp);
739 		return (-1);
740 	}
741 
742 	if (missing) {
743 		zpool_close(zhp);
744 		*ret = NULL;
745 		return (0);
746 	}
747 
748 	*ret = zhp;
749 	return (0);
750 }
751 
752 /*
753  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
754  * state.
755  */
756 zpool_handle_t *
757 zpool_open(libzfs_handle_t *hdl, const char *pool)
758 {
759 	zpool_handle_t *zhp;
760 
761 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
762 		return (NULL);
763 
764 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
765 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
766 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
767 		zpool_close(zhp);
768 		return (NULL);
769 	}
770 
771 	return (zhp);
772 }
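
/*
 * Usage sketch (an illustrative addition, not part of the original file;
 * "tank" is a placeholder pool name): zpool_open() and zpool_close()
 * bracket all per-pool operations, with errors reported through the
 * library handle.
 */
static int
example_with_pool(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return (-1);
	/* ... operate on the pool ... */
	zpool_close(zhp);
	return (0);
}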
773 
774 /*
775  * Close the handle.  Simply frees the memory associated with the handle.
776  */
777 void
778 zpool_close(zpool_handle_t *zhp)
779 {
780 	if (zhp->zpool_config)
781 		nvlist_free(zhp->zpool_config);
782 	if (zhp->zpool_old_config)
783 		nvlist_free(zhp->zpool_old_config);
784 	if (zhp->zpool_props)
785 		nvlist_free(zhp->zpool_props);
786 	free(zhp);
787 }
788 
789 /*
790  * Return the name of the pool.
791  */
792 const char *
793 zpool_get_name(zpool_handle_t *zhp)
794 {
795 	return (zhp->zpool_name);
796 }
797 
799 /*
800  * Return the state of the pool (ACTIVE or UNAVAILABLE).
801  */
802 int
803 zpool_get_state(zpool_handle_t *zhp)
804 {
805 	return (zhp->zpool_state);
806 }
807 
808 /*
809  * Create the named pool, using the provided vdev list.  It is assumed
810  * that the consumer has already validated the contents of the nvlist, so we
811  * don't have to worry about error semantics.
812  */
813 int
814 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
815     nvlist_t *props, nvlist_t *fsprops)
816 {
817 	zfs_cmd_t zc = { 0 };
818 	nvlist_t *zc_fsprops = NULL;
819 	nvlist_t *zc_props = NULL;
820 	char msg[1024];
821 	char *altroot;
822 	int ret = -1;
823 
824 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
825 	    "cannot create '%s'"), pool);
826 
827 	if (!zpool_name_valid(hdl, B_FALSE, pool))
828 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
829 
830 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
831 		return (-1);
832 
833 	if (props) {
834 		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
835 		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
836 			goto create_failed;
837 		}
838 	}
839 
840 	if (fsprops) {
841 		uint64_t zoned;
842 		char *zonestr;
843 
844 		zoned = ((nvlist_lookup_string(fsprops,
845 		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
846 		    strcmp(zonestr, "on") == 0);
847 
848 		if ((zc_fsprops = zfs_valid_proplist(hdl,
849 		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
850 			goto create_failed;
851 		}
852 		if (!zc_props &&
853 		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
854 			goto create_failed;
855 		}
856 		if (nvlist_add_nvlist(zc_props,
857 		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
858 			goto create_failed;
859 		}
860 	}
861 
862 	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
863 		goto create_failed;
864 
865 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
866 
867 	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
868 
869 		zcmd_free_nvlists(&zc);
870 		nvlist_free(zc_props);
871 		nvlist_free(zc_fsprops);
872 
873 		switch (errno) {
874 		case EBUSY:
875 			/*
876 			 * This can happen if the user has specified the same
877 			 * device multiple times.  We can't reliably detect this
878 			 * until we try to add it and see we already have a
879 			 * label.
880 			 */
881 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
882 			    "one or more vdevs refer to the same device"));
883 			return (zfs_error(hdl, EZFS_BADDEV, msg));
884 
885 		case EOVERFLOW:
886 			/*
887 			 * This occurs when one of the devices is below
888 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
889 			 * device was the problem device since there's no
890 			 * reliable way to determine device size from userland.
891 			 */
892 			{
893 				char buf[64];
894 
895 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
896 
897 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
898 				    "one or more devices is less than the "
899 				    "minimum size (%s)"), buf);
900 			}
901 			return (zfs_error(hdl, EZFS_BADDEV, msg));
902 
903 		case ENOSPC:
904 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
905 			    "one or more devices is out of space"));
906 			return (zfs_error(hdl, EZFS_BADDEV, msg));
907 
908 		case ENOTBLK:
909 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
910 			    "cache device must be a disk or disk slice"));
911 			return (zfs_error(hdl, EZFS_BADDEV, msg));
912 
913 		default:
914 			return (zpool_standard_error(hdl, errno, msg));
915 		}
916 	}
917 
918 	/*
919 	 * If this is an alternate root pool, then we automatically set the
920 	 * mountpoint of the root dataset to be '/'.
921 	 */
922 	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
923 	    &altroot) == 0) {
924 		zfs_handle_t *zhp;
925 
926 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
927 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
928 		    "/") == 0);
929 
930 		zfs_close(zhp);
931 	}
932 
933 create_failed:
934 	zcmd_free_nvlists(&zc);
935 	nvlist_free(zc_props);
936 	nvlist_free(zc_fsprops);
937 	return (ret);
938 }
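
/*
 * Sketch of a minimal vdev tree for zpool_create() (an illustrative
 * addition, not part of the original file; the device path is a
 * placeholder, and a real consumer such as the zpool command also fills
 * in details like ZPOOL_CONFIG_WHOLE_DISK).
 */
static nvlist_t *
example_single_disk_nvroot(void)
{
	nvlist_t *root = NULL;
	nvlist_t *disk = NULL;

	if (nvlist_alloc(&root, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c0t0d0s0") != 0 ||
	    nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) != 0) {
		nvlist_free(disk);
		nvlist_free(root);
		return (NULL);
	}
	nvlist_free(disk);	/* nvlist_add_nvlist_array() makes a copy */
	return (root);
}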
939 
940 /*
941  * Destroy the given pool.  It is up to the caller to ensure that there are no
942  * datasets left in the pool.
943  */
944 int
945 zpool_destroy(zpool_handle_t *zhp)
946 {
947 	zfs_cmd_t zc = { 0 };
948 	zfs_handle_t *zfp = NULL;
949 	libzfs_handle_t *hdl = zhp->zpool_hdl;
950 	char msg[1024];
951 
952 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
953 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
954 	    ZFS_TYPE_FILESYSTEM)) == NULL)
955 		return (-1);
956 
957 	if (zpool_remove_zvol_links(zhp) != 0)
958 		return (-1);
959 
960 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
961 
962 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
963 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
964 		    "cannot destroy '%s'"), zhp->zpool_name);
965 
966 		if (errno == EROFS) {
967 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
968 			    "one or more devices is read only"));
969 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
970 		} else {
971 			(void) zpool_standard_error(hdl, errno, msg);
972 		}
973 
974 		if (zfp)
975 			zfs_close(zfp);
976 		return (-1);
977 	}
978 
979 	if (zfp) {
980 		remove_mountpoint(zfp);
981 		zfs_close(zfp);
982 	}
983 
984 	return (0);
985 }
986 
987 /*
988  * Add the given vdevs to the pool.  The caller must have already performed the
989  * necessary verification to ensure that the vdev specification is well-formed.
990  */
991 int
992 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
993 {
994 	zfs_cmd_t zc = { 0 };
995 	int ret;
996 	libzfs_handle_t *hdl = zhp->zpool_hdl;
997 	char msg[1024];
998 	nvlist_t **spares, **l2cache;
999 	uint_t nspares, nl2cache;
1000 
1001 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1002 	    "cannot add to '%s'"), zhp->zpool_name);
1003 
1004 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1005 	    SPA_VERSION_SPARES &&
1006 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1007 	    &spares, &nspares) == 0) {
1008 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1009 		    "upgraded to add hot spares"));
1010 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1011 	}
1012 
1013 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1014 	    SPA_VERSION_L2CACHE &&
1015 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1016 	    &l2cache, &nl2cache) == 0) {
1017 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1018 		    "upgraded to add cache devices"));
1019 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1020 	}
1021 
1022 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1023 		return (-1);
1024 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1025 
1026 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1027 		switch (errno) {
1028 		case EBUSY:
1029 			/*
1030 			 * This can happen if the user has specified the same
1031 			 * device multiple times.  We can't reliably detect this
1032 			 * until we try to add it and see we already have a
1033 			 * label.
1034 			 */
1035 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1036 			    "one or more vdevs refer to the same device"));
1037 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1038 			break;
1039 
1040 		case EOVERFLOW:
1041 			/*
1042 			 * This occurs when one of the devices is below
1043 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1044 			 * device was the problem device since there's no
1045 			 * reliable way to determine device size from userland.
1046 			 */
1047 			{
1048 				char buf[64];
1049 
1050 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1051 
1052 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1053 				    "device is less than the minimum "
1054 				    "size (%s)"), buf);
1055 			}
1056 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1057 			break;
1058 
1059 		case ENOTSUP:
1060 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1061 			    "pool must be upgraded to add these vdevs"));
1062 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
1063 			break;
1064 
1065 		case EDOM:
1066 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1067 			    "root pool cannot have multiple vdevs"
1068 			    " or separate logs"));
1069 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1070 			break;
1071 
1072 		case ENOTBLK:
1073 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1074 			    "cache device must be a disk or disk slice"));
1075 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1076 			break;
1077 
1078 		default:
1079 			(void) zpool_standard_error(hdl, errno, msg);
1080 		}
1081 
1082 		ret = -1;
1083 	} else {
1084 		ret = 0;
1085 	}
1086 
1087 	zcmd_free_nvlists(&zc);
1088 
1089 	return (ret);
1090 }
1091 
1092 /*
1093  * Export the pool from the system.  The caller must ensure that there are no
1094  * mounted datasets in the pool.
1095  */
1096 int
1097 zpool_export(zpool_handle_t *zhp, boolean_t force)
1098 {
1099 	zfs_cmd_t zc = { 0 };
1100 	char msg[1024];
1101 
1102 	if (zpool_remove_zvol_links(zhp) != 0)
1103 		return (-1);
1104 
1105 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1106 	    "cannot export '%s'"), zhp->zpool_name);
1107 
1108 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1109 	zc.zc_cookie = force;
1110 
1111 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1112 		switch (errno) {
1113 		case EXDEV:
1114 			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1115 			    "use '-f' to override the following errors:\n"
1116 			    "'%s' has an active shared spare which could be"
1117 			    " used by other pools once '%s' is exported."),
1118 			    zhp->zpool_name, zhp->zpool_name);
1119 			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1120 			    msg));
1121 		default:
1122 			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1123 			    msg));
1124 		}
1125 	}
1126 
1127 	return (0);
1128 }
1129 
1130 /*
1131  * zpool_import() is a contracted interface.  It should be kept the same
1132  * if possible.
1133  *
1134  * Applications should use zpool_import_props() to import a pool with
1135  * new property values to be set.
1136  */
1137 int
1138 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1139     char *altroot)
1140 {
1141 	nvlist_t *props = NULL;
1142 	int ret;
1143 
1144 	if (altroot != NULL) {
1145 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1146 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1147 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1148 			    newname));
1149 		}
1150 
1151 		if (nvlist_add_string(props,
1152 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0) {
1153 			nvlist_free(props);
1154 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1155 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1156 			    newname));
1157 		}
1158 	}
1159 
1160 	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1161 	if (props)
1162 		nvlist_free(props);
1163 	return (ret);
1164 }
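
/*
 * Usage sketch (an illustrative addition, not part of the original file):
 * import a pool under its original name with no alternate root; 'config'
 * comes from the import discovery code (see the comment on
 * zpool_import_props() below).
 */
static int
example_import(libzfs_handle_t *hdl, nvlist_t *config)
{
	return (zpool_import(hdl, config, NULL, NULL));
}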
1165 
1166 /*
1167  * Import the given pool using the known configuration and a list of
1168  * properties to be set. The configuration should have come from
1169  * zpool_find_import().  The 'newname' parameter controls whether the pool
1170  * is imported with a different name.
1171  */
1172 int
1173 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1174     nvlist_t *props, boolean_t importfaulted)
1175 {
1176 	zfs_cmd_t zc = { 0 };
1177 	char *thename;
1178 	char *origname;
1179 	int ret;
1180 	char errbuf[1024];
1181 
1182 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1183 	    &origname) == 0);
1184 
1185 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1186 	    "cannot import pool '%s'"), origname);
1187 
1188 	if (newname != NULL) {
1189 		if (!zpool_name_valid(hdl, B_FALSE, newname))
1190 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1191 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1192 			    newname));
1193 		thename = (char *)newname;
1194 	} else {
1195 		thename = origname;
1196 	}
1197 
1198 	if (props) {
1199 		uint64_t version;
1200 
1201 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1202 		    &version) == 0);
1203 
1204 		if ((props = zpool_valid_proplist(hdl, origname,
1205 		    props, version, B_TRUE, errbuf)) == NULL) {
1206 			return (-1);
1207 		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1208 			nvlist_free(props);
1209 			return (-1);
1210 		}
1211 	}
1212 
1213 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1214 
1215 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1216 	    &zc.zc_guid) == 0);
1217 
1218 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1219 		nvlist_free(props);
1220 		return (-1);
1221 	}
1222 
1223 	zc.zc_cookie = (uint64_t)importfaulted;
1224 	ret = 0;
1225 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1226 		char desc[1024];
1227 		if (newname == NULL)
1228 			(void) snprintf(desc, sizeof (desc),
1229 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1230 			    thename);
1231 		else
1232 			(void) snprintf(desc, sizeof (desc),
1233 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1234 			    origname, thename);
1235 
1236 		switch (errno) {
1237 		case ENOTSUP:
1238 			/*
1239 			 * Unsupported version.
1240 			 */
1241 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
1242 			break;
1243 
1244 		case EINVAL:
1245 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1246 			break;
1247 
1248 		default:
1249 			(void) zpool_standard_error(hdl, errno, desc);
1250 		}
1251 
1252 		ret = -1;
1253 	} else {
1254 		zpool_handle_t *zhp;
1255 
1256 		/*
1257 		 * This should never fail, but play it safe anyway.
1258 		 */
1259 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
1260 			ret = -1;
1261 		} else if (zhp != NULL) {
1262 			ret = zpool_create_zvol_links(zhp);
1263 			zpool_close(zhp);
1264 		}
1265 
1266 	}
1267 
1268 	zcmd_free_nvlists(&zc);
1269 	nvlist_free(props);
1270 
1271 	return (ret);
1272 }
1273 
1274 /*
1275  * Scrub the pool.
1276  */
1277 int
1278 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1279 {
1280 	zfs_cmd_t zc = { 0 };
1281 	char msg[1024];
1282 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1283 
1284 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1285 	zc.zc_cookie = type;
1286 
1287 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1288 		return (0);
1289 
1290 	(void) snprintf(msg, sizeof (msg),
1291 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1292 
1293 	if (errno == EBUSY)
1294 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
1295 	else
1296 		return (zpool_standard_error(hdl, errno, msg));
1297 }
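
/*
 * Usage sketch (an illustrative addition, not part of the original file,
 * assuming this era's pool_scrub_type_t constants from sys/fs/zfs.h):
 * kick off a full scrub of the pool.
 */
static int
example_start_scrub(zpool_handle_t *zhp)
{
	return (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING));
}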
1298 
1299 /*
1300  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1301  * spare, and FALSE if it is an INUSE spare.
1302  */
1303 static nvlist_t *
1304 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
1305     boolean_t *avail_spare, boolean_t *l2cache)
1306 {
1307 	uint_t c, children;
1308 	nvlist_t **child;
1309 	uint64_t theguid, present;
1310 	char *path;
1311 	uint64_t wholedisk = 0;
1312 	nvlist_t *ret;
1313 
1314 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
1315 
1316 	if (search == NULL &&
1317 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
1318 		/*
1319 		 * If the device has never been present since import, the only
1320 		 * reliable way to match the vdev is by GUID.
1321 		 */
1322 		if (theguid == guid)
1323 			return (nv);
1324 	} else if (search != NULL &&
1325 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1326 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1327 		    &wholedisk);
1328 		if (wholedisk) {
1329 			/*
1330 			 * For whole disks, the internal path has 's0', but the
1331 			 * path passed in by the user doesn't.
1332 			 */
1333 			if (strlen(search) == strlen(path) - 2 &&
1334 			    strncmp(search, path, strlen(search)) == 0)
1335 				return (nv);
1336 		} else if (strcmp(search, path) == 0) {
1337 			return (nv);
1338 		}
1339 	}
1340 
1341 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1342 	    &child, &children) != 0)
1343 		return (NULL);
1344 
1345 	for (c = 0; c < children; c++)
1346 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1347 		    avail_spare, l2cache)) != NULL)
1348 			return (ret);
1349 
1350 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1351 	    &child, &children) == 0) {
1352 		for (c = 0; c < children; c++) {
1353 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1354 			    avail_spare, l2cache)) != NULL) {
1355 				*avail_spare = B_TRUE;
1356 				return (ret);
1357 			}
1358 		}
1359 	}
1360 
1361 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1362 	    &child, &children) == 0) {
1363 		for (c = 0; c < children; c++) {
1364 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1365 			    avail_spare, l2cache)) != NULL) {
1366 				*l2cache = B_TRUE;
1367 				return (ret);
1368 			}
1369 		}
1370 	}
1371 
1372 	return (NULL);
1373 }
1374 
1375 nvlist_t *
1376 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1377     boolean_t *l2cache)
1378 {
1379 	char buf[MAXPATHLEN];
1380 	const char *search;
1381 	char *end;
1382 	nvlist_t *nvroot;
1383 	uint64_t guid;
1384 
1385 	guid = strtoull(path, &end, 10);
1386 	if (guid != 0 && *end == '\0') {
1387 		search = NULL;
1388 	} else if (path[0] != '/') {
1389 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1390 		search = buf;
1391 	} else {
1392 		search = path;
1393 	}
1394 
1395 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1396 	    &nvroot) == 0);
1397 
1398 	*avail_spare = B_FALSE;
1399 	*l2cache = B_FALSE;
1400 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
1401 	    l2cache));
1402 }
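
/*
 * Usage sketch (an illustrative addition, not part of the original file):
 * a device may be named by a shorthand such as "c0t0d0s0", a full
 * "/dev/dsk/..." path, or a decimal guid; the spare and l2cache
 * out-parameters are always set.
 */
static boolean_t
example_vdev_exists(zpool_handle_t *zhp, const char *dev)
{
	boolean_t spare, l2cache;

	return (zpool_find_vdev(zhp, dev, &spare, &l2cache) != NULL);
}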
1403 
1404 /*
1405  * Returns TRUE if the given guid corresponds to the given type.
1406  * This is used to check for hot spares (INUSE or not), and level 2 cache
1407  * devices.
1408  */
1409 static boolean_t
1410 is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
1411 {
1412 	uint64_t target_guid;
1413 	nvlist_t *nvroot;
1414 	nvlist_t **list;
1415 	uint_t count;
1416 	int i;
1417 
1418 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1419 	    &nvroot) == 0);
1420 	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
1421 		for (i = 0; i < count; i++) {
1422 			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
1423 			    &target_guid) == 0);
1424 			if (guid == target_guid)
1425 				return (B_TRUE);
1426 		}
1427 	}
1428 
1429 	return (B_FALSE);
1430 }
1431 
1432 /*
1433  * Bring the specified vdev online.  The 'flags' parameter is a set of the
1434  * ZFS_ONLINE_* flags.
1435  */
1436 int
1437 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1438     vdev_state_t *newstate)
1439 {
1440 	zfs_cmd_t zc = { 0 };
1441 	char msg[1024];
1442 	nvlist_t *tgt;
1443 	boolean_t avail_spare, l2cache;
1444 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1445 
1446 	(void) snprintf(msg, sizeof (msg),
1447 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1448 
1449 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1450 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == NULL)
1451 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1452 
1453 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1454 
1455 	if (avail_spare ||
1456 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1457 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1458 
1459 	zc.zc_cookie = VDEV_STATE_ONLINE;
1460 	zc.zc_obj = flags;
1461 
1462 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1463 		return (zpool_standard_error(hdl, errno, msg));
1464 
1465 	*newstate = zc.zc_cookie;
1466 	return (0);
1467 }
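
/*
 * Usage sketch (an illustrative addition, not part of the original file):
 * bring a device online with no extra ZFS_ONLINE_* flags and report the
 * state it lands in.
 */
static int
example_online(zpool_handle_t *zhp, const char *dev, vdev_state_t *newstate)
{
	return (zpool_vdev_online(zhp, dev, 0, newstate));
}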
1468 
1469 /*
1470  * Take the specified vdev offline
1471  */
1472 int
1473 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1474 {
1475 	zfs_cmd_t zc = { 0 };
1476 	char msg[1024];
1477 	nvlist_t *tgt;
1478 	boolean_t avail_spare, l2cache;
1479 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1480 
1481 	(void) snprintf(msg, sizeof (msg),
1482 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1483 
1484 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1485 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == NULL)
1486 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1487 
1488 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1489 
1490 	if (avail_spare ||
1491 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1492 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1493 
1494 	zc.zc_cookie = VDEV_STATE_OFFLINE;
1495 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1496 
1497 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1498 		return (0);
1499 
1500 	switch (errno) {
1501 	case EBUSY:
1502 
1503 		/*
1504 		 * There are no other replicas of this device.
1505 		 */
1506 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1507 
1508 	default:
1509 		return (zpool_standard_error(hdl, errno, msg));
1510 	}
1511 }
1512 
1513 /*
1514  * Mark the given vdev faulted.
1515  */
1516 int
1517 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
1518 {
1519 	zfs_cmd_t zc = { 0 };
1520 	char msg[1024];
1521 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1522 
1523 	(void) snprintf(msg, sizeof (msg),
1524 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
1525 
1526 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1527 	zc.zc_guid = guid;
1528 	zc.zc_cookie = VDEV_STATE_FAULTED;
1529 
1530 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1531 		return (0);
1532 
1533 	switch (errno) {
1534 	case EBUSY:
1535 
1536 		/*
1537 		 * There are no other replicas of this device.
1538 		 */
1539 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1540 
1541 	default:
1542 		return (zpool_standard_error(hdl, errno, msg));
1543 	}
1545 }
1546 
1547 /*
1548  * Mark the given vdev degraded.
1549  */
1550 int
1551 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
1552 {
1553 	zfs_cmd_t zc = { 0 };
1554 	char msg[1024];
1555 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1556 
1557 	(void) snprintf(msg, sizeof (msg),
1558 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
1559 
1560 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1561 	zc.zc_guid = guid;
1562 	zc.zc_cookie = VDEV_STATE_DEGRADED;
1563 
1564 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1565 		return (0);
1566 
1567 	return (zpool_standard_error(hdl, errno, msg));
1568 }
1569 
1570 /*
1571  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1572  * a hot spare.
1573  */
1574 static boolean_t
1575 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1576 {
1577 	nvlist_t **child;
1578 	uint_t c, children;
1579 	char *type;
1580 
1581 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1582 	    &children) == 0) {
1583 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1584 		    &type) == 0);
1585 
1586 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1587 		    children == 2 && child[which] == tgt)
1588 			return (B_TRUE);
1589 
1590 		for (c = 0; c < children; c++)
1591 			if (is_replacing_spare(child[c], tgt, which))
1592 				return (B_TRUE);
1593 	}
1594 
1595 	return (B_FALSE);
1596 }
1597 
1598 /*
1599  * Attach new_disk (fully described by nvroot) to old_disk.
1600  * If 'replacing' is specified, the new disk will replace the old one.
1601  */
1602 int
1603 zpool_vdev_attach(zpool_handle_t *zhp,
1604     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1605 {
1606 	zfs_cmd_t zc = { 0 };
1607 	char msg[1024];
1608 	int ret;
1609 	nvlist_t *tgt;
1610 	boolean_t avail_spare, l2cache;
1611 	uint64_t val, is_log;
1612 	char *path, *newname;
1613 	nvlist_t **child;
1614 	uint_t children;
1615 	nvlist_t *config_root;
1616 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1617 
1618 	if (replacing)
1619 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1620 		    "cannot replace %s with %s"), old_disk, new_disk);
1621 	else
1622 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1623 		    "cannot attach %s to %s"), new_disk, old_disk);
1624 
1625 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1626 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache)) == 0)
1627 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1628 
1629 	if (avail_spare)
1630 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1631 
1632 	if (l2cache)
1633 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1634 
1635 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1636 	zc.zc_cookie = replacing;
1637 
1638 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1639 	    &child, &children) != 0 || children != 1) {
1640 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1641 		    "new device must be a single disk"));
1642 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1643 	}
1644 
1645 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1646 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1647 
1648 	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
1649 		return (-1);
1650 
1651 	/*
1652 	 * If the target is a hot spare that has been swapped in, we can only
1653 	 * replace it with another hot spare.
1654 	 */
1655 	if (replacing &&
1656 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1657 	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache) == NULL ||
1658 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1659 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1660 		    "can only be replaced by another hot spare"));
1661 		free(newname);
1662 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1663 	}
1664 
1665 	/*
1666 	 * If we are attempting to replace a spare, it cannot be applied to an
1667 	 * already spared device.
1668 	 */
1669 	if (replacing &&
1670 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1671 	    zpool_find_vdev(zhp, newname, &avail_spare, &l2cache) != NULL &&
1672 	    avail_spare && is_replacing_spare(config_root, tgt, 0)) {
1673 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1674 		    "device has already been replaced with a spare"));
1675 		free(newname);
1676 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1677 	}
1678 
1679 	free(newname);
1680 
1681 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1682 		return (-1);
1683 
1684 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1685 
1686 	zcmd_free_nvlists(&zc);
1687 
1688 	if (ret == 0)
1689 		return (0);
1690 
1691 	switch (errno) {
1692 	case ENOTSUP:
1693 		/*
1694 		 * Can't attach to or replace this type of vdev.
1695 		 */
1696 		if (replacing) {
1697 			is_log = B_FALSE;
1698 			(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_LOG,
1699 			    &is_log);
1700 			if (is_log)
1701 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1702 				    "cannot replace a log with a spare"));
1703 			else
1704 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1705 				    "cannot replace a replacing device"));
1706 		} else {
1707 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1708 			    "can only attach to mirrors and top-level "
1709 			    "disks"));
1710 		}
1711 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1712 		break;
1713 
1714 	case EINVAL:
1715 		/*
1716 		 * The new device must be a single disk.
1717 		 */
1718 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1719 		    "new device must be a single disk"));
1720 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1721 		break;
1722 
1723 	case EBUSY:
1724 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1725 		    new_disk);
1726 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1727 		break;
1728 
1729 	case EOVERFLOW:
1730 		/*
1731 		 * The new device is too small.
1732 		 */
1733 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1734 		    "device is too small"));
1735 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1736 		break;
1737 
1738 	case EDOM:
1739 		/*
1740 		 * The new device has a different alignment requirement.
1741 		 */
1742 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1743 		    "devices have different sector alignment"));
1744 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1745 		break;
1746 
1747 	case ENAMETOOLONG:
1748 		/*
1749 		 * The resulting top-level vdev spec won't fit in the label.
1750 		 */
1751 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1752 		break;
1753 
1754 	default:
1755 		(void) zpool_standard_error(hdl, errno, msg);
1756 	}
1757 
1758 	return (-1);
1759 }
1760 
1761 /*
1762  * Detach the specified device.
1763  */
1764 int
1765 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1766 {
1767 	zfs_cmd_t zc = { 0 };
1768 	char msg[1024];
1769 	nvlist_t *tgt;
1770 	boolean_t avail_spare, l2cache;
1771 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1772 
1773 	(void) snprintf(msg, sizeof (msg),
1774 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1775 
1776 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1777 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == 0)
1778 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1779 
1780 	if (avail_spare)
1781 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1782 
1783 	if (l2cache)
1784 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1785 
1786 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1787 
1788 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1789 		return (0);
1790 
1791 	switch (errno) {
1792 
1793 	case ENOTSUP:
1794 		/*
1795 		 * Can't detach from this type of vdev.
1796 		 */
1797 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1798 		    "applicable to mirror and replacing vdevs"));
1799 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1800 		break;
1801 
1802 	case EBUSY:
1803 		/*
1804 		 * There are no other replicas of this device.
1805 		 */
1806 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1807 		break;
1808 
1809 	default:
1810 		(void) zpool_standard_error(hdl, errno, msg);
1811 	}
1812 
1813 	return (-1);
1814 }
1815 
1816 /*
1817  * Remove the given device.  Currently, this is supported only for hot spares
1818  * and level 2 cache devices.
1819  */
1820 int
1821 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1822 {
1823 	zfs_cmd_t zc = { 0 };
1824 	char msg[1024];
1825 	nvlist_t *tgt;
1826 	boolean_t avail_spare, l2cache;
1827 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1828 
1829 	(void) snprintf(msg, sizeof (msg),
1830 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1831 
1832 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1833 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == 0)
1834 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1835 
1836 	if (!avail_spare && !l2cache) {
1837 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1838 		    "only inactive hot spares or cache devices "
1839 		    "can be removed"));
1840 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1841 	}
1842 
1843 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1844 
1845 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1846 		return (0);
1847 
1848 	return (zpool_standard_error(hdl, errno, msg));
1849 }
1850 
1851 /*
1852  * Clear the errors for the pool, or the particular device if specified.
1853  */
1854 int
1855 zpool_clear(zpool_handle_t *zhp, const char *path)
1856 {
1857 	zfs_cmd_t zc = { 0 };
1858 	char msg[1024];
1859 	nvlist_t *tgt;
1860 	boolean_t avail_spare, l2cache;
1861 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1862 
1863 	if (path)
1864 		(void) snprintf(msg, sizeof (msg),
1865 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1866 		    path);
1867 	else
1868 		(void) snprintf(msg, sizeof (msg),
1869 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1870 		    zhp->zpool_name);
1871 
1872 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1873 	if (path) {
1874 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
1875 		    &l2cache)) == 0)
1876 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1877 
1878 		/*
1879 		 * Don't allow error clearing for hot spares.  Do allow
1880 		 * error clearing for l2cache devices.
1881 		 */
1882 		if (avail_spare)
1883 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1884 
1885 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1886 		    &zc.zc_guid) == 0);
1887 	}
1888 
1889 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
1890 		return (0);
1891 
1892 	return (zpool_standard_error(hdl, errno, msg));
1893 }
1894 
1895 /*
1896  * Similar to zpool_clear(), but takes a GUID (used by fmd).
1897  */
1898 int
1899 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
1900 {
1901 	zfs_cmd_t zc = { 0 };
1902 	char msg[1024];
1903 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1904 
1905 	(void) snprintf(msg, sizeof (msg),
1906 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
1907 	    (u_longlong_t)guid);
1908 
1909 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1910 	zc.zc_guid = guid;
1911 
1912 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1913 		return (0);
1914 
1915 	return (zpool_standard_error(hdl, errno, msg));
1916 }
1917 
1918 /*
1919  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1920  * hierarchy.
1921  */
1922 int
1923 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1924     void *data)
1925 {
1926 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1927 	char (*paths)[MAXPATHLEN];
1928 	size_t size = 4;
1929 	int curr, fd, base, ret = 0;
1930 	DIR *dirp;
1931 	struct dirent *dp;
1932 	struct stat st;
1933 
1934 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
1935 		return (errno == ENOENT ? 0 : -1);
1936 
1937 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
1938 		int err = errno;
1939 		(void) close(base);
1940 		return (err == ENOENT ? 0 : -1);
1941 	}
1942 
1943 	/*
1944 	 * Oddly this wasn't a directory -- ignore that failure since we
1945 	 * know there are no links lower in the (non-existent) hierarchy.
1946 	 */
1947 	if (!S_ISDIR(st.st_mode)) {
1948 		(void) close(base);
1949 		return (0);
1950 	}
1951 
1952 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1953 		(void) close(base);
1954 		return (-1);
1955 	}
1956 
1957 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1958 	curr = 0;
1959 
1960 	while (curr >= 0) {
1961 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
1962 			goto err;
1963 
1964 		if (S_ISDIR(st.st_mode)) {
1965 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
1966 				goto err;
1967 
1968 			if ((dirp = fdopendir(fd)) == NULL) {
1969 				(void) close(fd);
1970 				goto err;
1971 			}
1972 
1973 			while ((dp = readdir(dirp)) != NULL) {
1974 				if (dp->d_name[0] == '.')
1975 					continue;
1976 
1977 				if (curr + 1 == size) {
1978 					paths = zfs_realloc(hdl, paths,
1979 					    size * sizeof (paths[0]),
1980 					    size * 2 * sizeof (paths[0]));
1981 					if (paths == NULL) {
1982 						(void) closedir(dirp);
1983 						(void) close(fd);
1984 						goto err;
1985 					}
1986 
1987 					size *= 2;
1988 				}
1989 
1990 				(void) strlcpy(paths[curr + 1], paths[curr],
1991 				    sizeof (paths[curr + 1]));
1992 				(void) strlcat(paths[curr], "/",
1993 				    sizeof (paths[curr]));
1994 				(void) strlcat(paths[curr], dp->d_name,
1995 				    sizeof (paths[curr]));
1996 				curr++;
1997 			}
1998 
1999 			(void) closedir(dirp);
2000 
2001 		} else {
2002 			if ((ret = cb(paths[curr], data)) != 0)
2003 				break;
2004 		}
2005 
2006 		curr--;
2007 	}
2008 
2009 	free(paths);
2010 	(void) close(base);
2011 
2012 	return (ret);
2013 
2014 err:
2015 	free(paths);
2016 	(void) close(base);
2017 	return (-1);
2018 }
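
/*
 * Example callback (hypothetical, for illustration only): print each zvol
 * link found under /dev/zvol/dsk/<pool>.  Error handling is elided.
 *
 *	static int
 *	print_zvol(const char *name, void *data)
 *	{
 *		(void) printf("%s\n", name);
 *		return (0);
 *	}
 *
 *	...
 *	(void) zpool_iter_zvol(zhp, print_zvol, NULL);
 *
 * A non-zero return from the callback stops the walk and is passed back
 * to the caller of zpool_iter_zvol().
 */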
2019 
2020 typedef struct zvol_cb {
2021 	zpool_handle_t *zcb_pool;
2022 	boolean_t zcb_create;
2023 } zvol_cb_t;
2024 
2025 /*ARGSUSED*/
2026 static int
2027 do_zvol_create(zfs_handle_t *zhp, void *data)
2028 {
2029 	int ret = 0;
2030 
2031 	if (ZFS_IS_VOLUME(zhp)) {
2032 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
2033 		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
2034 	}
2035 
2036 	if (ret == 0)
2037 		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
2038 
2039 	zfs_close(zhp);
2040 
2041 	return (ret);
2042 }
2043 
2044 /*
2045  * Iterate over all zvols in the pool and make any necessary minor nodes.
2046  */
2047 int
2048 zpool_create_zvol_links(zpool_handle_t *zhp)
2049 {
2050 	zfs_handle_t *zfp;
2051 	int ret;
2052 
2053 	/*
2054 	 * If the pool is unavailable, just return success.
2055 	 */
2056 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
2057 	    zhp->zpool_name)) == NULL)
2058 		return (0);
2059 
2060 	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
2061 
2062 	zfs_close(zfp);
2063 	return (ret);
2064 }
2065 
2066 static int
2067 do_zvol_remove(const char *dataset, void *data)
2068 {
2069 	zpool_handle_t *zhp = data;
2070 
2071 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
2072 }
2073 
2074 /*
2075  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
2076  * by examining the /dev links so that a corrupted pool doesn't impede this
2077  * operation.
2078  */
2079 int
2080 zpool_remove_zvol_links(zpool_handle_t *zhp)
2081 {
2082 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
2083 }
2084 
2085 /*
2086  * Convert from a devid string to a path.
2087  */
2088 static char *
2089 devid_to_path(char *devid_str)
2090 {
2091 	ddi_devid_t devid;
2092 	char *minor;
2093 	char *path;
2094 	devid_nmlist_t *list = NULL;
2095 	int ret;
2096 
2097 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2098 		return (NULL);
2099 
2100 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2101 
2102 	devid_str_free(minor);
2103 	devid_free(devid);
2104 
2105 	if (ret != 0)
2106 		return (NULL);
2107 
	/* duplicate the name before freeing the nmlist that owns it */
	path = strdup(list[0].devname);
	devid_free_nmlist(list);

	return (path);
2114 }
2115 
2116 /*
2117  * Convert from a path to a devid string.
2118  */
2119 static char *
2120 path_to_devid(const char *path)
2121 {
2122 	int fd;
2123 	ddi_devid_t devid;
2124 	char *minor, *ret;
2125 
2126 	if ((fd = open(path, O_RDONLY)) < 0)
2127 		return (NULL);
2128 
2129 	minor = NULL;
2130 	ret = NULL;
2131 	if (devid_get(fd, &devid) == 0) {
2132 		if (devid_get_minor_name(fd, &minor) == 0)
2133 			ret = devid_str_encode(devid, minor);
2134 		if (minor != NULL)
2135 			devid_str_free(minor);
2136 		devid_free(devid);
2137 	}
2138 	(void) close(fd);
2139 
2140 	return (ret);
2141 }
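
/*
 * Round-trip sketch (hypothetical path, for illustration): these two
 * helpers let zpool_vdev_name() below detect a disk that has moved.
 *
 *	char *devid = path_to_devid("/dev/dsk/c1t0d0s0");
 *	if (devid != NULL) {
 *		char *path = devid_to_path(devid);
 *		if (path != NULL)
 *			free(path);
 *		devid_str_free(devid);
 *	}
 *
 * path_to_devid() returns a string from devid_str_encode(), so it is
 * released with devid_str_free(); devid_to_path() returns strdup()ed
 * memory, released with free().
 */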
2142 
2143 /*
2144  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2145  * ignore any failure here, since a common case is for an unprivileged user to
2146  * type 'zpool status', and we'll display the correct information anyway.
2147  */
2148 static void
2149 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2150 {
2151 	zfs_cmd_t zc = { 0 };
2152 
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
2155 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2156 	    &zc.zc_guid) == 0);
2157 
2158 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2159 }
2160 
2161 /*
2162  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2163  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2164  * We also check if this is a whole disk, in which case we strip off the
2165  * trailing 's0' slice name.
2166  *
2167  * This routine is also responsible for identifying when disks have been
2168  * reconfigured in a new location.  The kernel will have opened the device by
2169  * devid, but the path will still refer to the old location.  To catch this, we
2170  * first do a path -> devid translation (which is fast for the common case).  If
2171  * the devid matches, we're done.  If not, we do a reverse devid -> path
2172  * translation and issue the appropriate ioctl() to update the path of the vdev.
2173  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2174  * of these checks.
2175  */
2176 char *
2177 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2178 {
2179 	char *path, *devid;
2180 	uint64_t value;
2181 	char buf[64];
2182 	vdev_stat_t *vs;
2183 	uint_t vsc;
2184 
2185 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2186 	    &value) == 0) {
2187 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2188 		    &value) == 0);
2189 		(void) snprintf(buf, sizeof (buf), "%llu",
2190 		    (u_longlong_t)value);
2191 		path = buf;
2192 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2193 
2194 		/*
2195 		 * If the device is dead (faulted, offline, etc) then don't
2196 		 * bother opening it.  Otherwise we may be forcing the user to
2197 		 * open a misbehaving device, which can have undesirable
2198 		 * effects.
2199 		 */
2200 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2201 		    (uint64_t **)&vs, &vsc) != 0 ||
2202 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2203 		    zhp != NULL &&
2204 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2205 			/*
2206 			 * Determine if the current path is correct.
2207 			 */
2208 			char *newdevid = path_to_devid(path);
2209 
2210 			if (newdevid == NULL ||
2211 			    strcmp(devid, newdevid) != 0) {
2212 				char *newpath;
2213 
2214 				if ((newpath = devid_to_path(devid)) != NULL) {
2215 					/*
2216 					 * Update the path appropriately.
2217 					 */
2218 					set_path(zhp, nv, newpath);
2219 					if (nvlist_add_string(nv,
2220 					    ZPOOL_CONFIG_PATH, newpath) == 0)
2221 						verify(nvlist_lookup_string(nv,
2222 						    ZPOOL_CONFIG_PATH,
2223 						    &path) == 0);
2224 					free(newpath);
2225 				}
2226 			}
2227 
2228 			if (newdevid)
2229 				devid_str_free(newdevid);
2230 		}
2231 
2232 		if (strncmp(path, "/dev/dsk/", 9) == 0)
2233 			path += 9;
2234 
2235 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2236 		    &value) == 0 && value) {
2237 			char *tmp = zfs_strdup(hdl, path);
2238 			if (tmp == NULL)
2239 				return (NULL);
2240 			tmp[strlen(path) - 2] = '\0';
2241 			return (tmp);
2242 		}
2243 	} else {
2244 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2245 
2246 		/*
2247 		 * If it's a raidz device, we need to stick in the parity level.
2248 		 */
2249 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2250 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2251 			    &value) == 0);
2252 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
2253 			    (u_longlong_t)value);
2254 			path = buf;
2255 		}
2256 	}
2257 
2258 	return (zfs_strdup(hdl, path));
2259 }
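
/*
 * Usage sketch (hypothetical caller): name each top-level vdev of an open
 * pool, much as the status and iostat output does.
 *
 *	nvlist_t *nvroot, **child;
 *	uint_t c, children;
 *
 *	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
 *	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
 *	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &child, &children) == 0) {
 *		for (c = 0; c < children; c++) {
 *			char *name = zpool_vdev_name(hdl, zhp, child[c]);
 *			...
 *			free(name);
 *		}
 *	}
 */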
2260 
2261 static int
2262 zbookmark_compare(const void *a, const void *b)
2263 {
2264 	return (memcmp(a, b, sizeof (zbookmark_t)));
2265 }
2266 
2267 /*
2268  * Retrieve the persistent error log, uniquify the members, and return to the
2269  * caller.
2270  */
2271 int
2272 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2273 {
2274 	zfs_cmd_t zc = { 0 };
2275 	uint64_t count;
2276 	zbookmark_t *zb = NULL;
2277 	int i;
2278 
2279 	/*
2280 	 * Retrieve the raw error list from the kernel.  If the number of errors
2281 	 * has increased, allocate more space and continue until we get the
2282 	 * entire list.
2283 	 */
2284 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2285 	    &count) == 0);
2286 	if (count == 0)
2287 		return (0);
2288 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2289 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2290 		return (-1);
2291 	zc.zc_nvlist_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2293 	for (;;) {
2294 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2295 		    &zc) != 0) {
2296 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
2297 			if (errno == ENOMEM) {
2298 				count = zc.zc_nvlist_dst_size;
2299 				if ((zc.zc_nvlist_dst = (uintptr_t)
2300 				    zfs_alloc(zhp->zpool_hdl, count *
2301 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
2302 					return (-1);
2303 			} else {
2304 				return (-1);
2305 			}
2306 		} else {
2307 			break;
2308 		}
2309 	}
2310 
	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
2318 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2319 	    zc.zc_nvlist_dst_size;
2320 	count -= zc.zc_nvlist_dst_size;
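	/*
	 * Worked example (hypothetical numbers): if count == 5 but the
	 * kernel had room to copy only the last 3 bookmarks, then
	 * zc_nvlist_dst_size is left at 2, so 'zb' starts at element 2
	 * of the buffer and 'count' drops to 3.
	 */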
2321 
2322 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2323 
2324 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2325 
2326 	/*
2327 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2328 	 */
2329 	for (i = 0; i < count; i++) {
2330 		nvlist_t *nv;
2331 
2332 		/* ignoring zb_blkid and zb_level for now */
2333 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2334 		    zb[i-1].zb_object == zb[i].zb_object)
2335 			continue;
2336 
2337 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2338 			goto nomem;
2339 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2340 		    zb[i].zb_objset) != 0) {
2341 			nvlist_free(nv);
2342 			goto nomem;
2343 		}
2344 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2345 		    zb[i].zb_object) != 0) {
2346 			nvlist_free(nv);
2347 			goto nomem;
2348 		}
2349 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2350 			nvlist_free(nv);
2351 			goto nomem;
2352 		}
2353 		nvlist_free(nv);
2354 	}
2355 
2356 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2357 	return (0);
2358 
2359 nomem:
2360 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2361 	return (no_memory(zhp->zpool_hdl));
2362 }
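
/*
 * Usage sketch (hypothetical caller): walk the uniquified error list.
 * Assumes the pool has a non-zero error count; error handling is elided.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			uint64_t dsobj, obj;
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_DATASET, &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_OBJECT, &obj) == 0);
 *			...
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */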
2363 
2364 /*
2365  * Upgrade a ZFS pool to the latest on-disk version.
2366  */
2367 int
2368 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2369 {
2370 	zfs_cmd_t zc = { 0 };
2371 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2372 
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2374 	zc.zc_cookie = new_version;
2375 
2376 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2377 		return (zpool_standard_error_fmt(hdl, errno,
2378 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2379 		    zhp->zpool_name));
2380 	return (0);
2381 }
2382 
2383 void
2384 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2385     char *history_str)
2386 {
2387 	int i;
2388 
2389 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2390 	for (i = 1; i < argc; i++) {
2391 		if (strlen(history_str) + 1 + strlen(argv[i]) >
2392 		    HIS_MAX_RECORD_LEN)
2393 			break;
2394 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2395 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2396 	}
2397 }
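
/*
 * Usage sketch (hypothetical caller): build and stage the history string
 * for a subcommand before issuing the ioctl it describes.
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *
 *	zpool_set_history_str("create", argc, argv, history_str);
 *	(void) zpool_stage_history(hdl, history_str);
 *
 * Arguments that would push the record past HIS_MAX_RECORD_LEN are
 * silently dropped.
 */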
2398 
2399 /*
2400  * Stage command history for logging.
2401  */
2402 int
2403 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2404 {
2405 	if (history_str == NULL)
2406 		return (EINVAL);
2407 
2408 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2409 		return (EINVAL);
2410 
2411 	if (hdl->libzfs_log_str != NULL)
2412 		free(hdl->libzfs_log_str);
2413 
2414 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2415 		return (no_memory(hdl));
2416 
2417 	return (0);
2418 }
2419 
2420 /*
2421  * Perform ioctl to get some command history of a pool.
2422  *
2423  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2424  * logical offset of the history buffer to start reading from.
2425  *
2426  * Upon return, 'off' is the next logical offset to read from and
2427  * 'len' is the actual amount of bytes read into 'buf'.
2428  */
2429 static int
2430 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2431 {
2432 	zfs_cmd_t zc = { 0 };
2433 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2434 
2435 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2436 
2437 	zc.zc_history = (uint64_t)(uintptr_t)buf;
2438 	zc.zc_history_len = *len;
2439 	zc.zc_history_offset = *off;
2440 
2441 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2442 		switch (errno) {
2443 		case EPERM:
2444 			return (zfs_error_fmt(hdl, EZFS_PERM,
2445 			    dgettext(TEXT_DOMAIN,
2446 			    "cannot show history for pool '%s'"),
2447 			    zhp->zpool_name));
2448 		case ENOENT:
2449 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2450 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2451 			    "'%s'"), zhp->zpool_name));
2452 		case ENOTSUP:
2453 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2454 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2455 			    "'%s', pool must be upgraded"), zhp->zpool_name));
2456 		default:
2457 			return (zpool_standard_error_fmt(hdl, errno,
2458 			    dgettext(TEXT_DOMAIN,
2459 			    "cannot get history for '%s'"), zhp->zpool_name));
2460 		}
2461 	}
2462 
2463 	*len = zc.zc_history_len;
2464 	*off = zc.zc_history_offset;
2465 
2466 	return (0);
2467 }
2468 
2469 /*
2470  * Process the buffer of nvlists, unpacking and storing each nvlist record
2471  * into 'records'.  'leftover' is set to the number of bytes that weren't
2472  * processed as there wasn't a complete record.
2473  */
2474 static int
2475 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2476     nvlist_t ***records, uint_t *numrecords)
2477 {
2478 	uint64_t reclen;
2479 	nvlist_t *nv;
2480 	int i;
2481 
2482 	while (bytes_read > sizeof (reclen)) {
2483 
2484 		/* get length of packed record (stored as little endian) */
2485 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2486 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2487 
2488 		if (bytes_read < sizeof (reclen) + reclen)
2489 			break;
2490 
2491 		/* unpack record */
2492 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2493 			return (ENOMEM);
2494 		bytes_read -= sizeof (reclen) + reclen;
2495 		buf += sizeof (reclen) + reclen;
2496 
		/* add record to nvlist array, growing it at powers of two */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				(*numrecords)--;
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
2504 	}
2505 
2506 	*leftover = bytes_read;
2507 	return (0);
2508 }
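
/*
 * Worked example of the record framing (hypothetical bytes): each record
 * is a 64-bit little-endian length followed by a packed nvlist.  If the
 * buffer starts with
 *
 *	2c 01 00 00 00 00 00 00 <300 bytes of packed nvlist> ...
 *
 * then reclen decodes to 0x12c (300), the next 300 bytes are unpacked as
 * one nvlist, and the cursor advances 308 bytes to the following record.
 */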
2509 
2510 #define	HIS_BUF_LEN	(128*1024)
2511 
2512 /*
2513  * Retrieve the command history of a pool.
2514  */
2515 int
2516 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2517 {
2518 	char buf[HIS_BUF_LEN];
2519 	uint64_t off = 0;
2520 	nvlist_t **records = NULL;
2521 	uint_t numrecords = 0;
2522 	int err, i;
2523 
2524 	do {
2525 		uint64_t bytes_read = sizeof (buf);
2526 		uint64_t leftover;
2527 
2528 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2529 			break;
2530 
2531 		/* if nothing else was read in, we're at EOF, just return */
2532 		if (!bytes_read)
2533 			break;
2534 
2535 		if ((err = zpool_history_unpack(buf, bytes_read,
2536 		    &leftover, &records, &numrecords)) != 0)
2537 			break;
2538 		off -= leftover;
2539 
2540 		/* CONSTCOND */
2541 	} while (1);
2542 
2543 	if (!err) {
2544 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2545 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2546 		    records, numrecords) == 0);
2547 	}
2548 	for (i = 0; i < numrecords; i++)
2549 		nvlist_free(records[i]);
2550 	free(records);
2551 
2552 	return (err);
2553 }
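
/*
 * Usage sketch (hypothetical caller): dump the command history, in the
 * style of 'zpool history'.  ZPOOL_HIST_TIME and ZPOOL_HIST_CMD name the
 * per-record fields; error handling is elided.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *	uint64_t tsec;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			if (nvlist_lookup_uint64(records[i],
 *			    ZPOOL_HIST_TIME, &tsec) == 0 &&
 *			    nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%llu %s\n",
 *				    (u_longlong_t)tsec, cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */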
2554 
2555 void
2556 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2557     char *pathname, size_t len)
2558 {
2559 	zfs_cmd_t zc = { 0 };
2560 	boolean_t mounted = B_FALSE;
2561 	char *mntpnt = NULL;
2562 	char dsname[MAXNAMELEN];
2563 
2564 	if (dsobj == 0) {
2565 		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (u_longlong_t)obj);
2567 		return;
2568 	}
2569 
2570 	/* get the dataset's name */
2571 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2572 	zc.zc_obj = dsobj;
2573 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
2574 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2575 		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
2578 		return;
2579 	}
2580 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2581 
2582 	/* find out if the dataset is mounted */
2583 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2584 
2585 	/* get the corrupted object's path */
2586 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2587 	zc.zc_obj = obj;
2588 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2589 	    &zc) == 0) {
2590 		if (mounted) {
2591 			(void) snprintf(pathname, len, "%s%s", mntpnt,
2592 			    zc.zc_value);
2593 		} else {
2594 			(void) snprintf(pathname, len, "%s:%s",
2595 			    dsname, zc.zc_value);
2596 		}
2597 	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (u_longlong_t)obj);
2599 	}
2600 	free(mntpnt);
2601 }
2602 
2603 #define	RDISK_ROOT	"/dev/rdsk"
2604 #define	BACKUP_SLICE	"s2"
2605 /*
2606  * Don't start the slice at the default block of 34; many storage
2607  * devices will use a stripe width of 128k, so start there instead.
2608  */
2609 #define	NEW_START_BLOCK	256
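
/*
 * With 512-byte sectors (an assumption; the label code below works in
 * whatever block size the device reports), 256 * 512 = 128K, matching
 * the stripe width noted above.
 */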
2610 
/*
 * Read the EFI label from the config.  If a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
2617 static int
2618 read_efi_label(nvlist_t *config, diskaddr_t *sb)
2619 {
2620 	char *path;
2621 	int fd;
2622 	char diskname[MAXPATHLEN];
2623 	int err = -1;
2624 
2625 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
2626 		return (err);
2627 
2628 	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
2629 	    strrchr(path, '/'));
2630 	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2631 		struct dk_gpt *vtoc;
2632 
2633 		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
2634 			if (sb != NULL)
2635 				*sb = vtoc->efi_parts[0].p_start;
2636 			efi_free(vtoc);
2637 		}
2638 		(void) close(fd);
2639 	}
2640 	return (err);
2641 }
2642 
/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
2647 static diskaddr_t
2648 find_start_block(nvlist_t *config)
2649 {
2650 	nvlist_t **child;
2651 	uint_t c, children;
2652 	diskaddr_t sb = MAXOFFSET_T;
2653 	uint64_t wholedisk;
2654 
2655 	if (nvlist_lookup_nvlist_array(config,
2656 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2657 		if (nvlist_lookup_uint64(config,
2658 		    ZPOOL_CONFIG_WHOLE_DISK,
2659 		    &wholedisk) != 0 || !wholedisk) {
2660 			return (MAXOFFSET_T);
2661 		}
2662 		if (read_efi_label(config, &sb) < 0)
2663 			sb = MAXOFFSET_T;
2664 		return (sb);
2665 	}
2666 
2667 	for (c = 0; c < children; c++) {
2668 		sb = find_start_block(child[c]);
2669 		if (sb != MAXOFFSET_T) {
2670 			return (sb);
2671 		}
2672 	}
2673 	return (MAXOFFSET_T);
2674 }
2675 
2676 /*
2677  * Label an individual disk.  The name provided is the short name,
2678  * stripped of any leading /dev path.
2679  */
2680 int
2681 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2682 {
2683 	char path[MAXPATHLEN];
2684 	struct dk_gpt *vtoc;
2685 	int fd;
2686 	size_t resv = EFI_MIN_RESV_SIZE;
2687 	uint64_t slice_size;
2688 	diskaddr_t start_block;
2689 	char errbuf[1024];
2690 
2691 	/* prepare an error message just in case */
2692 	(void) snprintf(errbuf, sizeof (errbuf),
2693 	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
2694 
2695 	if (zhp) {
2696 		nvlist_t *nvroot;
2697 
2698 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2699 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2700 
2701 		if (zhp->zpool_start_block == 0)
2702 			start_block = find_start_block(nvroot);
2703 		else
2704 			start_block = zhp->zpool_start_block;
2705 		zhp->zpool_start_block = start_block;
2706 	} else {
2707 		/* new pool */
2708 		start_block = NEW_START_BLOCK;
2709 	}
2710 
2711 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2712 	    BACKUP_SLICE);
2713 
2714 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2715 		/*
2716 		 * This shouldn't happen.  We've long since verified that this
2717 		 * is a valid device.
2718 		 */
2719 		zfs_error_aux(hdl,
2720 		    dgettext(TEXT_DOMAIN, "unable to open device"));
2721 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2722 	}
2723 
2724 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
2729 		if (errno == ENOMEM)
2730 			(void) no_memory(hdl);
2731 
2732 		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));
2735 
2736 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2737 	}
2738 
2739 	slice_size = vtoc->efi_last_u_lba + 1;
2740 	slice_size -= EFI_MIN_RESV_SIZE;
2741 	if (start_block == MAXOFFSET_T)
2742 		start_block = NEW_START_BLOCK;
2743 	slice_size -= start_block;
2744 
2745 	vtoc->efi_parts[0].p_start = start_block;
2746 	vtoc->efi_parts[0].p_size = slice_size;
2747 
2748 	/*
2749 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2750 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2751 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2752 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2753 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2754 	 * can get, in the absence of V_OTHER.
2755 	 */
2756 	vtoc->efi_parts[0].p_tag = V_USR;
2757 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2758 
2759 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2760 	vtoc->efi_parts[8].p_size = resv;
2761 	vtoc->efi_parts[8].p_tag = V_RESERVED;
2762 
2763 	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a
		 * specific slice.
		 */
2770 		(void) close(fd);
2771 		efi_free(vtoc);
2772 
2773 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2774 		    "try using fdisk(1M) and then provide a specific slice"));
2775 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2776 	}
2777 
2778 	(void) close(fd);
2779 	efi_free(vtoc);
2780 	return (0);
2781 }
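
/*
 * Worked example of the resulting label (hypothetical numbers): on a disk
 * whose usable range ends at efi_last_u_lba == 1999999, with start_block
 * == 256 and EFI_MIN_RESV_SIZE == 16384:
 *
 *	slice 0 ("zfs", V_USR):		start 256, size 1983360
 *	slice 8 (V_RESERVED):		start 1983616, size 16384
 *
 * so slice 8 occupies the final EFI_MIN_RESV_SIZE sectors below the end
 * of the usable range.
 */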
2782 
2783 static boolean_t
2784 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
2785 {
2786 	char *type;
2787 	nvlist_t **child;
2788 	uint_t children, c;
2789 
2790 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
2791 	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2792 	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
2793 	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
2794 	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
2795 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2796 		    "vdev type '%s' is not supported"), type);
2797 		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
2798 		return (B_FALSE);
2799 	}
2800 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
2801 	    &child, &children) == 0) {
2802 		for (c = 0; c < children; c++) {
2803 			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
2804 				return (B_FALSE);
2805 		}
2806 	}
2807 	return (B_TRUE);
2808 }
2809 
/*
 * Check whether this zvol is allowable for use as a dump device: returns
 * zero if it is, > 0 if it isn't, and < 0 if it isn't a zvol at all.
 */
2814 int
2815 zvol_check_dump_config(char *arg)
2816 {
2817 	zpool_handle_t *zhp = NULL;
2818 	nvlist_t *config, *nvroot;
2819 	char *p, *volname;
2820 	nvlist_t **top;
2821 	uint_t toplevels;
2822 	libzfs_handle_t *hdl;
2823 	char errbuf[1024];
2824 	char poolname[ZPOOL_MAXNAMELEN];
2825 	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
2826 	int ret = 1;
2827 
2828 	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
2829 		return (-1);
2830 	}
2831 
2832 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2833 	    "dump is not supported on device '%s'"), arg);
2834 
2835 	if ((hdl = libzfs_init()) == NULL)
2836 		return (1);
2837 	libzfs_print_on_error(hdl, B_TRUE);
2838 
2839 	volname = arg + pathlen;
2840 
2841 	/* check the configuration of the pool */
2842 	if ((p = strchr(volname, '/')) == NULL) {
2843 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2844 		    "malformed dataset name"));
2845 		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
2847 	} else if (p - volname >= ZFS_MAXNAMELEN) {
2848 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2849 		    "dataset name is too long"));
2850 		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
2852 	} else {
2853 		(void) strncpy(poolname, volname, p - volname);
2854 		poolname[p - volname] = '\0';
2855 	}
2856 
2857 	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
2858 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2859 		    "could not open pool '%s'"), poolname);
2860 		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
2861 		goto out;
2862 	}
2863 	config = zpool_get_config(zhp, NULL);
2864 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2865 	    &nvroot) != 0) {
2866 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
2868 		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
2869 		goto out;
2870 	}
2871 
2872 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2873 	    &top, &toplevels) == 0);
2874 	if (toplevels != 1) {
2875 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2876 		    "'%s' has multiple top level vdevs"), poolname);
2877 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
2878 		goto out;
2879 	}
2880 
2881 	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
2882 		goto out;
2883 	}
2884 	ret = 0;
2885 
2886 out:
2887 	if (zhp)
2888 		zpool_close(zhp);
2889 	libzfs_fini(hdl);
2890 	return (ret);
2891 }
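
/*
 * Usage sketch (hypothetical caller, in the style of dump configuration
 * code): "tank/dump" is an assumed zvol name under the /dev/zvol/dsk
 * hierarchy described above.
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/tank/dump") == 0)
 *		... the zvol lives on a pool this code can dump to ...
 */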
2892