xref: /titanic_51/usr/src/lib/libzfs/common/libzfs_pool.c (revision 53a7b6b6763f5865522a76e5e887390a8f4777d7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <alloca.h>
28 #include <assert.h>
29 #include <ctype.h>
30 #include <errno.h>
31 #include <devid.h>
32 #include <dirent.h>
33 #include <fcntl.h>
34 #include <libintl.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <strings.h>
38 #include <unistd.h>
39 #include <zone.h>
40 #include <sys/efi_partition.h>
41 #include <sys/vtoc.h>
42 #include <sys/zfs_ioctl.h>
43 #include <sys/zio.h>
45 
46 #include "zfs_namecheck.h"
47 #include "zfs_prop.h"
48 #include "libzfs_impl.h"
49 
50 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
51 
52 #if defined(__i386) || defined(__amd64)
53 #define	BOOTCMD	"installgrub(1M)"
54 #else
55 #define	BOOTCMD	"installboot(1M)"
56 #endif
57 
58 /*
59  * ====================================================================
60  *   zpool property functions
61  * ====================================================================
62  */
63 
64 static int
65 zpool_get_all_props(zpool_handle_t *zhp)
66 {
67 	zfs_cmd_t zc = { 0 };
68 	libzfs_handle_t *hdl = zhp->zpool_hdl;
69 
70 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
71 
72 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
73 		return (-1);
74 
75 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
76 		if (errno == ENOMEM) {
77 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
78 				zcmd_free_nvlists(&zc);
79 				return (-1);
80 			}
81 		} else {
82 			zcmd_free_nvlists(&zc);
83 			return (-1);
84 		}
85 	}
86 
87 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
88 		zcmd_free_nvlists(&zc);
89 		return (-1);
90 	}
91 
92 	zcmd_free_nvlists(&zc);
93 
94 	return (0);
95 }
96 
97 static int
98 zpool_props_refresh(zpool_handle_t *zhp)
99 {
100 	nvlist_t *old_props;
101 
102 	old_props = zhp->zpool_props;
103 
104 	if (zpool_get_all_props(zhp) != 0)
105 		return (-1);
106 
107 	nvlist_free(old_props);
108 	return (0);
109 }
110 
111 static char *
112 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
113     zprop_source_t *src)
114 {
115 	nvlist_t *nv, *nvl;
116 	uint64_t ival;
117 	char *value;
118 	zprop_source_t source;
119 
120 	nvl = zhp->zpool_props;
121 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
122 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
123 		source = ival;
124 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
125 	} else {
126 		source = ZPROP_SRC_DEFAULT;
127 		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
128 			value = "-";
129 	}
130 
131 	if (src)
132 		*src = source;
133 
134 	return (value);
135 }
136 
137 uint64_t
138 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
139 {
140 	nvlist_t *nv, *nvl;
141 	uint64_t value;
142 	zprop_source_t source;
143 
144 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
145 		/*
146 		 * zpool_get_all_props() has most likely failed because
147 		 * the pool is faulted, but if all we need is the top level
148 		 * vdev's guid then get it from the zhp config nvlist.
149 		 */
150 		if ((prop == ZPOOL_PROP_GUID) &&
151 		    (nvlist_lookup_nvlist(zhp->zpool_config,
152 		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
153 		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
154 		    == 0)) {
155 			return (value);
156 		}
157 		return (zpool_prop_default_numeric(prop));
158 	}
159 
160 	nvl = zhp->zpool_props;
161 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
162 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
163 		source = value;
164 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
165 	} else {
166 		source = ZPROP_SRC_DEFAULT;
167 		value = zpool_prop_default_numeric(prop);
168 	}
169 
170 	if (src)
171 		*src = source;
172 
173 	return (value);
174 }
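
/*
 * Example (illustrative sketch, not used by this file): reading a numeric
 * property together with its source.  Assumes a valid zpool_handle_t 'zhp'.
 *
 *	zprop_source_t src;
 *	uint64_t version;
 *
 *	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *	if (src == ZPROP_SRC_DEFAULT)
 *		(void) printf("version %llu (default)\n",
 *		    (u_longlong_t)version);
 */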
175 
176 /*
177  * Map VDEV STATE to printed strings.
178  */
179 char *
180 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
181 {
182 	switch (state) {
183 	case VDEV_STATE_CLOSED:
184 	case VDEV_STATE_OFFLINE:
185 		return (gettext("OFFLINE"));
186 	case VDEV_STATE_REMOVED:
187 		return (gettext("REMOVED"));
188 	case VDEV_STATE_CANT_OPEN:
189 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
190 			return (gettext("FAULTED"));
191 		else
192 			return (gettext("UNAVAIL"));
193 	case VDEV_STATE_FAULTED:
194 		return (gettext("FAULTED"));
195 	case VDEV_STATE_DEGRADED:
196 		return (gettext("DEGRADED"));
197 	case VDEV_STATE_HEALTHY:
198 		return (gettext("ONLINE"));
199 	}
200 
201 	return (gettext("UNKNOWN"));
202 }
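
/*
 * For example (sketch): a healthy vdev prints as "ONLINE", while a vdev
 * that cannot be opened because of corrupt data prints as "FAULTED":
 *
 *	(void) puts(zpool_state_to_name(VDEV_STATE_HEALTHY, VDEV_AUX_NONE));
 *	(void) puts(zpool_state_to_name(VDEV_STATE_CANT_OPEN,
 *	    VDEV_AUX_CORRUPT_DATA));
 */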
203 
204 /*
205  * Get a zpool property value for 'prop' and return the value in
206  * a pre-allocated buffer.
207  */
208 int
209 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
210     zprop_source_t *srctype)
211 {
212 	uint64_t intval;
213 	const char *strval;
214 	zprop_source_t src = ZPROP_SRC_NONE;
215 	nvlist_t *nvroot;
216 	vdev_stat_t *vs;
217 	uint_t vsc;
218 
219 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
220 		if (prop == ZPOOL_PROP_NAME)
221 			(void) strlcpy(buf, zpool_get_name(zhp), len);
222 		else if (prop == ZPOOL_PROP_HEALTH)
223 			(void) strlcpy(buf, "FAULTED", len);
224 		else
225 			(void) strlcpy(buf, "-", len);
226 		return (0);
227 	}
228 
229 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
230 	    prop != ZPOOL_PROP_NAME)
231 		return (-1);
232 
233 	switch (zpool_prop_get_type(prop)) {
234 	case PROP_TYPE_STRING:
235 		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
236 		    len);
237 		break;
238 
239 	case PROP_TYPE_NUMBER:
240 		intval = zpool_get_prop_int(zhp, prop, &src);
241 
242 		switch (prop) {
243 		case ZPOOL_PROP_SIZE:
244 		case ZPOOL_PROP_USED:
245 		case ZPOOL_PROP_AVAILABLE:
246 			(void) zfs_nicenum(intval, buf, len);
247 			break;
248 
249 		case ZPOOL_PROP_CAPACITY:
250 			(void) snprintf(buf, len, "%llu%%",
251 			    (u_longlong_t)intval);
252 			break;
253 
254 		case ZPOOL_PROP_HEALTH:
255 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
256 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
257 			verify(nvlist_lookup_uint64_array(nvroot,
258 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
259 
260 			(void) strlcpy(buf, zpool_state_to_name(intval,
261 			    vs->vs_aux), len);
262 			break;
263 		default:
264 			(void) snprintf(buf, len, "%llu", intval);
265 		}
266 		break;
267 
268 	case PROP_TYPE_INDEX:
269 		intval = zpool_get_prop_int(zhp, prop, &src);
270 		if (zpool_prop_index_to_string(prop, intval, &strval)
271 		    != 0)
272 			return (-1);
273 		(void) strlcpy(buf, strval, len);
274 		break;
275 
276 	default:
277 		abort();
278 	}
279 
280 	if (srctype)
281 		*srctype = src;
282 
283 	return (0);
284 }
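
/*
 * Example (hypothetical caller, shown for illustration): formatting the
 * pool's capacity into a preallocated buffer.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf,
 *	    sizeof (buf), NULL) == 0)
 *		(void) printf("capacity: %s\n", buf);
 */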
285 
286 /*
287  * Check that the given bootfs name is within the pool it is being set on.
288  * Assumes bootfs is a valid dataset name.
289  */
290 static boolean_t
291 bootfs_name_valid(const char *pool, char *bootfs)
292 {
293 	int len = strlen(pool);
294 
295 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
296 		return (B_FALSE);
297 
298 	if (strncmp(pool, bootfs, len) == 0 &&
299 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
300 		return (B_TRUE);
301 
302 	return (B_FALSE);
303 }
304 
305 /*
306  * Inspect the configuration to determine if any of the devices contain
307  * an EFI label.
308  */
309 static boolean_t
310 pool_uses_efi(nvlist_t *config)
311 {
312 	nvlist_t **child;
313 	uint_t c, children;
314 
315 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
316 	    &child, &children) != 0)
317 		return (read_efi_label(config, NULL) >= 0);
318 
319 	for (c = 0; c < children; c++) {
320 		if (pool_uses_efi(child[c]))
321 			return (B_TRUE);
322 	}
323 	return (B_FALSE);
324 }
325 
326 static boolean_t
327 pool_is_bootable(zpool_handle_t *zhp)
328 {
329 	char bootfs[ZPOOL_MAXNAMELEN];
330 
331 	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
332 	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
333 	    sizeof (bootfs)) != 0);
334 }
335 
336 
337 /*
338  * Given an nvlist of zpool properties to be set, validate that they are
339  * correct, and parse any numeric properties (index, boolean, etc.) if they are
340  * specified as strings.
341  */
342 static nvlist_t *
343 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
344     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
345 {
346 	nvpair_t *elem;
347 	nvlist_t *retprops;
348 	zpool_prop_t prop;
349 	char *strval;
350 	uint64_t intval;
351 	char *slash;
352 	struct stat64 statbuf;
353 	zpool_handle_t *zhp;
354 	nvlist_t *nvroot;
355 
356 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
357 		(void) no_memory(hdl);
358 		return (NULL);
359 	}
360 
361 	elem = NULL;
362 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
363 		const char *propname = nvpair_name(elem);
364 
365 		/*
366 		 * Make sure this property is valid and applies to this type.
367 		 */
368 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
369 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
370 			    "invalid property '%s'"), propname);
371 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
372 			goto error;
373 		}
374 
375 		if (zpool_prop_readonly(prop)) {
376 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
377 			    "is readonly"), propname);
378 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
379 			goto error;
380 		}
381 
382 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
383 		    &strval, &intval, errbuf) != 0)
384 			goto error;
385 
386 		/*
387 		 * Perform additional checking for specific properties.
388 		 */
389 		switch (prop) {
390 		case ZPOOL_PROP_VERSION:
391 			if (intval < version || intval > SPA_VERSION) {
392 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
393 				    "property '%s' number %llu is invalid."),
394 				    propname, (u_longlong_t)intval);
395 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
396 				goto error;
397 			}
398 			break;
399 
400 		case ZPOOL_PROP_BOOTFS:
401 			if (create_or_import) {
402 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
403 				    "property '%s' cannot be set at creation "
404 				    "or import time"), propname);
405 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
406 				goto error;
407 			}
408 
409 			if (version < SPA_VERSION_BOOTFS) {
410 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
411 				    "pool must be upgraded to support "
412 				    "'%s' property"), propname);
413 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
414 				goto error;
415 			}
416 
417 			/*
418 			 * The bootfs property value must be a dataset name, and
419 			 * the dataset must be in the pool it is being set on.
420 			 */
421 			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
422 			    strval)) {
423 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
424 				    "is an invalid name"), strval);
425 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
426 				goto error;
427 			}
428 
429 			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
430 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
431 				    "could not open pool '%s'"), poolname);
432 				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
433 				goto error;
434 			}
435 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
436 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
437 
438 			/*
439 			 * bootfs property cannot be set on a disk which has
440 			 * been EFI labeled.
441 			 */
442 			if (pool_uses_efi(nvroot)) {
443 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
444 				    "property '%s' not supported on "
445 				    "EFI labeled devices"), propname);
446 				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
447 				zpool_close(zhp);
448 				goto error;
449 			}
450 			zpool_close(zhp);
451 			break;
452 
453 		case ZPOOL_PROP_ALTROOT:
454 			if (!create_or_import) {
455 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
456 				    "property '%s' can only be set during pool "
457 				    "creation or import"), propname);
458 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
459 				goto error;
460 			}
461 
462 			if (strval[0] != '/') {
463 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
464 				    "bad alternate root '%s'"), strval);
465 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
466 				goto error;
467 			}
468 			break;
469 
470 		case ZPOOL_PROP_CACHEFILE:
471 			if (strval[0] == '\0')
472 				break;
473 
474 			if (strcmp(strval, "none") == 0)
475 				break;
476 
477 			if (strval[0] != '/') {
478 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
479 				    "property '%s' must be empty, an "
480 				    "absolute path, or 'none'"), propname);
481 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
482 				goto error;
483 			}
484 
485 			slash = strrchr(strval, '/');
486 
487 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
488 			    strcmp(slash, "/..") == 0) {
489 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
490 				    "'%s' is not a valid file"), strval);
491 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
492 				goto error;
493 			}
494 
495 			*slash = '\0';
496 
497 			if (strval[0] != '\0' &&
498 			    (stat64(strval, &statbuf) != 0 ||
499 			    !S_ISDIR(statbuf.st_mode))) {
500 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 				    "'%s' is not a valid directory"),
502 				    strval);
503 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
504 				goto error;
505 			}
506 
507 			*slash = '/';
508 			break;
509 		}
510 	}
511 
512 	return (retprops);
513 error:
514 	nvlist_free(retprops);
515 	return (NULL);
516 }
517 
518 /*
519  * Set zpool property : propname=propval.
520  */
521 int
522 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
523 {
524 	zfs_cmd_t zc = { 0 };
525 	int ret = -1;
526 	char errbuf[1024];
527 	nvlist_t *nvl = NULL;
528 	nvlist_t *realprops;
529 	uint64_t version;
530 
531 	(void) snprintf(errbuf, sizeof (errbuf),
532 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
533 	    zhp->zpool_name);
534 
535 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
536 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
537 
538 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
539 		return (no_memory(zhp->zpool_hdl));
540 
541 	if (nvlist_add_string(nvl, propname, propval) != 0) {
542 		nvlist_free(nvl);
543 		return (no_memory(zhp->zpool_hdl));
544 	}
545 
546 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
547 	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
548 	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
549 		nvlist_free(nvl);
550 		return (-1);
551 	}
552 
553 	nvlist_free(nvl);
554 	nvl = realprops;
555 
556 	/*
557 	 * Execute the corresponding ioctl() to set this property.
558 	 */
559 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
560 
561 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
562 		nvlist_free(nvl);
563 		return (-1);
564 	}
565 
566 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
567 
568 	zcmd_free_nvlists(&zc);
569 	nvlist_free(nvl);
570 
571 	if (ret)
572 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
573 	else
574 		(void) zpool_props_refresh(zhp);
575 
576 	return (ret);
577 }
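
/*
 * Example (sketch; assumes the pool version supports the property):
 * enabling automatic device replacement on a pool.
 *
 *	if (zpool_set_prop(zhp, "autoreplace", "on") != 0)
 *		(void) fprintf(stderr, "failed to set autoreplace\n");
 */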
578 
579 int
580 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
581 {
582 	libzfs_handle_t *hdl = zhp->zpool_hdl;
583 	zprop_list_t *entry;
584 	char buf[ZFS_MAXPROPLEN];
585 
586 	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
587 		return (-1);
588 
589 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
590 
591 		if (entry->pl_fixed)
592 			continue;
593 
594 		if (entry->pl_prop != ZPROP_INVAL &&
595 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
596 		    NULL) == 0) {
597 			if (strlen(buf) > entry->pl_width)
598 				entry->pl_width = strlen(buf);
599 		}
600 	}
601 
602 	return (0);
603 }
604 
605 
606 /*
607  * Validate the given pool name, optionally reporting an extended error
608  * message via the handle.
609  */
610 boolean_t
611 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
612 {
613 	namecheck_err_t why;
614 	char what;
615 	int ret;
616 
617 	ret = pool_namecheck(pool, &why, &what);
618 
619 	/*
620 	 * The rules for reserved pool names were extended at a later point.
621 	 * But we need to support users with existing pools that may now be
622 	 * invalid.  So we only check for this expanded set of names during a
623 	 * create (or import), and only in userland.
624 	 */
625 	if (ret == 0 && !isopen &&
626 	    (strncmp(pool, "mirror", 6) == 0 ||
627 	    strncmp(pool, "raidz", 5) == 0 ||
628 	    strncmp(pool, "spare", 5) == 0 ||
629 	    strcmp(pool, "log") == 0)) {
630 		if (hdl != NULL)
631 			zfs_error_aux(hdl,
632 			    dgettext(TEXT_DOMAIN, "name is reserved"));
633 		return (B_FALSE);
634 	}
635 
636 
637 	if (ret != 0) {
638 		if (hdl != NULL) {
639 			switch (why) {
640 			case NAME_ERR_TOOLONG:
641 				zfs_error_aux(hdl,
642 				    dgettext(TEXT_DOMAIN, "name is too long"));
643 				break;
644 
645 			case NAME_ERR_INVALCHAR:
646 				zfs_error_aux(hdl,
647 				    dgettext(TEXT_DOMAIN, "invalid character "
648 				    "'%c' in pool name"), what);
649 				break;
650 
651 			case NAME_ERR_NOLETTER:
652 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
653 				    "name must begin with a letter"));
654 				break;
655 
656 			case NAME_ERR_RESERVED:
657 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
658 				    "name is reserved"));
659 				break;
660 
661 			case NAME_ERR_DISKLIKE:
662 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
663 				    "pool name is reserved"));
664 				break;
665 
666 			case NAME_ERR_LEADING_SLASH:
667 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
668 				    "leading slash in name"));
669 				break;
670 
671 			case NAME_ERR_EMPTY_COMPONENT:
672 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
673 				    "empty component in name"));
674 				break;
675 
676 			case NAME_ERR_TRAILING_SLASH:
677 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
678 				    "trailing slash in name"));
679 				break;
680 
681 			case NAME_ERR_MULTIPLE_AT:
682 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
683 				    "multiple '@' delimiters in name"));
684 				break;
685 
686 			}
687 		}
688 		return (B_FALSE);
689 	}
690 
691 	return (B_TRUE);
692 }
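
/*
 * For instance, at create or import time (isopen == B_FALSE) the names
 * "mirror", "raidz1", "spare0", and "log" are all rejected as reserved,
 * while an existing pool with such a name can still be opened:
 *
 *	if (!zpool_name_valid(hdl, B_FALSE, "mirror"))
 *		(void) fprintf(stderr, "reserved name\n");
 */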
693 
694 /*
695  * Open a handle to the given pool, even if the pool is currently in the FAULTED
696  * state.
697  */
698 zpool_handle_t *
699 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
700 {
701 	zpool_handle_t *zhp;
702 	boolean_t missing;
703 
704 	/*
705 	 * Make sure the pool name is valid.
706 	 */
707 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
708 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
709 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
710 		    pool);
711 		return (NULL);
712 	}
713 
714 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
715 		return (NULL);
716 
717 	zhp->zpool_hdl = hdl;
718 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
719 
720 	if (zpool_refresh_stats(zhp, &missing) != 0) {
721 		zpool_close(zhp);
722 		return (NULL);
723 	}
724 
725 	if (missing) {
726 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
727 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
728 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
729 		zpool_close(zhp);
730 		return (NULL);
731 	}
732 
733 	return (zhp);
734 }
735 
736 /*
737  * Like the above, but silent on error.  Used when iterating over pools (because
738  * the configuration cache may be out of date).
739  */
740 int
741 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
742 {
743 	zpool_handle_t *zhp;
744 	boolean_t missing;
745 
746 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
747 		return (-1);
748 
749 	zhp->zpool_hdl = hdl;
750 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
751 
752 	if (zpool_refresh_stats(zhp, &missing) != 0) {
753 		zpool_close(zhp);
754 		return (-1);
755 	}
756 
757 	if (missing) {
758 		zpool_close(zhp);
759 		*ret = NULL;
760 		return (0);
761 	}
762 
763 	*ret = zhp;
764 	return (0);
765 }
766 
767 /*
768  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
769  * state.
770  */
771 zpool_handle_t *
772 zpool_open(libzfs_handle_t *hdl, const char *pool)
773 {
774 	zpool_handle_t *zhp;
775 
776 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
777 		return (NULL);
778 
779 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
780 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
781 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
782 		zpool_close(zhp);
783 		return (NULL);
784 	}
785 
786 	return (zhp);
787 }
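
/*
 * Typical lifecycle (illustrative sketch; "tank" is a placeholder pool
 * name): open a handle, query it, and close it.
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("%s: state %d\n", zpool_get_name(zhp),
 *		    zpool_get_state(zhp));
 *		zpool_close(zhp);
 *	}
 */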
788 
789 /*
790  * Close the handle.  Simply frees the memory associated with the handle.
791  */
792 void
793 zpool_close(zpool_handle_t *zhp)
794 {
795 	if (zhp->zpool_config)
796 		nvlist_free(zhp->zpool_config);
797 	if (zhp->zpool_old_config)
798 		nvlist_free(zhp->zpool_old_config);
799 	if (zhp->zpool_props)
800 		nvlist_free(zhp->zpool_props);
801 	free(zhp);
802 }
803 
804 /*
805  * Return the name of the pool.
806  */
807 const char *
808 zpool_get_name(zpool_handle_t *zhp)
809 {
810 	return (zhp->zpool_name);
811 }
812 
813 
814 /*
815  * Return the state of the pool (ACTIVE or UNAVAILABLE).
816  */
817 int
818 zpool_get_state(zpool_handle_t *zhp)
819 {
820 	return (zhp->zpool_state);
821 }
822 
823 /*
824  * Create the named pool, using the provided vdev list.  It is assumed
825  * that the consumer has already validated the contents of the nvlist, so we
826  * don't have to worry about error semantics.
827  */
828 int
829 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
830     nvlist_t *props, nvlist_t *fsprops)
831 {
832 	zfs_cmd_t zc = { 0 };
833 	nvlist_t *zc_fsprops = NULL;
834 	nvlist_t *zc_props = NULL;
835 	char msg[1024];
836 	char *altroot;
837 	int ret = -1;
838 
839 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
840 	    "cannot create '%s'"), pool);
841 
842 	if (!zpool_name_valid(hdl, B_FALSE, pool))
843 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
844 
845 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
846 		return (-1);
847 
848 	if (props) {
849 		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
850 		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
851 			goto create_failed;
852 		}
853 	}
854 
855 	if (fsprops) {
856 		uint64_t zoned;
857 		char *zonestr;
858 
859 		zoned = ((nvlist_lookup_string(fsprops,
860 		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
861 		    strcmp(zonestr, "on") == 0);
862 
863 		if ((zc_fsprops = zfs_valid_proplist(hdl,
864 		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
865 			goto create_failed;
866 		}
867 		if (!zc_props &&
868 		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
869 			goto create_failed;
870 		}
871 		if (nvlist_add_nvlist(zc_props,
872 		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
873 			goto create_failed;
874 		}
875 	}
876 
877 	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
878 		goto create_failed;
879 
880 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
881 
882 	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
883 
884 		zcmd_free_nvlists(&zc);
885 		nvlist_free(zc_props);
886 		nvlist_free(zc_fsprops);
887 
888 		switch (errno) {
889 		case EBUSY:
890 			/*
891 			 * This can happen if the user has specified the same
892 			 * device multiple times.  We can't reliably detect this
893 			 * until we try to add it and see we already have a
894 			 * label.
895 			 */
896 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
897 			    "one or more vdevs refer to the same device"));
898 			return (zfs_error(hdl, EZFS_BADDEV, msg));
899 
900 		case EOVERFLOW:
901 			/*
902 			 * This occurs when one of the devices is below
903 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
904 			 * device was the problem device since there's no
905 			 * reliable way to determine device size from userland.
906 			 */
907 			{
908 				char buf[64];
909 
910 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
911 
912 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
913 				    "one or more devices is less than the "
914 				    "minimum size (%s)"), buf);
915 			}
916 			return (zfs_error(hdl, EZFS_BADDEV, msg));
917 
918 		case ENOSPC:
919 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
920 			    "one or more devices is out of space"));
921 			return (zfs_error(hdl, EZFS_BADDEV, msg));
922 
923 		case ENOTBLK:
924 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
925 			    "cache device must be a disk or disk slice"));
926 			return (zfs_error(hdl, EZFS_BADDEV, msg));
927 
928 		default:
929 			return (zpool_standard_error(hdl, errno, msg));
930 		}
931 	}
932 
933 	/*
934 	 * If this is an alternate root pool, then we automatically set the
935 	 * mountpoint of the root dataset to be '/'.
936 	 */
937 	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
938 	    &altroot) == 0) {
939 		zfs_handle_t *zhp;
940 
941 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
942 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
943 		    "/") == 0);
944 
945 		zfs_close(zhp);
946 	}
947 
948 create_failed:
949 	zcmd_free_nvlists(&zc);
950 	nvlist_free(zc_props);
951 	nvlist_free(zc_fsprops);
952 	return (ret);
953 }
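
/*
 * Sketch of a caller (for illustration only; zpool(1M) assembles 'nvroot'
 * with its own helpers): creating a single-disk pool.  The layout below
 * mirrors what zpool_create() expects: a root vdev with an array of
 * children.  Error handling and nvlist_free() calls are elided.
 *
 *	nvlist_t *root, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t0d0s0") == 0);
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *	(void) zpool_create(hdl, "tank", root, NULL, NULL);
 */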
954 
955 /*
956  * Destroy the given pool.  It is up to the caller to ensure that there are no
957  * datasets left in the pool.
958  */
959 int
960 zpool_destroy(zpool_handle_t *zhp)
961 {
962 	zfs_cmd_t zc = { 0 };
963 	zfs_handle_t *zfp = NULL;
964 	libzfs_handle_t *hdl = zhp->zpool_hdl;
965 	char msg[1024];
966 
967 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
968 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
969 	    ZFS_TYPE_FILESYSTEM)) == NULL)
970 		return (-1);
971 
972 	if (zpool_remove_zvol_links(zhp) != 0)
973 		return (-1);
974 
975 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
976 
977 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
978 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
979 		    "cannot destroy '%s'"), zhp->zpool_name);
980 
981 		if (errno == EROFS) {
982 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
983 			    "one or more devices is read only"));
984 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
985 		} else {
986 			(void) zpool_standard_error(hdl, errno, msg);
987 		}
988 
989 		if (zfp)
990 			zfs_close(zfp);
991 		return (-1);
992 	}
993 
994 	if (zfp) {
995 		remove_mountpoint(zfp);
996 		zfs_close(zfp);
997 	}
998 
999 	return (0);
1000 }
1001 
1002 /*
1003  * Add the given vdevs to the pool.  The caller must have already performed the
1004  * necessary verification to ensure that the vdev specification is well-formed.
1005  */
1006 int
1007 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1008 {
1009 	zfs_cmd_t zc = { 0 };
1010 	int ret;
1011 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1012 	char msg[1024];
1013 	nvlist_t **spares, **l2cache;
1014 	uint_t nspares, nl2cache;
1015 
1016 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1017 	    "cannot add to '%s'"), zhp->zpool_name);
1018 
1019 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1020 	    SPA_VERSION_SPARES &&
1021 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1022 	    &spares, &nspares) == 0) {
1023 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1024 		    "upgraded to add hot spares"));
1025 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1026 	}
1027 
1028 	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1029 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1030 		uint64_t s;
1031 
1032 		for (s = 0; s < nspares; s++) {
1033 			char *path;
1034 
1035 			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1036 			    &path) == 0 && pool_uses_efi(spares[s])) {
1037 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1038 				    "device '%s' contains an EFI label and "
1039 				    "cannot be used on root pools."),
1040 				    zpool_vdev_name(hdl, NULL, spares[s]));
1041 				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1042 			}
1043 		}
1044 	}
1045 
1046 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1047 	    SPA_VERSION_L2CACHE &&
1048 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1049 	    &l2cache, &nl2cache) == 0) {
1050 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1051 		    "upgraded to add cache devices"));
1052 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1053 	}
1054 
1055 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1056 		return (-1);
1057 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1058 
1059 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1060 		switch (errno) {
1061 		case EBUSY:
1062 			/*
1063 			 * This can happen if the user has specified the same
1064 			 * device multiple times.  We can't reliably detect this
1065 			 * until we try to add it and see we already have a
1066 			 * label.
1067 			 */
1068 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1069 			    "one or more vdevs refer to the same device"));
1070 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1071 			break;
1072 
1073 		case EOVERFLOW:
1074 			/*
1075 			 * This occurs when one of the devices is below
1076 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1077 			 * device was the problem device since there's no
1078 			 * reliable way to determine device size from userland.
1079 			 */
1080 			{
1081 				char buf[64];
1082 
1083 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1084 
1085 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1086 				    "device is less than the minimum "
1087 				    "size (%s)"), buf);
1088 			}
1089 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1090 			break;
1091 
1092 		case ENOTSUP:
1093 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1094 			    "pool must be upgraded to add these vdevs"));
1095 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
1096 			break;
1097 
1098 		case EDOM:
1099 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1100 			    "root pool cannot have multiple vdevs"
1101 			    " or separate logs"));
1102 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1103 			break;
1104 
1105 		case ENOTBLK:
1106 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1107 			    "cache device must be a disk or disk slice"));
1108 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1109 			break;
1110 
1111 		default:
1112 			(void) zpool_standard_error(hdl, errno, msg);
1113 		}
1114 
1115 		ret = -1;
1116 	} else {
1117 		ret = 0;
1118 	}
1119 
1120 	zcmd_free_nvlists(&zc);
1121 
1122 	return (ret);
1123 }
1124 
1125 /*
1126  * Exports the pool from the system.  The caller must ensure that there are no
1127  * mounted datasets in the pool.
1128  */
1129 int
1130 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1131 {
1132 	zfs_cmd_t zc = { 0 };
1133 	char msg[1024];
1134 
1135 	if (zpool_remove_zvol_links(zhp) != 0)
1136 		return (-1);
1137 
1138 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1139 	    "cannot export '%s'"), zhp->zpool_name);
1140 
1141 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1142 	zc.zc_cookie = force;
1143 	zc.zc_guid = hardforce;
1144 
1145 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1146 		switch (errno) {
1147 		case EXDEV:
1148 			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1149 			    "use '-f' to override the following errors:\n"
1150 			    "'%s' has an active shared spare which could be"
1151 			    " used by other pools once '%s' is exported."),
1152 			    zhp->zpool_name, zhp->zpool_name);
1153 			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1154 			    msg));
1155 		default:
1156 			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1157 			    msg));
1158 		}
1159 	}
1160 
1161 	return (0);
1162 }
1163 
1164 int
1165 zpool_export(zpool_handle_t *zhp, boolean_t force)
1166 {
1167 	return (zpool_export_common(zhp, force, B_FALSE));
1168 }
1169 
1170 int
1171 zpool_export_force(zpool_handle_t *zhp)
1172 {
1173 	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1174 }
1175 
1176 /*
1177  * zpool_import() is a contracted interface; it should be kept
1178  * stable if possible.
1179  *
1180  * Applications should use zpool_import_props() to import a pool with
1181  * new property values to be set.
1182  */
1183 int
1184 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1185     char *altroot)
1186 {
1187 	nvlist_t *props = NULL;
1188 	int ret;
1189 
1190 	if (altroot != NULL) {
1191 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1192 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1193 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1194 			    newname));
1195 		}
1196 
1197 		if (nvlist_add_string(props,
1198 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1199 		    nvlist_add_string(props,
1200 		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1201 			nvlist_free(props);
1202 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1203 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1204 			    newname));
1205 		}
1206 	}
1207 
1208 	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1209 	if (props)
1210 		nvlist_free(props);
1211 	return (ret);
1212 }
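
/*
 * Example (sketch): importing a pool found by zpool_find_import() under an
 * alternate root, keeping its original name.  'config' is assumed to be a
 * configuration nvlist returned by the import search.
 *
 *	if (zpool_import(hdl, config, NULL, "/mnt") != 0)
 *		(void) fprintf(stderr, "import failed\n");
 */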
1213 
1214 /*
1215  * Import the given pool using the known configuration and a list of
1216  * properties to be set. The configuration should have come from
1217  * zpool_find_import(). The 'newname' parameter controls whether the pool
1218  * is imported with a different name.
1219  */
1220 int
1221 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1222     nvlist_t *props, boolean_t importfaulted)
1223 {
1224 	zfs_cmd_t zc = { 0 };
1225 	char *thename;
1226 	char *origname;
1227 	int ret;
1228 	char errbuf[1024];
1229 
1230 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1231 	    &origname) == 0);
1232 
1233 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1234 	    "cannot import pool '%s'"), origname);
1235 
1236 	if (newname != NULL) {
1237 		if (!zpool_name_valid(hdl, B_FALSE, newname))
1238 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1239 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1240 			    newname));
1241 		thename = (char *)newname;
1242 	} else {
1243 		thename = origname;
1244 	}
1245 
1246 	if (props) {
1247 		uint64_t version;
1248 
1249 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1250 		    &version) == 0);
1251 
1252 		if ((props = zpool_valid_proplist(hdl, origname,
1253 		    props, version, B_TRUE, errbuf)) == NULL) {
1254 			return (-1);
1255 		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1256 			nvlist_free(props);
1257 			return (-1);
1258 		}
1259 	}
1260 
1261 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1262 
1263 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1264 	    &zc.zc_guid) == 0);
1265 
1266 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1267 		nvlist_free(props);
1268 		return (-1);
1269 	}
1270 
1271 	zc.zc_cookie = (uint64_t)importfaulted;
1272 	ret = 0;
1273 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1274 		char desc[1024];
1275 		if (newname == NULL)
1276 			(void) snprintf(desc, sizeof (desc),
1277 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1278 			    thename);
1279 		else
1280 			(void) snprintf(desc, sizeof (desc),
1281 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1282 			    origname, thename);
1283 
1284 		switch (errno) {
1285 		case ENOTSUP:
1286 			/*
1287 			 * Unsupported version.
1288 			 */
1289 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
1290 			break;
1291 
1292 		case EINVAL:
1293 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1294 			break;
1295 
1296 		default:
1297 			(void) zpool_standard_error(hdl, errno, desc);
1298 		}
1299 
1300 		ret = -1;
1301 	} else {
1302 		zpool_handle_t *zhp;
1303 
1304 		/*
1305 		 * This should never fail, but play it safe anyway.
1306 		 */
1307 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
1308 			ret = -1;
1309 		} else if (zhp != NULL) {
1310 			ret = zpool_create_zvol_links(zhp);
1311 			zpool_close(zhp);
1312 		}
1313 
1314 	}
1315 
1316 	zcmd_free_nvlists(&zc);
1317 	nvlist_free(props);
1318 
1319 	return (ret);
1320 }
1321 
1322 /*
1323  * Scrub the pool.
1324  */
1325 int
1326 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1327 {
1328 	zfs_cmd_t zc = { 0 };
1329 	char msg[1024];
1330 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1331 
1332 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1333 	zc.zc_cookie = type;
1334 
1335 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1336 		return (0);
1337 
1338 	(void) snprintf(msg, sizeof (msg),
1339 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1340 
1341 	if (errno == EBUSY)
1342 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
1343 	else
1344 		return (zpool_standard_error(hdl, errno, msg));
1345 }
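
/*
 * Example (sketch): kicking off a full scrub; passing POOL_SCRUB_NONE
 * instead stops any scrub in progress.
 *
 *	(void) zpool_scrub(zhp, POOL_SCRUB_EVERYTHING);
 */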
1346 
1347 /*
1348  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1349  * spare, but FALSE if it is an INUSE spare.
1350  */
1351 static nvlist_t *
1352 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
1353     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1354 {
1355 	uint_t c, children;
1356 	nvlist_t **child;
1357 	uint64_t theguid, present;
1358 	char *path;
1359 	uint64_t wholedisk = 0;
1360 	nvlist_t *ret;
1361 	uint64_t is_log;
1362 
1363 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
1364 
1365 	if (search == NULL &&
1366 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
1367 		/*
1368 		 * If the device has never been present since import, the only
1369 		 * reliable way to match the vdev is by GUID.
1370 		 */
1371 		if (theguid == guid)
1372 			return (nv);
1373 	} else if (search != NULL &&
1374 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1375 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1376 		    &wholedisk);
1377 		if (wholedisk) {
1378 			/*
1379 			 * For whole disks, the internal path has 's0', but the
1380 			 * path passed in by the user doesn't.
1381 			 */
1382 			if (strlen(search) == strlen(path) - 2 &&
1383 			    strncmp(search, path, strlen(search)) == 0)
1384 				return (nv);
1385 		} else if (strcmp(search, path) == 0) {
1386 			return (nv);
1387 		}
1388 	}
1389 
1390 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1391 	    &child, &children) != 0)
1392 		return (NULL);
1393 
1394 	for (c = 0; c < children; c++) {
1395 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1396 		    avail_spare, l2cache, NULL)) != NULL) {
1397 			/*
1398 			 * The 'is_log' value is only set for the top-level
1399 			 * vdev, not the leaf vdevs.  So we always look up the
1400 			 * log device from the root of the vdev tree (where
1401 			 * 'log' is non-NULL).
1402 			 */
1403 			if (log != NULL &&
1404 			    nvlist_lookup_uint64(child[c],
1405 			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1406 			    is_log) {
1407 				*log = B_TRUE;
1408 			}
1409 			return (ret);
1410 		}
1411 	}
1412 
1413 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1414 	    &child, &children) == 0) {
1415 		for (c = 0; c < children; c++) {
1416 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1417 			    avail_spare, l2cache, NULL)) != NULL) {
1418 				*avail_spare = B_TRUE;
1419 				return (ret);
1420 			}
1421 		}
1422 	}
1423 
1424 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1425 	    &child, &children) == 0) {
1426 		for (c = 0; c < children; c++) {
1427 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1428 			    avail_spare, l2cache, NULL)) != NULL) {
1429 				*l2cache = B_TRUE;
1430 				return (ret);
1431 			}
1432 		}
1433 	}
1434 
1435 	return (NULL);
1436 }
1437 
1438 nvlist_t *
1439 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1440     boolean_t *l2cache, boolean_t *log)
1441 {
1442 	char buf[MAXPATHLEN];
1443 	const char *search;
1444 	char *end;
1445 	nvlist_t *nvroot;
1446 	uint64_t guid;
1447 
1448 	guid = strtoull(path, &end, 10);
1449 	if (guid != 0 && *end == '\0') {
1450 		search = NULL;
1451 	} else if (path[0] != '/') {
1452 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1453 		search = buf;
1454 	} else {
1455 		search = path;
1456 	}
1457 
1458 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1459 	    &nvroot) == 0);
1460 
1461 	*avail_spare = B_FALSE;
1462 	*l2cache = B_FALSE;
1463 	if (log != NULL)
1464 		*log = B_FALSE;
1465 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
1466 	    l2cache, log));
1467 }
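
/*
 * Example (sketch): a bare device name such as "c0t0d0s0" is expanded to
 * "/dev/dsk/c0t0d0s0" before searching, and a string of decimal digits is
 * first tried as a GUID.
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	tgt = zpool_find_vdev(zhp, "c0t0d0s0", &spare, &l2cache, &log);
 */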
1468 
1469 static int
1470 vdev_online(nvlist_t *nv)
1471 {
1472 	uint64_t ival;
1473 
1474 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1475 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1476 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1477 		return (0);
1478 
1479 	return (1);
1480 }
1481 
1482 /*
1483  * Get the phys_path for a root pool.
1484  * Returns 0 on success; non-zero on failure.
1485  */
1486 int
1487 zpool_get_physpath(zpool_handle_t *zhp, char *physpath)
1488 {
1489 	nvlist_t *vdev_root;
1490 	nvlist_t **child;
1491 	uint_t count;
1492 	int i;
1493 
1494 	/*
1495 	 * Make sure this is a root pool, as phys_path doesn't mean
1496 	 * anything to a non-root pool.
1497 	 */
1498 	if (!pool_is_bootable(zhp))
1499 		return (-1);
1500 
1501 	verify(nvlist_lookup_nvlist(zhp->zpool_config,
1502 	    ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0);
1503 
1504 	if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1505 	    &child, &count) != 0)
1506 		return (-2);
1507 
1508 	for (i = 0; i < count; i++) {
1509 		nvlist_t **child2;
1510 		uint_t count2;
1511 		char *type;
1512 		char *tmppath;
1513 		int j;
1514 
1515 		if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type)
1516 		    != 0)
1517 			return (-3);
1518 
1519 		if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1520 			if (!vdev_online(child[i]))
1521 				return (-8);
1522 			verify(nvlist_lookup_string(child[i],
1523 			    ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0);
1524 			(void) strncpy(physpath, tmppath, strlen(tmppath));
1525 		} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) {
1526 			if (nvlist_lookup_nvlist_array(child[i],
1527 			    ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0)
1528 				return (-4);
1529 
1530 			for (j = 0; j < count2; j++) {
1531 				if (!vdev_online(child2[j]))
1532 					return (-8);
1533 				if (nvlist_lookup_string(child2[j],
1534 				    ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0)
1535 					return (-5);
1536 
1537 				if ((strlen(physpath) + strlen(tmppath)) >
1538 				    MAXNAMELEN)
1539 					return (-6);
1540 
1541 				if (strlen(physpath) == 0) {
1542 					(void) strncpy(physpath, tmppath,
1543 					    strlen(tmppath));
1544 				} else {
1545 					(void) strcat(physpath, " ");
1546 					(void) strcat(physpath, tmppath);
1547 				}
1548 			}
1549 		} else {
1550 			return (-7);
1551 		}
1552 	}
1553 
1554 	return (0);
1555 }
1556 
1557 /*
1558  * Returns TRUE if the given guid corresponds to the given type.
1559  * This is used to check for hot spares (INUSE or not), and level 2 cache
1560  * devices.
1561  */
1562 static boolean_t
1563 is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
1564 {
1565 	uint64_t target_guid;
1566 	nvlist_t *nvroot;
1567 	nvlist_t **list;
1568 	uint_t count;
1569 	int i;
1570 
1571 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1572 	    &nvroot) == 0);
1573 	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
1574 		for (i = 0; i < count; i++) {
1575 			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
1576 			    &target_guid) == 0);
1577 			if (guid == target_guid)
1578 				return (B_TRUE);
1579 		}
1580 	}
1581 
1582 	return (B_FALSE);
1583 }
1584 
1585 /*
1586  * Bring the specified vdev online.  The 'flags' parameter is a set of the
1587  * ZFS_ONLINE_* flags.
1588  */
1589 int
1590 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1591     vdev_state_t *newstate)
1592 {
1593 	zfs_cmd_t zc = { 0 };
1594 	char msg[1024];
1595 	nvlist_t *tgt;
1596 	boolean_t avail_spare, l2cache;
1597 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1598 
1599 	(void) snprintf(msg, sizeof (msg),
1600 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1601 
1602 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1603 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1604 	    NULL)) == NULL)
1605 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1606 
1607 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1608 
1609 	if (avail_spare ||
1610 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1611 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1612 
1613 	zc.zc_cookie = VDEV_STATE_ONLINE;
1614 	zc.zc_obj = flags;
1615 
1616 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1617 		return (zpool_standard_error(hdl, errno, msg));
1618 
1619 	*newstate = zc.zc_cookie;
1620 	return (0);
1621 }
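
/*
 * Example (sketch): bringing a device back online with no special flags
 * and checking the state it settled in.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c0t0d0s0", 0, &newstate) == 0 &&
 *	    newstate != VDEV_STATE_HEALTHY)
 *		(void) fprintf(stderr, "device not healthy after online\n");
 */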
1622 
1623 /*
1624  * Take the specified vdev offline
1625  */
1626 int
1627 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1628 {
1629 	zfs_cmd_t zc = { 0 };
1630 	char msg[1024];
1631 	nvlist_t *tgt;
1632 	boolean_t avail_spare, l2cache;
1633 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1634 
1635 	(void) snprintf(msg, sizeof (msg),
1636 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1637 
1638 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1639 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1640 	    NULL)) == NULL)
1641 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1642 
1643 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1644 
1645 	if (avail_spare ||
1646 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1647 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1648 
1649 	zc.zc_cookie = VDEV_STATE_OFFLINE;
1650 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1651 
1652 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1653 		return (0);
1654 
1655 	switch (errno) {
1656 	case EBUSY:
1657 
1658 		/*
1659 		 * There are no other replicas of this device.
1660 		 */
1661 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1662 
1663 	default:
1664 		return (zpool_standard_error(hdl, errno, msg));
1665 	}
1666 }
1667 
1668 /*
1669  * Mark the given vdev faulted.
1670  */
1671 int
1672 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
1673 {
1674 	zfs_cmd_t zc = { 0 };
1675 	char msg[1024];
1676 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1677 
1678 	(void) snprintf(msg, sizeof (msg),
1679 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
1680 
1681 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1682 	zc.zc_guid = guid;
1683 	zc.zc_cookie = VDEV_STATE_FAULTED;
1684 
1685 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1686 		return (0);
1687 
1688 	switch (errno) {
1689 	case EBUSY:
1690 
1691 		/*
1692 		 * There are no other replicas of this device.
1693 		 */
1694 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1695 
1696 	default:
1697 		return (zpool_standard_error(hdl, errno, msg));
1698 	}
1699 
1700 }
1701 
1702 /*
1703  * Mark the given vdev degraded.
1704  */
1705 int
1706 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
1707 {
1708 	zfs_cmd_t zc = { 0 };
1709 	char msg[1024];
1710 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1711 
1712 	(void) snprintf(msg, sizeof (msg),
1713 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
1714 
1715 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1716 	zc.zc_guid = guid;
1717 	zc.zc_cookie = VDEV_STATE_DEGRADED;
1718 
1719 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1720 		return (0);
1721 
1722 	return (zpool_standard_error(hdl, errno, msg));
1723 }
1724 
1725 /*
1726  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1727  * a hot spare.
1728  */
1729 static boolean_t
1730 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1731 {
1732 	nvlist_t **child;
1733 	uint_t c, children;
1734 	char *type;
1735 
1736 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1737 	    &children) == 0) {
1738 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1739 		    &type) == 0);
1740 
1741 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1742 		    children == 2 && child[which] == tgt)
1743 			return (B_TRUE);
1744 
1745 		for (c = 0; c < children; c++)
1746 			if (is_replacing_spare(child[c], tgt, which))
1747 				return (B_TRUE);
1748 	}
1749 
1750 	return (B_FALSE);
1751 }
1752 
1753 /*
1754  * Attach new_disk (fully described by nvroot) to old_disk.
1755  * If 'replacing' is specified, the new disk will replace the old one.
1756  */
1757 int
1758 zpool_vdev_attach(zpool_handle_t *zhp,
1759     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1760 {
1761 	zfs_cmd_t zc = { 0 };
1762 	char msg[1024];
1763 	int ret;
1764 	nvlist_t *tgt;
1765 	boolean_t avail_spare, l2cache, islog;
1766 	uint64_t val;
1767 	char *path, *newname;
1768 	nvlist_t **child;
1769 	uint_t children;
1770 	nvlist_t *config_root;
1771 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1772 	boolean_t rootpool = pool_is_bootable(zhp);
1773 
1774 	if (replacing)
1775 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1776 		    "cannot replace %s with %s"), old_disk, new_disk);
1777 	else
1778 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1779 		    "cannot attach %s to %s"), new_disk, old_disk);
1780 
1781 	/*
1782 	 * If this is a root pool, make sure that we're not attaching an
1783 	 * EFI labeled device.
1784 	 */
1785 	if (rootpool && pool_uses_efi(nvroot)) {
1786 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1787 		    "EFI labeled devices are not supported on root pools."));
1788 		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1789 	}
1790 
1791 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1792 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
1793 	    &islog)) == 0)
1794 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1795 
1796 	if (avail_spare)
1797 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1798 
1799 	if (l2cache)
1800 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1801 
1802 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1803 	zc.zc_cookie = replacing;
1804 
1805 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1806 	    &child, &children) != 0 || children != 1) {
1807 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1808 		    "new device must be a single disk"));
1809 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1810 	}
1811 
1812 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1813 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1814 
1815 	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
1816 		return (-1);
1817 
1818 	/*
1819 	 * If the target is a hot spare that has been swapped in, we can only
1820 	 * replace it with another hot spare.
1821 	 */
1822 	if (replacing &&
1823 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1824 	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
1825 	    NULL) == NULL || !avail_spare) &&
1826 	    is_replacing_spare(config_root, tgt, 1)) {
1827 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1828 		    "can only be replaced by another hot spare"));
1829 		free(newname);
1830 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1831 	}
1832 
1833 	/*
1834 	 * If we are attempting to replace a spare, it cannot be applied to an
1835 	 * already spared device.
1836 	 */
1837 	if (replacing &&
1838 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1839 	    zpool_find_vdev(zhp, newname, &avail_spare,
1840 	    &l2cache, NULL) != NULL && avail_spare &&
1841 	    is_replacing_spare(config_root, tgt, 0)) {
1842 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1843 		    "device has already been replaced with a spare"));
1844 		free(newname);
1845 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1846 	}
1847 
1848 	free(newname);
1849 
1850 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1851 		return (-1);
1852 
1853 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1854 
1855 	zcmd_free_nvlists(&zc);
1856 
1857 	if (ret == 0) {
1858 		if (rootpool) {
1859 			/*
1860 			 * XXX - This should be removed once we can
1861 			 * automatically install the bootblocks on the
1862 			 * newly attached disk.
1863 			 */
1864 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
1865 			    "be sure to invoke %s to make '%s' bootable.\n"),
1866 			    BOOTCMD, new_disk);
1867 		}
1868 		return (0);
1869 	}
1870 
1871 	switch (errno) {
1872 	case ENOTSUP:
1873 		/*
1874 		 * Can't attach to or replace this type of vdev.
1875 		 */
1876 		if (replacing) {
1877 			if (islog)
1878 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1879 				    "cannot replace a log with a spare"));
1880 			else
1881 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1882 				    "cannot replace a replacing device"));
1883 		} else {
1884 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1885 			    "can only attach to mirrors and top-level "
1886 			    "disks"));
1887 		}
1888 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1889 		break;
1890 
1891 	case EINVAL:
1892 		/*
1893 		 * The new device must be a single disk.
1894 		 */
1895 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1896 		    "new device must be a single disk"));
1897 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1898 		break;
1899 
1900 	case EBUSY:
1901 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1902 		    new_disk);
1903 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1904 		break;
1905 
1906 	case EOVERFLOW:
1907 		/*
1908 		 * The new device is too small.
1909 		 */
1910 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1911 		    "device is too small"));
1912 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1913 		break;
1914 
1915 	case EDOM:
1916 		/*
1917 		 * The new device has a different alignment requirement.
1918 		 */
1919 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1920 		    "devices have different sector alignment"));
1921 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1922 		break;
1923 
1924 	case ENAMETOOLONG:
1925 		/*
1926 		 * The resulting top-level vdev spec won't fit in the label.
1927 		 */
1928 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1929 		break;
1930 
1931 	default:
1932 		(void) zpool_standard_error(hdl, errno, msg);
1933 	}
1934 
1935 	return (-1);
1936 }
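
/*
 * Example (sketch): replacing a failed disk.  'nvroot' describes the
 * replacement disk and is assembled the same way as for zpool_create():
 * a root vdev with a single disk child.
 *
 *	(void) zpool_vdev_attach(zhp, "c0t0d0s0", "c0t1d0s0", nvroot, 1);
 */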
1937 
1938 /*
1939  * Detach the specified device.
1940  */
1941 int
1942 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1943 {
1944 	zfs_cmd_t zc = { 0 };
1945 	char msg[1024];
1946 	nvlist_t *tgt;
1947 	boolean_t avail_spare, l2cache;
1948 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1949 
1950 	(void) snprintf(msg, sizeof (msg),
1951 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1952 
1953 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1954 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1955 	    NULL)) == 0)
1956 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1957 
1958 	if (avail_spare)
1959 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1960 
1961 	if (l2cache)
1962 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1963 
1964 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1965 
1966 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1967 		return (0);
1968 
1969 	switch (errno) {
1970 
1971 	case ENOTSUP:
1972 		/*
1973 		 * Can't detach from this type of vdev.
1974 		 */
1975 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1976 		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1978 		break;
1979 
1980 	case EBUSY:
1981 		/*
1982 		 * There are no other replicas of this device.
1983 		 */
1984 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1985 		break;
1986 
1987 	default:
1988 		(void) zpool_standard_error(hdl, errno, msg);
1989 	}
1990 
1991 	return (-1);
1992 }
1993 
1994 /*
1995  * Remove the given device.  Currently, this is supported only for hot spares
1996  * and level 2 cache devices.
1997  */
1998 int
1999 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2000 {
2001 	zfs_cmd_t zc = { 0 };
2002 	char msg[1024];
2003 	nvlist_t *tgt;
2004 	boolean_t avail_spare, l2cache;
2005 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2006 
2007 	(void) snprintf(msg, sizeof (msg),
2008 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2009 
2010 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2011 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2012 	    NULL)) == 0)
2013 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2014 
2015 	if (!avail_spare && !l2cache) {
2016 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2017 		    "only inactive hot spares or cache devices "
2018 		    "can be removed"));
2019 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2020 	}
2021 
2022 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2023 
2024 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2025 		return (0);
2026 
2027 	return (zpool_standard_error(hdl, errno, msg));
2028 }
2029 
2030 /*
2031  * Clear the errors for the pool, or the particular device if specified.
2032  */
2033 int
2034 zpool_clear(zpool_handle_t *zhp, const char *path)
2035 {
2036 	zfs_cmd_t zc = { 0 };
2037 	char msg[1024];
2038 	nvlist_t *tgt;
2039 	boolean_t avail_spare, l2cache;
2040 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2041 
	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
	    path ? path : zhp->zpool_name);
2050 
2051 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2052 	if (path) {
2053 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2054 		    &l2cache, NULL)) == 0)
2055 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
2056 
2057 		/*
2058 		 * Don't allow error clearing for hot spares.  Do allow
2059 		 * error clearing for l2cache devices.
2060 		 */
2061 		if (avail_spare)
2062 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
2063 
2064 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2065 		    &zc.zc_guid) == 0);
2066 	}
2067 
2068 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
2069 		return (0);
2070 
2071 	return (zpool_standard_error(hdl, errno, msg));
2072 }
2073 
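/*
 * Illustrative usage (kept out of the build): a NULL path clears error
 * counts pool-wide, while a device path clears a single vdev.  The
 * device name is hypothetical.
 */
#if 0
	(void) zpool_clear(zhp, NULL);
	(void) zpool_clear(zhp, "/dev/dsk/c1t0d0s0");
#endif
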
2074 /*
2075  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2076  */
2077 int
2078 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2079 {
2080 	zfs_cmd_t zc = { 0 };
2081 	char msg[1024];
2082 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2083 
2084 	(void) snprintf(msg, sizeof (msg),
2085 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2086 	    guid);
2087 
2088 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2089 	zc.zc_guid = guid;
2090 
2091 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2092 		return (0);
2093 
2094 	return (zpool_standard_error(hdl, errno, msg));
2095 }
2096 
2097 /*
2098  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
2099  * hierarchy.
2100  */
2101 int
2102 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
2103     void *data)
2104 {
2105 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2106 	char (*paths)[MAXPATHLEN];
2107 	size_t size = 4;
2108 	int curr, fd, base, ret = 0;
2109 	DIR *dirp;
2110 	struct dirent *dp;
2111 	struct stat st;
2112 
2113 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
2114 		return (errno == ENOENT ? 0 : -1);
2115 
2116 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
2117 		int err = errno;
2118 		(void) close(base);
2119 		return (err == ENOENT ? 0 : -1);
2120 	}
2121 
2122 	/*
2123 	 * Oddly this wasn't a directory -- ignore that failure since we
	 * know there are no links lower in the (non-existent) hierarchy.
2125 	 */
2126 	if (!S_ISDIR(st.st_mode)) {
2127 		(void) close(base);
2128 		return (0);
2129 	}
2130 
2131 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
2132 		(void) close(base);
2133 		return (-1);
2134 	}
2135 
2136 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
2137 	curr = 0;
2138 
2139 	while (curr >= 0) {
2140 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
2141 			goto err;
2142 
2143 		if (S_ISDIR(st.st_mode)) {
2144 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
2145 				goto err;
2146 
2147 			if ((dirp = fdopendir(fd)) == NULL) {
2148 				(void) close(fd);
2149 				goto err;
2150 			}
2151 
2152 			while ((dp = readdir(dirp)) != NULL) {
2153 				if (dp->d_name[0] == '.')
2154 					continue;
2155 
2156 				if (curr + 1 == size) {
2157 					paths = zfs_realloc(hdl, paths,
2158 					    size * sizeof (paths[0]),
2159 					    size * 2 * sizeof (paths[0]));
2160 					if (paths == NULL) {
2161 						(void) closedir(dirp);
2162 						(void) close(fd);
2163 						goto err;
2164 					}
2165 
2166 					size *= 2;
2167 				}
2168 
2169 				(void) strlcpy(paths[curr + 1], paths[curr],
2170 				    sizeof (paths[curr + 1]));
2171 				(void) strlcat(paths[curr], "/",
2172 				    sizeof (paths[curr]));
2173 				(void) strlcat(paths[curr], dp->d_name,
2174 				    sizeof (paths[curr]));
2175 				curr++;
2176 			}
2177 
2178 			(void) closedir(dirp);
2179 
2180 		} else {
2181 			if ((ret = cb(paths[curr], data)) != 0)
2182 				break;
2183 		}
2184 
2185 		curr--;
2186 	}
2187 
2188 	free(paths);
2189 	(void) close(base);
2190 
2191 	return (ret);
2192 
2193 err:
2194 	free(paths);
2195 	(void) close(base);
2196 	return (-1);
2197 }
2198 
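/*
 * Illustrative sketch (kept out of the build): a minimal callback for
 * zpool_iter_zvol().  The name print_zvol_cb is hypothetical; a nonzero
 * return stops the walk and is passed back to the caller.
 */
#if 0
static int
print_zvol_cb(const char *dataset, void *data)
{
	(void) printf("%s\n", dataset);
	return (0);
}
#endif
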
2199 typedef struct zvol_cb {
2200 	zpool_handle_t *zcb_pool;
2201 	boolean_t zcb_create;
2202 } zvol_cb_t;
2203 
2204 /*ARGSUSED*/
2205 static int
2206 do_zvol_create(zfs_handle_t *zhp, void *data)
2207 {
2208 	int ret = 0;
2209 
2210 	if (ZFS_IS_VOLUME(zhp)) {
2211 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
2212 		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
2213 	}
2214 
2215 	if (ret == 0)
2216 		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
2217 
2218 	zfs_close(zhp);
2219 
2220 	return (ret);
2221 }
2222 
2223 /*
2224  * Iterate over all zvols in the pool and make any necessary minor nodes.
2225  */
2226 int
2227 zpool_create_zvol_links(zpool_handle_t *zhp)
2228 {
2229 	zfs_handle_t *zfp;
2230 	int ret;
2231 
2232 	/*
2233 	 * If the pool is unavailable, just return success.
2234 	 */
2235 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
2236 	    zhp->zpool_name)) == NULL)
2237 		return (0);
2238 
2239 	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
2240 
2241 	zfs_close(zfp);
2242 	return (ret);
2243 }
2244 
2245 static int
2246 do_zvol_remove(const char *dataset, void *data)
2247 {
2248 	zpool_handle_t *zhp = data;
2249 
2250 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
2251 }
2252 
2253 /*
2254  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
2255  * by examining the /dev links so that a corrupted pool doesn't impede this
2256  * operation.
2257  */
2258 int
2259 zpool_remove_zvol_links(zpool_handle_t *zhp)
2260 {
2261 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
2262 }
2263 
2264 /*
2265  * Convert from a devid string to a path.
2266  */
2267 static char *
2268 devid_to_path(char *devid_str)
2269 {
2270 	ddi_devid_t devid;
2271 	char *minor;
2272 	char *path;
2273 	devid_nmlist_t *list = NULL;
2274 	int ret;
2275 
2276 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2277 		return (NULL);
2278 
2279 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2280 
2281 	devid_str_free(minor);
2282 	devid_free(devid);
2283 
2284 	if (ret != 0)
2285 		return (NULL);
2286 
	/* dup the name before freeing the list; NULL here means strdup failed */
	path = strdup(list[0].devname);
	devid_free_nmlist(list);

	return (path);
2293 }
2294 
2295 /*
2296  * Convert from a path to a devid string.
2297  */
2298 static char *
2299 path_to_devid(const char *path)
2300 {
2301 	int fd;
2302 	ddi_devid_t devid;
2303 	char *minor, *ret;
2304 
2305 	if ((fd = open(path, O_RDONLY)) < 0)
2306 		return (NULL);
2307 
2308 	minor = NULL;
2309 	ret = NULL;
2310 	if (devid_get(fd, &devid) == 0) {
2311 		if (devid_get_minor_name(fd, &minor) == 0)
2312 			ret = devid_str_encode(devid, minor);
2313 		if (minor != NULL)
2314 			devid_str_free(minor);
2315 		devid_free(devid);
2316 	}
2317 	(void) close(fd);
2318 
2319 	return (ret);
2320 }
2321 
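/*
 * Illustrative sketch (kept out of the build) of the round trip the two
 * helpers above provide, as used by zpool_vdev_name() below.  The
 * device path is hypothetical.
 */
#if 0
	char *devid = path_to_devid("/dev/dsk/c1t0d0s0");
	char *path = (devid != NULL) ? devid_to_path(devid) : NULL;
#endif
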
2322 /*
2323  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2324  * ignore any failure here, since a common case is for an unprivileged user to
2325  * type 'zpool status', and we'll display the correct information anyway.
2326  */
2327 static void
2328 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2329 {
2330 	zfs_cmd_t zc = { 0 };
2331 
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
2334 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2335 	    &zc.zc_guid) == 0);
2336 
2337 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2338 }
2339 
2340 /*
2341  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2342  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2343  * We also check if this is a whole disk, in which case we strip off the
2344  * trailing 's0' slice name.
2345  *
2346  * This routine is also responsible for identifying when disks have been
2347  * reconfigured in a new location.  The kernel will have opened the device by
2348  * devid, but the path will still refer to the old location.  To catch this, we
2349  * first do a path -> devid translation (which is fast for the common case).  If
2350  * the devid matches, we're done.  If not, we do a reverse devid -> path
2351  * translation and issue the appropriate ioctl() to update the path of the vdev.
2352  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2353  * of these checks.
2354  */
2355 char *
2356 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2357 {
2358 	char *path, *devid;
2359 	uint64_t value;
2360 	char buf[64];
2361 	vdev_stat_t *vs;
2362 	uint_t vsc;
2363 
2364 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2365 	    &value) == 0) {
2366 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2367 		    &value) == 0);
2368 		(void) snprintf(buf, sizeof (buf), "%llu",
2369 		    (u_longlong_t)value);
2370 		path = buf;
2371 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2372 
2373 		/*
2374 		 * If the device is dead (faulted, offline, etc) then don't
2375 		 * bother opening it.  Otherwise we may be forcing the user to
2376 		 * open a misbehaving device, which can have undesirable
2377 		 * effects.
2378 		 */
2379 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2380 		    (uint64_t **)&vs, &vsc) != 0 ||
2381 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2382 		    zhp != NULL &&
2383 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2384 			/*
2385 			 * Determine if the current path is correct.
2386 			 */
2387 			char *newdevid = path_to_devid(path);
2388 
2389 			if (newdevid == NULL ||
2390 			    strcmp(devid, newdevid) != 0) {
2391 				char *newpath;
2392 
2393 				if ((newpath = devid_to_path(devid)) != NULL) {
2394 					/*
2395 					 * Update the path appropriately.
2396 					 */
2397 					set_path(zhp, nv, newpath);
2398 					if (nvlist_add_string(nv,
2399 					    ZPOOL_CONFIG_PATH, newpath) == 0)
2400 						verify(nvlist_lookup_string(nv,
2401 						    ZPOOL_CONFIG_PATH,
2402 						    &path) == 0);
2403 					free(newpath);
2404 				}
2405 			}
2406 
2407 			if (newdevid)
2408 				devid_str_free(newdevid);
2409 		}
2410 
2411 		if (strncmp(path, "/dev/dsk/", 9) == 0)
2412 			path += 9;
2413 
2414 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2415 		    &value) == 0 && value) {
2416 			char *tmp = zfs_strdup(hdl, path);
2417 			if (tmp == NULL)
2418 				return (NULL);
2419 			tmp[strlen(path) - 2] = '\0';
2420 			return (tmp);
2421 		}
2422 	} else {
2423 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2424 
2425 		/*
2426 		 * If it's a raidz device, we need to stick in the parity level.
2427 		 */
2428 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2429 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2430 			    &value) == 0);
2431 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
2432 			    (u_longlong_t)value);
2433 			path = buf;
2434 		}
2435 	}
2436 
2437 	return (zfs_strdup(hdl, path));
2438 }
2439 
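/*
 * For example (hypothetical configs): a vdev whose path is
 * "/dev/dsk/c1t2d0s0" is displayed as "c1t2d0s0", or as "c1t2d0" when
 * ZPOOL_CONFIG_WHOLE_DISK is set; a raidz vdev with nparity == 2 is
 * displayed as "raidz2".
 */
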
2440 static int
2441 zbookmark_compare(const void *a, const void *b)
2442 {
2443 	return (memcmp(a, b, sizeof (zbookmark_t)));
2444 }
2445 
2446 /*
2447  * Retrieve the persistent error log, uniquify the members, and return to the
2448  * caller.
2449  */
2450 int
2451 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2452 {
2453 	zfs_cmd_t zc = { 0 };
2454 	uint64_t count;
2455 	zbookmark_t *zb = NULL;
2456 	int i;
2457 
2458 	/*
2459 	 * Retrieve the raw error list from the kernel.  If the number of errors
2460 	 * has increased, allocate more space and continue until we get the
2461 	 * entire list.
2462 	 */
2463 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2464 	    &count) == 0);
2465 	if (count == 0)
2466 		return (0);
2467 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2468 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2469 		return (-1);
2470 	zc.zc_nvlist_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2472 	for (;;) {
2473 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2474 		    &zc) != 0) {
2475 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
2476 			if (errno == ENOMEM) {
2477 				count = zc.zc_nvlist_dst_size;
2478 				if ((zc.zc_nvlist_dst = (uintptr_t)
2479 				    zfs_alloc(zhp->zpool_hdl, count *
2480 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
2481 					return (-1);
2482 			} else {
2483 				return (-1);
2484 			}
2485 		} else {
2486 			break;
2487 		}
2488 	}
2489 
	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
2497 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2498 	    zc.zc_nvlist_dst_size;
2499 	count -= zc.zc_nvlist_dst_size;
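
	/*
	 * For example (illustrative numbers only): with count == 8 and
	 * zc_nvlist_dst_size == 3, the kernel filled slots 3..7 of the
	 * buffer, so zb points at slot 3 and count becomes 5.
	 */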
2500 
2501 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2502 
2503 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2504 
2505 	/*
2506 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2507 	 */
2508 	for (i = 0; i < count; i++) {
2509 		nvlist_t *nv;
2510 
2511 		/* ignoring zb_blkid and zb_level for now */
2512 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2513 		    zb[i-1].zb_object == zb[i].zb_object)
2514 			continue;
2515 
2516 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2517 			goto nomem;
2518 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2519 		    zb[i].zb_objset) != 0) {
2520 			nvlist_free(nv);
2521 			goto nomem;
2522 		}
2523 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2524 		    zb[i].zb_object) != 0) {
2525 			nvlist_free(nv);
2526 			goto nomem;
2527 		}
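		/* the pair name is arbitrary; consumers iterate the list */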
2528 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2529 			nvlist_free(nv);
2530 			goto nomem;
2531 		}
2532 		nvlist_free(nv);
2533 	}
2534 
2535 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2536 	return (0);
2537 
2538 nomem:
2539 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2540 	return (no_memory(zhp->zpool_hdl));
2541 }
2542 
2543 /*
2544  * Upgrade a ZFS pool to the latest on-disk version.
2545  */
2546 int
2547 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2548 {
2549 	zfs_cmd_t zc = { 0 };
2550 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2551 
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2553 	zc.zc_cookie = new_version;
2554 
2555 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2556 		return (zpool_standard_error_fmt(hdl, errno,
2557 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2558 		    zhp->zpool_name));
2559 	return (0);
2560 }
2561 
2562 void
2563 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2564     char *history_str)
2565 {
2566 	int i;
2567 
2568 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2569 	for (i = 1; i < argc; i++) {
2570 		if (strlen(history_str) + 1 + strlen(argv[i]) >
2571 		    HIS_MAX_RECORD_LEN)
2572 			break;
2573 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2574 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2575 	}
2576 }
2577 
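/*
 * For example (hypothetical arguments): subcommand "create" with argv
 * { "create", "tank", "mirror", "c1d0", "c2d0" } yields the string
 * "create tank mirror c1d0 c2d0"; arguments that would push the string
 * past HIS_MAX_RECORD_LEN are dropped from the end.
 */
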
2578 /*
2579  * Stage command history for logging.
2580  */
2581 int
2582 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2583 {
2584 	if (history_str == NULL)
2585 		return (EINVAL);
2586 
2587 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2588 		return (EINVAL);
2589 
2590 	if (hdl->libzfs_log_str != NULL)
2591 		free(hdl->libzfs_log_str);
2592 
2593 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2594 		return (no_memory(hdl));
2595 
2596 	return (0);
2597 }
2598 
2599 /*
2600  * Perform ioctl to get some command history of a pool.
2601  *
2602  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2603  * logical offset of the history buffer to start reading from.
2604  *
2605  * Upon return, 'off' is the next logical offset to read from and
2606  * 'len' is the actual amount of bytes read into 'buf'.
2607  */
2608 static int
2609 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2610 {
2611 	zfs_cmd_t zc = { 0 };
2612 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2613 
2614 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2615 
2616 	zc.zc_history = (uint64_t)(uintptr_t)buf;
2617 	zc.zc_history_len = *len;
2618 	zc.zc_history_offset = *off;
2619 
2620 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2621 		switch (errno) {
2622 		case EPERM:
2623 			return (zfs_error_fmt(hdl, EZFS_PERM,
2624 			    dgettext(TEXT_DOMAIN,
2625 			    "cannot show history for pool '%s'"),
2626 			    zhp->zpool_name));
2627 		case ENOENT:
2628 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2629 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2630 			    "'%s'"), zhp->zpool_name));
2631 		case ENOTSUP:
2632 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2633 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2634 			    "'%s', pool must be upgraded"), zhp->zpool_name));
2635 		default:
2636 			return (zpool_standard_error_fmt(hdl, errno,
2637 			    dgettext(TEXT_DOMAIN,
2638 			    "cannot get history for '%s'"), zhp->zpool_name));
2639 		}
2640 	}
2641 
2642 	*len = zc.zc_history_len;
2643 	*off = zc.zc_history_offset;
2644 
2645 	return (0);
2646 }
2647 
2648 /*
2649  * Process the buffer of nvlists, unpacking and storing each nvlist record
2650  * into 'records'.  'leftover' is set to the number of bytes that weren't
2651  * processed as there wasn't a complete record.
2652  */
2653 static int
2654 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2655     nvlist_t ***records, uint_t *numrecords)
2656 {
2657 	uint64_t reclen;
2658 	nvlist_t *nv;
2659 	int i;
2660 
2661 	while (bytes_read > sizeof (reclen)) {
2662 
2663 		/* get length of packed record (stored as little endian) */
2664 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2665 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2666 
2667 		if (bytes_read < sizeof (reclen) + reclen)
2668 			break;
2669 
2670 		/* unpack record */
2671 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2672 			return (ENOMEM);
2673 		bytes_read -= sizeof (reclen) + reclen;
2674 		buf += sizeof (reclen) + reclen;
2675 
		/* add record to nvlist array, growing it as needed */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));

			if (tmp == NULL) {
				(*numrecords)--;
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
2683 	}
2684 
2685 	*leftover = bytes_read;
2686 	return (0);
2687 }
2688 
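/*
 * Illustrative layout of the buffer unpacked above (a picture of the
 * existing format, not a new interface): each record is a 64-bit
 * little-endian length followed by that many bytes of packed nvlist,
 * laid end to end:
 *
 *	+----------+-----------------+----------+-----------------+--
 *	| reclen 0 | packed nvlist 0 | reclen 1 | packed nvlist 1 | ...
 *	+----------+-----------------+----------+-----------------+--
 */
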
2689 #define	HIS_BUF_LEN	(128*1024)
2690 
2691 /*
2692  * Retrieve the command history of a pool.
2693  */
2694 int
2695 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2696 {
2697 	char buf[HIS_BUF_LEN];
2698 	uint64_t off = 0;
2699 	nvlist_t **records = NULL;
2700 	uint_t numrecords = 0;
2701 	int err, i;
2702 
2703 	do {
2704 		uint64_t bytes_read = sizeof (buf);
2705 		uint64_t leftover;
2706 
2707 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2708 			break;
2709 
2710 		/* if nothing else was read in, we're at EOF, just return */
2711 		if (!bytes_read)
2712 			break;
2713 
2714 		if ((err = zpool_history_unpack(buf, bytes_read,
2715 		    &leftover, &records, &numrecords)) != 0)
2716 			break;
2717 		off -= leftover;
2718 
2719 		/* CONSTCOND */
2720 	} while (1);
2721 
2722 	if (!err) {
2723 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2724 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2725 		    records, numrecords) == 0);
2726 	}
2727 	for (i = 0; i < numrecords; i++)
2728 		nvlist_free(records[i]);
2729 	free(records);
2730 
2731 	return (err);
2732 }
2733 
2734 void
2735 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2736     char *pathname, size_t len)
2737 {
2738 	zfs_cmd_t zc = { 0 };
2739 	boolean_t mounted = B_FALSE;
2740 	char *mntpnt = NULL;
2741 	char dsname[MAXNAMELEN];
2742 
2743 	if (dsobj == 0) {
2744 		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (u_longlong_t)obj);
2746 		return;
2747 	}
2748 
2749 	/* get the dataset's name */
2750 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2751 	zc.zc_obj = dsobj;
2752 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
2753 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2754 		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
2757 		return;
2758 	}
2759 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2760 
2761 	/* find out if the dataset is mounted */
2762 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2763 
2764 	/* get the corrupted object's path */
2765 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2766 	zc.zc_obj = obj;
2767 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2768 	    &zc) == 0) {
2769 		if (mounted) {
2770 			(void) snprintf(pathname, len, "%s%s", mntpnt,
2771 			    zc.zc_value);
2772 		} else {
2773 			(void) snprintf(pathname, len, "%s:%s",
2774 			    dsname, zc.zc_value);
2775 		}
2776 	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (u_longlong_t)obj);
2778 	}
2779 	free(mntpnt);
2780 }
2781 
2782 #define	RDISK_ROOT	"/dev/rdsk"
2783 #define	BACKUP_SLICE	"s2"
2784 /*
2785  * Don't start the slice at the default block of 34; many storage
2786  * devices will use a stripe width of 128k, so start there instead.
2787  */
2788 #define	NEW_START_BLOCK	256
2789 
2790 /*
2791  * Read the EFI label from the config, if a label does not exist then
2792  * pass back the error to the caller. If the caller has passed a non-NULL
2793  * diskaddr argument then we set it to the starting address of the EFI
2794  * partition.
2795  */
2796 static int
2797 read_efi_label(nvlist_t *config, diskaddr_t *sb)
2798 {
2799 	char *path;
2800 	int fd;
2801 	char diskname[MAXPATHLEN];
2802 	int err = -1;
2803 
2804 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
2805 		return (err);
2806 
2807 	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
2808 	    strrchr(path, '/'));
2809 	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2810 		struct dk_gpt *vtoc;
2811 
2812 		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
2813 			if (sb != NULL)
2814 				*sb = vtoc->efi_parts[0].p_start;
2815 			efi_free(vtoc);
2816 		}
2817 		(void) close(fd);
2818 	}
2819 	return (err);
2820 }
2821 
2822 /*
 * Determine where a partition starts on a disk in the current
 * configuration.
2825  */
2826 static diskaddr_t
2827 find_start_block(nvlist_t *config)
2828 {
2829 	nvlist_t **child;
2830 	uint_t c, children;
2831 	diskaddr_t sb = MAXOFFSET_T;
2832 	uint64_t wholedisk;
2833 
2834 	if (nvlist_lookup_nvlist_array(config,
2835 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2836 		if (nvlist_lookup_uint64(config,
2837 		    ZPOOL_CONFIG_WHOLE_DISK,
2838 		    &wholedisk) != 0 || !wholedisk) {
2839 			return (MAXOFFSET_T);
2840 		}
2841 		if (read_efi_label(config, &sb) < 0)
2842 			sb = MAXOFFSET_T;
2843 		return (sb);
2844 	}
2845 
2846 	for (c = 0; c < children; c++) {
2847 		sb = find_start_block(child[c]);
2848 		if (sb != MAXOFFSET_T) {
2849 			return (sb);
2850 		}
2851 	}
2852 	return (MAXOFFSET_T);
2853 }
2854 
2855 /*
2856  * Label an individual disk.  The name provided is the short name,
2857  * stripped of any leading /dev path.
2858  */
2859 int
2860 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2861 {
2862 	char path[MAXPATHLEN];
2863 	struct dk_gpt *vtoc;
2864 	int fd;
2865 	size_t resv = EFI_MIN_RESV_SIZE;
2866 	uint64_t slice_size;
2867 	diskaddr_t start_block;
2868 	char errbuf[1024];
2869 
2870 	/* prepare an error message just in case */
2871 	(void) snprintf(errbuf, sizeof (errbuf),
2872 	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
2873 
2874 	if (zhp) {
2875 		nvlist_t *nvroot;
2876 
2877 		if (pool_is_bootable(zhp)) {
2878 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2879 			    "EFI labeled devices are not supported on root "
2880 			    "pools."));
2881 			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
2882 		}
2883 
2884 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2885 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2886 
2887 		if (zhp->zpool_start_block == 0)
2888 			start_block = find_start_block(nvroot);
2889 		else
2890 			start_block = zhp->zpool_start_block;
2891 		zhp->zpool_start_block = start_block;
2892 	} else {
2893 		/* new pool */
2894 		start_block = NEW_START_BLOCK;
2895 	}
2896 
2897 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2898 	    BACKUP_SLICE);
2899 
2900 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2901 		/*
2902 		 * This shouldn't happen.  We've long since verified that this
2903 		 * is a valid device.
2904 		 */
2905 		zfs_error_aux(hdl,
2906 		    dgettext(TEXT_DOMAIN, "unable to open device"));
2907 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2908 	}
2909 
2910 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2911 		/*
2912 		 * The only way this can fail is if we run out of memory, or we
2913 		 * were unable to read the disk's capacity
2914 		 */
2915 		if (errno == ENOMEM)
2916 			(void) no_memory(hdl);
2917 
2918 		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));
2921 
2922 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2923 	}
2924 
2925 	slice_size = vtoc->efi_last_u_lba + 1;
2926 	slice_size -= EFI_MIN_RESV_SIZE;
2927 	if (start_block == MAXOFFSET_T)
2928 		start_block = NEW_START_BLOCK;
2929 	slice_size -= start_block;
2930 
2931 	vtoc->efi_parts[0].p_start = start_block;
2932 	vtoc->efi_parts[0].p_size = slice_size;
2933 
2934 	/*
2935 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2936 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2937 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2938 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2939 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2940 	 * can get, in the absence of V_OTHER.
2941 	 */
2942 	vtoc->efi_parts[0].p_tag = V_USR;
2943 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2944 
2945 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2946 	vtoc->efi_parts[8].p_size = resv;
2947 	vtoc->efi_parts[8].p_tag = V_RESERVED;
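
	/*
	 * Worked example (illustrative numbers only): on a disk with
	 * efi_last_u_lba == 1000000, start_block == 256 and resv ==
	 * 16384, slice 0 covers sectors [256, 983616] and the reserved
	 * slice 8 covers [983617, 1000000].
	 */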
2948 
2949 	if (efi_write(fd, vtoc) != 0) {
2950 		/*
2951 		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message
		 * directing the user to manually label the disk and
		 * give a specific slice.
2955 		 */
2956 		(void) close(fd);
2957 		efi_free(vtoc);
2958 
2959 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2960 		    "try using fdisk(1M) and then provide a specific slice"));
2961 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2962 	}
2963 
2964 	(void) close(fd);
2965 	efi_free(vtoc);
2966 	return (0);
2967 }
2968 
2969 static boolean_t
2970 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
2971 {
2972 	char *type;
2973 	nvlist_t **child;
2974 	uint_t children, c;
2975 
2976 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
2977 	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2978 	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
2979 	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
2980 	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
2981 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2982 		    "vdev type '%s' is not supported"), type);
2983 		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
2984 		return (B_FALSE);
2985 	}
2986 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
2987 	    &child, &children) == 0) {
2988 		for (c = 0; c < children; c++) {
2989 			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
2990 				return (B_FALSE);
2991 		}
2992 	}
2993 	return (B_TRUE);
2994 }
2995 
2996 /*
2997  * check if this zvol is allowable for use as a dump device; zero if
2998  * it is, > 0 if it isn't, < 0 if it isn't a zvol
2999  */
3000 int
3001 zvol_check_dump_config(char *arg)
3002 {
3003 	zpool_handle_t *zhp = NULL;
3004 	nvlist_t *config, *nvroot;
3005 	char *p, *volname;
3006 	nvlist_t **top;
3007 	uint_t toplevels;
3008 	libzfs_handle_t *hdl;
3009 	char errbuf[1024];
3010 	char poolname[ZPOOL_MAXNAMELEN];
3011 	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3012 	int ret = 1;
3013 
	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen) != 0) {
3015 		return (-1);
3016 	}
3017 
3018 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3019 	    "dump is not supported on device '%s'"), arg);
3020 
3021 	if ((hdl = libzfs_init()) == NULL)
3022 		return (1);
3023 	libzfs_print_on_error(hdl, B_TRUE);
3024 
3025 	volname = arg + pathlen;
3026 
3027 	/* check the configuration of the pool */
3028 	if ((p = strchr(volname, '/')) == NULL) {
3029 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3030 		    "malformed dataset name"));
3031 		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
3032 		return (1);
3033 	} else if (p - volname >= ZFS_MAXNAMELEN) {
3034 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3035 		    "dataset name is too long"));
3036 		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
3037 		return (1);
3038 	} else {
3039 		(void) strncpy(poolname, volname, p - volname);
3040 		poolname[p - volname] = '\0';
3041 	}
3042 
3043 	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
3044 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3045 		    "could not open pool '%s'"), poolname);
3046 		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
3047 		goto out;
3048 	}
3049 	config = zpool_get_config(zhp, NULL);
3050 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3051 	    &nvroot) != 0) {
3052 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
3054 		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3055 		goto out;
3056 	}
3057 
3058 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3059 	    &top, &toplevels) == 0);
3060 	if (toplevels != 1) {
3061 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3062 		    "'%s' has multiple top level vdevs"), poolname);
3063 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3064 		goto out;
3065 	}
3066 
3067 	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
3068 		goto out;
3069 	}
3070 	ret = 0;
3071 
3072 out:
3073 	if (zhp)
3074 		zpool_close(zhp);
3075 	libzfs_fini(hdl);
3076 	return (ret);
3077 }
3078
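/*
 * Illustrative usage (kept out of the build; the zvol path is
 * hypothetical): a return of 0 means the zvol may be configured as a
 * dump device.
 */
#if 0
	if (zvol_check_dump_config("/dev/zvol/dsk/tank/dump") == 0)
		(void) printf("usable as a dump device\n");
#endif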