xref: /titanic_41/usr/src/lib/libzfs/common/libzfs_pool.c (revision 0cd13cbfb4270b840b4bd22ec5f673b2b6a2c02b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <sys/zio.h>
46 
47 #include "zfs_namecheck.h"
48 #include "zfs_prop.h"
49 #include "libzfs_impl.h"
50 
52 /*
53  * ====================================================================
54  *   zpool property functions
55  * ====================================================================
56  */
57 
58 static int
59 zpool_get_all_props(zpool_handle_t *zhp)
60 {
61 	zfs_cmd_t zc = { 0 };
62 	libzfs_handle_t *hdl = zhp->zpool_hdl;
63 
64 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
65 
66 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
67 		return (-1);
68 
69 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
70 		if (errno == ENOMEM) {
71 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
72 				zcmd_free_nvlists(&zc);
73 				return (-1);
74 			}
75 		} else {
76 			zcmd_free_nvlists(&zc);
77 			return (-1);
78 		}
79 	}
80 
81 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
82 		zcmd_free_nvlists(&zc);
83 		return (-1);
84 	}
85 
86 	zcmd_free_nvlists(&zc);
87 
88 	return (0);
89 }
90 
91 static int
92 zpool_props_refresh(zpool_handle_t *zhp)
93 {
94 	nvlist_t *old_props;
95 
96 	old_props = zhp->zpool_props;
97 
98 	if (zpool_get_all_props(zhp) != 0)
99 		return (-1);
100 
101 	nvlist_free(old_props);
102 	return (0);
103 }
104 
105 static char *
106 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
107     zprop_source_t *src)
108 {
109 	nvlist_t *nv, *nvl;
110 	uint64_t ival;
111 	char *value;
112 	zprop_source_t source;
113 
114 	nvl = zhp->zpool_props;
115 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
116 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
117 		source = ival;
118 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
119 	} else {
120 		source = ZPROP_SRC_DEFAULT;
121 		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
122 			value = "-";
123 	}
124 
125 	if (src)
126 		*src = source;
127 
128 	return (value);
129 }
130 
131 uint64_t
132 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
133 {
134 	nvlist_t *nv, *nvl;
135 	uint64_t value;
136 	zprop_source_t source;
137 
138 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
139 		return (zpool_prop_default_numeric(prop));
140 
141 	nvl = zhp->zpool_props;
142 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
143 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
144 		source = value;
145 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
146 	} else {
147 		source = ZPROP_SRC_DEFAULT;
148 		value = zpool_prop_default_numeric(prop);
149 	}
150 
151 	if (src)
152 		*src = source;
153 
154 	return (value);
155 }
156 
157 /*
158  * Map vdev states to printed strings.
159  */
160 char *
161 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
162 {
163 	switch (state) {
164 	case VDEV_STATE_CLOSED:
165 	case VDEV_STATE_OFFLINE:
166 		return (gettext("OFFLINE"));
167 	case VDEV_STATE_REMOVED:
168 		return (gettext("REMOVED"));
169 	case VDEV_STATE_CANT_OPEN:
170 		if (aux == VDEV_AUX_CORRUPT_DATA)
171 			return (gettext("FAULTED"));
172 		else
173 			return (gettext("UNAVAIL"));
174 	case VDEV_STATE_FAULTED:
175 		return (gettext("FAULTED"));
176 	case VDEV_STATE_DEGRADED:
177 		return (gettext("DEGRADED"));
178 	case VDEV_STATE_HEALTHY:
179 		return (gettext("ONLINE"));
180 	}
181 
182 	return (gettext("UNKNOWN"));
183 }
184 
185 /*
186  * Get a zpool property value for 'prop' and return the value in
187  * a pre-allocated buffer.
188  */
189 int
190 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
191     zprop_source_t *srctype)
192 {
193 	uint64_t intval;
194 	const char *strval;
195 	zprop_source_t src = ZPROP_SRC_NONE;
196 	nvlist_t *nvroot;
197 	vdev_stat_t *vs;
198 	uint_t vsc;
199 
200 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
201 		if (prop == ZPOOL_PROP_NAME)
202 			(void) strlcpy(buf, zpool_get_name(zhp), len);
203 		else if (prop == ZPOOL_PROP_HEALTH)
204 			(void) strlcpy(buf, "FAULTED", len);
205 		else
206 			(void) strlcpy(buf, "-", len);
207 		return (0);
208 	}
209 
210 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
211 	    prop != ZPOOL_PROP_NAME)
212 		return (-1);
213 
214 	switch (zpool_prop_get_type(prop)) {
215 	case PROP_TYPE_STRING:
216 		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
217 		    len);
218 		break;
219 
220 	case PROP_TYPE_NUMBER:
221 		intval = zpool_get_prop_int(zhp, prop, &src);
222 
223 		switch (prop) {
224 		case ZPOOL_PROP_SIZE:
225 		case ZPOOL_PROP_USED:
226 		case ZPOOL_PROP_AVAILABLE:
227 			(void) zfs_nicenum(intval, buf, len);
228 			break;
229 
230 		case ZPOOL_PROP_CAPACITY:
231 			(void) snprintf(buf, len, "%llu%%",
232 			    (u_longlong_t)intval);
233 			break;
234 
235 		case ZPOOL_PROP_HEALTH:
236 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
237 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
238 			verify(nvlist_lookup_uint64_array(nvroot,
239 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
240 
241 			(void) strlcpy(buf, zpool_state_to_name(intval,
242 			    vs->vs_aux), len);
243 			break;
244 		default:
245 			(void) snprintf(buf, len, "%llu", intval);
246 		}
247 		break;
248 
249 	case PROP_TYPE_INDEX:
250 		intval = zpool_get_prop_int(zhp, prop, &src);
251 		if (zpool_prop_index_to_string(prop, intval, &strval)
252 		    != 0)
253 			return (-1);
254 		(void) strlcpy(buf, strval, len);
255 		break;
256 
257 	default:
258 		abort();
259 	}
260 
261 	if (srctype)
262 		*srctype = src;
263 
264 	return (0);
265 }
266 
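/*
 * Example (an illustrative sketch, not part of this library; the pool
 * name "tank" is hypothetical):
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zhp != NULL && zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
 *	    sizeof (buf), &src) == 0)
 *		(void) printf("health: %s\n", buf);
 *	if (zhp != NULL)
 *		zpool_close(zhp);
 *	libzfs_fini(hdl);
 */
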
267 /*
268  * Check that the bootfs name carries the same pool name as the pool it
269  * is being set on.  Assumes 'bootfs' is already a valid dataset name.
270  */
271 static boolean_t
272 bootfs_name_valid(const char *pool, char *bootfs)
273 {
274 	int len = strlen(pool);
275 
276 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM))
277 		return (B_FALSE);
278 
279 	if (strncmp(pool, bootfs, len) == 0 &&
280 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
281 		return (B_TRUE);
282 
283 	return (B_FALSE);
284 }
285 
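/*
 * For example, with pool "tank", both "tank" and "tank/boot" are accepted
 * by bootfs_name_valid(), while "tank2/boot" and "tankfoo" are rejected.
 */
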
286 /*
287  * Given an nvlist of zpool properties to be set, validate that they are
288  * correct, and parse any numeric properties (index, boolean, etc.) if they are
289  * specified as strings.
290  */
291 static nvlist_t *
292 zpool_validate_properties(libzfs_handle_t *hdl, const char *poolname,
293     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
294 {
295 	nvpair_t *elem;
296 	nvlist_t *retprops;
297 	zpool_prop_t prop;
298 	char *strval;
299 	uint64_t intval;
300 	char *slash;
301 	struct stat64 statbuf;
302 
303 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
304 		(void) no_memory(hdl);
305 		return (NULL);
306 	}
307 
308 	elem = NULL;
309 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
310 		const char *propname = nvpair_name(elem);
311 
312 		/*
313 		 * Make sure this property is valid and applies to this type.
314 		 */
315 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
316 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
317 			    "invalid property '%s'"), propname);
318 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
319 			goto error;
320 		}
321 
322 		if (zpool_prop_readonly(prop)) {
323 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
324 			    "is readonly"), propname);
325 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
326 			goto error;
327 		}
328 
329 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
330 		    &strval, &intval, errbuf) != 0)
331 			goto error;
332 
333 		/*
334 		 * Perform additional checking for specific properties.
335 		 */
336 		switch (prop) {
337 		case ZPOOL_PROP_VERSION:
338 			if (intval < version || intval > SPA_VERSION) {
339 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
340 				    "property '%s' number %d is invalid."),
341 				    propname, intval);
342 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
343 				goto error;
344 			}
345 			break;
346 
347 		case ZPOOL_PROP_BOOTFS:
348 			if (create_or_import) {
349 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
350 				    "property '%s' cannot be set at creation "
351 				    "or import time"), propname);
352 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
353 				goto error;
354 			}
355 
356 			if (version < SPA_VERSION_BOOTFS) {
357 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
358 				    "pool must be upgraded to support "
359 				    "'%s' property"), propname);
360 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
361 				goto error;
362 			}
363 
364 			/*
365 			 * The bootfs value must be a dataset name, and the
366 			 * dataset must reside in the pool being configured.
367 			 */
368 			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
369 			    strval)) {
370 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
371 				    "is an invalid name"), strval);
372 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
373 				goto error;
374 			}
375 			break;
376 
377 		case ZPOOL_PROP_ALTROOT:
378 			if (!create_or_import) {
379 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
380 				    "property '%s' can only be set during pool "
381 				    "creation or import"), propname);
382 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
383 				goto error;
384 			}
385 
386 			if (strval[0] != '/') {
387 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
388 				    "bad alternate root '%s'"), strval);
389 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
390 				goto error;
391 			}
392 			break;
393 
394 		case ZPOOL_PROP_CACHEFILE:
395 			if (strval[0] == '\0')
396 				break;
397 
398 			if (strcmp(strval, "none") == 0)
399 				break;
400 
401 			if (strval[0] != '/') {
402 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
403 				    "property '%s' must be empty, an "
404 				    "absolute path, or 'none'"), propname);
405 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
406 				goto error;
407 			}
408 
409 			slash = strrchr(strval, '/');
410 
411 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
412 			    strcmp(slash, "/..") == 0) {
413 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
414 				    "'%s' is not a valid file"), strval);
415 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
416 				goto error;
417 			}
418 
419 			*slash = '\0';
420 
421 			if (strval[0] != '\0' &&
422 			    (stat64(strval, &statbuf) != 0 ||
423 			    !S_ISDIR(statbuf.st_mode))) {
424 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
425 				    "'%s' is not a valid directory"),
426 				    strval);
427 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
428 				goto error;
429 			}
430 
431 			*slash = '/';
432 			break;
433 		}
434 	}
435 
436 	return (retprops);
437 error:
438 	nvlist_free(retprops);
439 	return (NULL);
440 }
441 
442 /*
443  * Set zpool property: propname=propval.
444  */
445 int
446 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
447 {
448 	zfs_cmd_t zc = { 0 };
449 	int ret = -1;
450 	char errbuf[1024];
451 	nvlist_t *nvl = NULL;
452 	nvlist_t *realprops;
453 	uint64_t version;
454 
455 	(void) snprintf(errbuf, sizeof (errbuf),
456 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
457 	    zhp->zpool_name);
458 
459 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
460 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
461 
462 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
463 		return (no_memory(zhp->zpool_hdl));
464 
465 	if (nvlist_add_string(nvl, propname, propval) != 0) {
466 		nvlist_free(nvl);
467 		return (no_memory(zhp->zpool_hdl));
468 	}
469 
470 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
471 	if ((realprops = zpool_validate_properties(zhp->zpool_hdl,
472 	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
473 		nvlist_free(nvl);
474 		return (-1);
475 	}
476 
477 	nvlist_free(nvl);
478 	nvl = realprops;
479 
480 	/*
481 	 * Execute the corresponding ioctl() to set this property.
482 	 */
483 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
484 
485 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
486 		nvlist_free(nvl);
487 		return (-1);
488 	}
489 
490 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
491 
492 	zcmd_free_nvlists(&zc);
493 	nvlist_free(nvl);
494 
495 	if (ret)
496 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
497 	else
498 		(void) zpool_props_refresh(zhp);
499 
500 	return (ret);
501 }
502 
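/*
 * Example (illustrative only; the property name and value shown are
 * hypothetical for a given pool version):
 *
 *	if (zpool_set_prop(zhp, "failmode", "continue") != 0)
 *		(void) fprintf(stderr, "failed to set property\n");
 */
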
503 int
504 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
505 {
506 	libzfs_handle_t *hdl = zhp->zpool_hdl;
507 	zprop_list_t *entry;
508 	char buf[ZFS_MAXPROPLEN];
509 
510 	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
511 		return (-1);
512 
513 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
514 
515 		if (entry->pl_fixed)
516 			continue;
517 
518 		if (entry->pl_prop != ZPROP_INVAL &&
519 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
520 		    NULL) == 0) {
521 			if (strlen(buf) > entry->pl_width)
522 				entry->pl_width = strlen(buf);
523 		}
524 	}
525 
526 	return (0);
527 }
528 
530 /*
531  * Validate the given pool name, optionally reporting an extended error
532  * message via 'hdl' when the name is rejected.
533  */
534 boolean_t
535 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
536 {
537 	namecheck_err_t why;
538 	char what;
539 	int ret;
540 
541 	ret = pool_namecheck(pool, &why, &what);
542 
543 	/*
544 	 * The rules for reserved pool names were extended at a later point.
545 	 * But we need to support users with existing pools that may now be
546 	 * invalid.  So we only check for this expanded set of names during a
547 	 * create (or import), and only in userland.
548 	 */
549 	if (ret == 0 && !isopen &&
550 	    (strncmp(pool, "mirror", 6) == 0 ||
551 	    strncmp(pool, "raidz", 5) == 0 ||
552 	    strncmp(pool, "spare", 5) == 0 ||
553 	    strcmp(pool, "log") == 0)) {
554 		if (hdl != NULL)
555 			zfs_error_aux(hdl,
556 			    dgettext(TEXT_DOMAIN, "name is reserved"));
557 		return (B_FALSE);
558 	}
559 
561 	if (ret != 0) {
562 		if (hdl != NULL) {
563 			switch (why) {
564 			case NAME_ERR_TOOLONG:
565 				zfs_error_aux(hdl,
566 				    dgettext(TEXT_DOMAIN, "name is too long"));
567 				break;
568 
569 			case NAME_ERR_INVALCHAR:
570 				zfs_error_aux(hdl,
571 				    dgettext(TEXT_DOMAIN, "invalid character "
572 				    "'%c' in pool name"), what);
573 				break;
574 
575 			case NAME_ERR_NOLETTER:
576 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
577 				    "name must begin with a letter"));
578 				break;
579 
580 			case NAME_ERR_RESERVED:
581 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
582 				    "name is reserved"));
583 				break;
584 
585 			case NAME_ERR_DISKLIKE:
586 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
587 				    "pool name is reserved"));
588 				break;
589 
590 			case NAME_ERR_LEADING_SLASH:
591 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
592 				    "leading slash in name"));
593 				break;
594 
595 			case NAME_ERR_EMPTY_COMPONENT:
596 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
597 				    "empty component in name"));
598 				break;
599 
600 			case NAME_ERR_TRAILING_SLASH:
601 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
602 				    "trailing slash in name"));
603 				break;
604 
605 			case NAME_ERR_MULTIPLE_AT:
606 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
607 				    "multiple '@' delimiters in name"));
608 				break;
609 
610 			}
611 		}
612 		return (B_FALSE);
613 	}
614 
615 	return (B_TRUE);
616 }
617 
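/*
 * For example, zpool_name_valid(hdl, B_FALSE, "mirror") fails because the
 * name is reserved for vdev types, and "1pool" fails because pool names
 * must begin with a letter; "tank" passes.
 */
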
618 /*
619  * Open a handle to the given pool, even if the pool is currently in the FAULTED
620  * state.
621  */
622 zpool_handle_t *
623 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
624 {
625 	zpool_handle_t *zhp;
626 	boolean_t missing;
627 
628 	/*
629 	 * Make sure the pool name is valid.
630 	 */
631 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
632 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
633 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
634 		    pool);
635 		return (NULL);
636 	}
637 
638 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
639 		return (NULL);
640 
641 	zhp->zpool_hdl = hdl;
642 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
643 
644 	if (zpool_refresh_stats(zhp, &missing) != 0) {
645 		zpool_close(zhp);
646 		return (NULL);
647 	}
648 
649 	if (missing) {
650 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
651 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
652 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
653 		zpool_close(zhp);
654 		return (NULL);
655 	}
656 
657 	return (zhp);
658 }
659 
660 /*
661  * Like the above, but silent on error.  Used when iterating over pools (because
662  * the configuration cache may be out of date).
663  */
664 int
665 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
666 {
667 	zpool_handle_t *zhp;
668 	boolean_t missing;
669 
670 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
671 		return (-1);
672 
673 	zhp->zpool_hdl = hdl;
674 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
675 
676 	if (zpool_refresh_stats(zhp, &missing) != 0) {
677 		zpool_close(zhp);
678 		return (-1);
679 	}
680 
681 	if (missing) {
682 		zpool_close(zhp);
683 		*ret = NULL;
684 		return (0);
685 	}
686 
687 	*ret = zhp;
688 	return (0);
689 }
690 
691 /*
692  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
693  * state.
694  */
695 zpool_handle_t *
696 zpool_open(libzfs_handle_t *hdl, const char *pool)
697 {
698 	zpool_handle_t *zhp;
699 
700 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
701 		return (NULL);
702 
703 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
704 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
705 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
706 		zpool_close(zhp);
707 		return (NULL);
708 	}
709 
710 	return (zhp);
711 }
712 
713 /*
714  * Close the handle.  Simply frees the memory associated with the handle.
715  */
716 void
717 zpool_close(zpool_handle_t *zhp)
718 {
719 	if (zhp->zpool_config)
720 		nvlist_free(zhp->zpool_config);
721 	if (zhp->zpool_old_config)
722 		nvlist_free(zhp->zpool_old_config);
723 	if (zhp->zpool_props)
724 		nvlist_free(zhp->zpool_props);
725 	free(zhp);
726 }
727 
728 /*
729  * Return the name of the pool.
730  */
731 const char *
732 zpool_get_name(zpool_handle_t *zhp)
733 {
734 	return (zhp->zpool_name);
735 }
736 
738 /*
739  * Return the state of the pool (ACTIVE or UNAVAILABLE)
740  */
741 int
742 zpool_get_state(zpool_handle_t *zhp)
743 {
744 	return (zhp->zpool_state);
745 }
746 
747 /*
748  * Create the named pool, using the provided vdev list.  It is assumed
749  * that the consumer has already validated the contents of the nvlist, so we
750  * don't have to worry about error semantics.
751  */
752 int
753 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
754     nvlist_t *props)
755 {
756 	zfs_cmd_t zc = { 0 };
757 	char msg[1024];
758 	char *altroot;
759 
760 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
761 	    "cannot create '%s'"), pool);
762 
763 	if (!zpool_name_valid(hdl, B_FALSE, pool))
764 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
765 
766 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
767 		return (-1);
768 
769 	if (props && (props = zpool_validate_properties(hdl, pool, props,
770 	    SPA_VERSION_1, B_TRUE, msg)) == NULL)
771 		return (-1);
772 
773 	if (props && zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
774 		nvlist_free(props);
775 		return (-1);
776 	}
777 
778 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
779 
780 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc) != 0) {
782 		zcmd_free_nvlists(&zc);
783 		nvlist_free(props);
784 
785 		switch (errno) {
786 		case EBUSY:
787 			/*
788 			 * This can happen if the user has specified the same
789 			 * device multiple times.  We can't reliably detect this
790 			 * until we try to add it and see we already have a
791 			 * label.
792 			 */
793 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
794 			    "one or more vdevs refer to the same device"));
795 			return (zfs_error(hdl, EZFS_BADDEV, msg));
796 
797 		case EOVERFLOW:
798 			/*
799 			 * This occurs when one of the devices is below
800 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
801 			 * device was the problem device since there's no
802 			 * reliable way to determine device size from userland.
803 			 */
804 			{
805 				char buf[64];
806 
807 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
808 
809 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
810 				    "one or more devices is less than the "
811 				    "minimum size (%s)"), buf);
812 			}
813 			return (zfs_error(hdl, EZFS_BADDEV, msg));
814 
815 		case ENOSPC:
816 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
817 			    "one or more devices is out of space"));
818 			return (zfs_error(hdl, EZFS_BADDEV, msg));
819 
820 		case ENOTBLK:
821 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
822 			    "cache device must be a disk or disk slice"));
823 			return (zfs_error(hdl, EZFS_BADDEV, msg));
824 
825 		default:
826 			return (zpool_standard_error(hdl, errno, msg));
827 		}
828 	}
829 
830 	/*
831 	 * If this is an alternate root pool, then we automatically set the
832 	 * mountpoint of the root dataset to be '/'.
833 	 */
834 	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
835 	    &altroot) == 0) {
836 		zfs_handle_t *zhp;
837 
838 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
839 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
840 		    "/") == 0);
841 
842 		zfs_close(zhp);
843 	}
844 
845 	zcmd_free_nvlists(&zc);
846 	nvlist_free(props);
847 	return (0);
848 }
849 
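/*
 * Example (a rough sketch of a single-disk create; error handling is
 * omitted and the device path is hypothetical -- real consumers such as
 * the zpool(1M) command build 'nvroot' from parsed vdev specifications):
 *
 *	nvlist_t *disk, *root;
 *
 *	(void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t0d0s0");
 *	(void) nvlist_alloc(&root, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	(void) nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN, &disk, 1);
 *
 *	(void) zpool_create(hdl, "tank", root, NULL);
 *	nvlist_free(disk);
 *	nvlist_free(root);
 */
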
850 /*
851  * Destroy the given pool.  It is up to the caller to ensure that there are no
852  * datasets left in the pool.
853  */
854 int
855 zpool_destroy(zpool_handle_t *zhp)
856 {
857 	zfs_cmd_t zc = { 0 };
858 	zfs_handle_t *zfp = NULL;
859 	libzfs_handle_t *hdl = zhp->zpool_hdl;
860 	char msg[1024];
861 
862 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
863 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
864 	    ZFS_TYPE_FILESYSTEM)) == NULL)
865 		return (-1);
866 
867 	if (zpool_remove_zvol_links(zhp) != 0)
868 		return (-1);
869 
870 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
871 
872 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
873 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
874 		    "cannot destroy '%s'"), zhp->zpool_name);
875 
876 		if (errno == EROFS) {
877 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
878 			    "one or more devices is read only"));
879 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
880 		} else {
881 			(void) zpool_standard_error(hdl, errno, msg);
882 		}
883 
884 		if (zfp)
885 			zfs_close(zfp);
886 		return (-1);
887 	}
888 
889 	if (zfp) {
890 		remove_mountpoint(zfp);
891 		zfs_close(zfp);
892 	}
893 
894 	return (0);
895 }
896 
897 /*
898  * Add the given vdevs to the pool.  The caller must have already performed the
899  * necessary verification to ensure that the vdev specification is well-formed.
900  */
901 int
902 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
903 {
904 	zfs_cmd_t zc = { 0 };
905 	int ret;
906 	libzfs_handle_t *hdl = zhp->zpool_hdl;
907 	char msg[1024];
908 	nvlist_t **spares, **l2cache;
909 	uint_t nspares, nl2cache;
910 
911 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
912 	    "cannot add to '%s'"), zhp->zpool_name);
913 
914 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
915 	    SPA_VERSION_SPARES &&
916 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
917 	    &spares, &nspares) == 0) {
918 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
919 		    "upgraded to add hot spares"));
920 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
921 	}
922 
923 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
924 	    SPA_VERSION_L2CACHE &&
925 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
926 	    &l2cache, &nl2cache) == 0) {
927 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
928 		    "upgraded to add cache devices"));
929 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
930 	}
931 
932 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
933 		return (-1);
934 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
935 
936 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
937 		switch (errno) {
938 		case EBUSY:
939 			/*
940 			 * This can happen if the user has specified the same
941 			 * device multiple times.  We can't reliably detect this
942 			 * until we try to add it and see we already have a
943 			 * label.
944 			 */
945 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
946 			    "one or more vdevs refer to the same device"));
947 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
948 			break;
949 
950 		case EOVERFLOW:
951 			/*
952 			 * This occurs when one of the devices is below
953 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
954 			 * device was the problem device since there's no
955 			 * reliable way to determine device size from userland.
956 			 */
957 			{
958 				char buf[64];
959 
960 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
961 
962 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
963 				    "device is less than the minimum "
964 				    "size (%s)"), buf);
965 			}
966 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
967 			break;
968 
969 		case ENOTSUP:
970 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
971 			    "pool must be upgraded to add these vdevs"));
972 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
973 			break;
974 
975 		case EDOM:
976 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
977 			    "root pool can not have multiple vdevs"
978 			    " or separate logs"));
979 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
980 			break;
981 
982 		case ENOTBLK:
983 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
984 			    "cache device must be a disk or disk slice"));
985 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
986 			break;
987 
988 		default:
989 			(void) zpool_standard_error(hdl, errno, msg);
990 		}
991 
992 		ret = -1;
993 	} else {
994 		ret = 0;
995 	}
996 
997 	zcmd_free_nvlists(&zc);
998 
999 	return (ret);
1000 }
1001 
1002 /*
1003  * Exports the pool from the system.  The caller must ensure that there are no
1004  * mounted datasets in the pool.
1005  */
1006 int
1007 zpool_export(zpool_handle_t *zhp)
1008 {
1009 	zfs_cmd_t zc = { 0 };
1010 
1011 	if (zpool_remove_zvol_links(zhp) != 0)
1012 		return (-1);
1013 
1014 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1015 
1016 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0)
1017 		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1018 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
1019 		    zhp->zpool_name));
1020 	return (0);
1021 }
1022 
1023 /*
1024  * zpool_import() is a contracted interface. Should be kept the same
1025  * if possible.
1026  *
1027  * Applications should use zpool_import_props() to import a pool with
1028  * new property values to be set.
1029  */
1030 int
1031 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1032     char *altroot)
1033 {
1034 	nvlist_t *props = NULL;
1035 	int ret;
1036 
1037 	if (altroot != NULL) {
1038 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1039 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1040 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1041 			    newname));
1042 		}
1043 
1044 		if (nvlist_add_string(props,
1045 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0) {
1046 			nvlist_free(props);
1047 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1048 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1049 			    newname));
1050 		}
1051 	}
1052 
1053 	ret = zpool_import_props(hdl, config, newname, props);
1054 	if (props)
1055 		nvlist_free(props);
1056 	return (ret);
1057 }
1058 
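/*
 * Example (illustrative; assumes 'config' was obtained from
 * zpool_find_import() and that "/mnt" is the desired alternate root):
 *
 *	if (zpool_import(hdl, config, NULL, "/mnt") != 0)
 *		(void) fprintf(stderr, "import failed\n");
 */
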
1059 /*
1060  * Import the given pool using the known configuration and a list of
1061  * properties to be set. The configuration should have come from
1062  * zpool_find_import(). The 'newname' parameter controls whether the pool
1063  * is imported with a different name.
1064  */
1065 int
1066 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1067     nvlist_t *props)
1068 {
1069 	zfs_cmd_t zc = { 0 };
1070 	char *thename;
1071 	char *origname;
1072 	int ret;
1073 	char errbuf[1024];
1074 
1075 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1076 	    &origname) == 0);
1077 
1078 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1079 	    "cannot import pool '%s'"), origname);
1080 
1081 	if (newname != NULL) {
1082 		if (!zpool_name_valid(hdl, B_FALSE, newname))
1083 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1084 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1085 			    newname));
1086 		thename = (char *)newname;
1087 	} else {
1088 		thename = origname;
1089 	}
1090 
1091 	if (props) {
1092 		uint64_t version;
1093 
1094 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1095 		    &version) == 0);
1096 
1097 		if ((props = zpool_validate_properties(hdl, origname,
1098 		    props, version, B_TRUE, errbuf)) == NULL) {
1099 			return (-1);
1100 		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1101 			nvlist_free(props);
1102 			return (-1);
1103 		}
1104 	}
1105 
1106 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1107 
1108 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1109 	    &zc.zc_guid) == 0);
1110 
1111 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1112 		nvlist_free(props);
1113 		return (-1);
1114 	}
1115 
1116 	ret = 0;
1117 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1118 		char desc[1024];
1119 		if (newname == NULL)
1120 			(void) snprintf(desc, sizeof (desc),
1121 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1122 			    thename);
1123 		else
1124 			(void) snprintf(desc, sizeof (desc),
1125 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1126 			    origname, thename);
1127 
1128 		switch (errno) {
1129 		case ENOTSUP:
1130 			/*
1131 			 * Unsupported version.
1132 			 */
1133 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
1134 			break;
1135 
1136 		case EINVAL:
1137 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1138 			break;
1139 
1140 		default:
1141 			(void) zpool_standard_error(hdl, errno, desc);
1142 		}
1143 
1144 		ret = -1;
1145 	} else {
1146 		zpool_handle_t *zhp;
1147 
1148 		/*
1149 		 * This should never fail, but play it safe anyway.
1150 		 */
1151 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
1152 			ret = -1;
1153 		} else if (zhp != NULL) {
1154 			ret = zpool_create_zvol_links(zhp);
1155 			zpool_close(zhp);
1156 		}
1157 
1159 
1160 	zcmd_free_nvlists(&zc);
1161 	nvlist_free(props);
1162 
1163 	return (ret);
1164 }
1165 
1166 /*
1167  * Scrub the pool.
1168  */
1169 int
1170 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1171 {
1172 	zfs_cmd_t zc = { 0 };
1173 	char msg[1024];
1174 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1175 
1176 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1177 	zc.zc_cookie = type;
1178 
1179 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1180 		return (0);
1181 
1182 	(void) snprintf(msg, sizeof (msg),
1183 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1184 
1185 	if (errno == EBUSY)
1186 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
1187 	else
1188 		return (zpool_standard_error(hdl, errno, msg));
1189 }
1190 
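/*
 * Example: start a full scrub, or cancel one in progress; both values
 * come from pool_scrub_type_t:
 *
 *	(void) zpool_scrub(zhp, POOL_SCRUB_EVERYTHING);
 *	...
 *	(void) zpool_scrub(zhp, POOL_SCRUB_NONE);
 */
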
1191 /*
1192  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1193  * spare, but FALSE if it's an INUSE spare.
1194  */
1195 static nvlist_t *
1196 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
1197     boolean_t *avail_spare, boolean_t *l2cache)
1198 {
1199 	uint_t c, children;
1200 	nvlist_t **child;
1201 	uint64_t theguid, present;
1202 	char *path;
1203 	uint64_t wholedisk = 0;
1204 	nvlist_t *ret;
1205 
1206 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
1207 
1208 	if (search == NULL &&
1209 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
1210 		/*
1211 		 * If the device has never been present since import, the only
1212 		 * reliable way to match the vdev is by GUID.
1213 		 */
1214 		if (theguid == guid)
1215 			return (nv);
1216 	} else if (search != NULL &&
1217 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1218 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1219 		    &wholedisk);
1220 		if (wholedisk) {
1221 			/*
1222 			 * For whole disks, the internal path has 's0', but the
1223 			 * path passed in by the user doesn't.
1224 			 */
1225 			if (strlen(search) == strlen(path) - 2 &&
1226 			    strncmp(search, path, strlen(search)) == 0)
1227 				return (nv);
1228 		} else if (strcmp(search, path) == 0) {
1229 			return (nv);
1230 		}
1231 	}
1232 
1233 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1234 	    &child, &children) != 0)
1235 		return (NULL);
1236 
1237 	for (c = 0; c < children; c++)
1238 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1239 		    avail_spare, l2cache)) != NULL)
1240 			return (ret);
1241 
1242 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1243 	    &child, &children) == 0) {
1244 		for (c = 0; c < children; c++) {
1245 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1246 			    avail_spare, l2cache)) != NULL) {
1247 				*avail_spare = B_TRUE;
1248 				return (ret);
1249 			}
1250 		}
1251 	}
1252 
1253 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1254 	    &child, &children) == 0) {
1255 		for (c = 0; c < children; c++) {
1256 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1257 			    avail_spare, l2cache)) != NULL) {
1258 				*l2cache = B_TRUE;
1259 				return (ret);
1260 			}
1261 		}
1262 	}
1263 
1264 	return (NULL);
1265 }
1266 
1267 nvlist_t *
1268 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1269     boolean_t *l2cache)
1270 {
1271 	char buf[MAXPATHLEN];
1272 	const char *search;
1273 	char *end;
1274 	nvlist_t *nvroot;
1275 	uint64_t guid;
1276 
1277 	guid = strtoull(path, &end, 10);
1278 	if (guid != 0 && *end == '\0') {
1279 		search = NULL;
1280 	} else if (path[0] != '/') {
1281 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1282 		search = buf;
1283 	} else {
1284 		search = path;
1285 	}
1286 
1287 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1288 	    &nvroot) == 0);
1289 
1290 	*avail_spare = B_FALSE;
1291 	*l2cache = B_FALSE;
1292 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
1293 	    l2cache));
1294 }
1295 
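/*
 * Example: zpool_find_vdev() accepts a shorthand device name, a full
 * path, or a GUID rendered in decimal ("c1t0d0" below is hypothetical):
 *
 *	boolean_t spare, l2cache;
 *	nvlist_t *tgt = zpool_find_vdev(zhp, "c1t0d0", &spare, &l2cache);
 */
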
1296 /*
1297  * Returns TRUE if the given guid corresponds to the given type.
1298  * This is used to check for hot spares (INUSE or not), and level 2 cache
1299  * devices.
1300  */
1301 static boolean_t
1302 is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
1303 {
1304 	uint64_t target_guid;
1305 	nvlist_t *nvroot;
1306 	nvlist_t **list;
1307 	uint_t count;
1308 	int i;
1309 
1310 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1311 	    &nvroot) == 0);
1312 	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
1313 		for (i = 0; i < count; i++) {
1314 			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
1315 			    &target_guid) == 0);
1316 			if (guid == target_guid)
1317 				return (B_TRUE);
1318 		}
1319 	}
1320 
1321 	return (B_FALSE);
1322 }
1323 
1324 /*
1325  * Bring the specified vdev online.  The 'flags' parameter is a set of the
1326  * ZFS_ONLINE_* flags.
1327  */
1328 int
1329 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1330     vdev_state_t *newstate)
1331 {
1332 	zfs_cmd_t zc = { 0 };
1333 	char msg[1024];
1334 	nvlist_t *tgt;
1335 	boolean_t avail_spare, l2cache;
1336 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1337 
1338 	(void) snprintf(msg, sizeof (msg),
1339 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1340 
1341 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1342 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == NULL)
1343 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1344 
1345 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1346 
1347 	if (avail_spare ||
1348 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1349 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1350 
1351 	if (l2cache ||
1352 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_L2CACHE) == B_TRUE)
1353 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1354 
1355 	zc.zc_cookie = VDEV_STATE_ONLINE;
1356 	zc.zc_obj = flags;
1357 
1359 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1360 		return (zpool_standard_error(hdl, errno, msg));
1361 
1362 	*newstate = zc.zc_cookie;
1363 	return (0);
1364 }
1365 
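/*
 * Example (a minimal sketch; "c1t0d0" is hypothetical):
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) == 0 &&
 *	    newstate != VDEV_STATE_HEALTHY)
 *		(void) printf("device online but %s\n",
 *		    zpool_state_to_name(newstate, VDEV_AUX_NONE));
 */
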
1366 /*
1367  * Take the specified vdev offline
1368  */
1369 int
1370 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1371 {
1372 	zfs_cmd_t zc = { 0 };
1373 	char msg[1024];
1374 	nvlist_t *tgt;
1375 	boolean_t avail_spare, l2cache;
1376 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1377 
1378 	(void) snprintf(msg, sizeof (msg),
1379 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1380 
1381 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1382 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == NULL)
1383 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1384 
1385 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1386 
1387 	if (avail_spare ||
1388 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1389 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1390 
1391 	if (l2cache ||
1392 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_L2CACHE) == B_TRUE)
1393 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1394 
1395 	zc.zc_cookie = VDEV_STATE_OFFLINE;
1396 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1397 
1398 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1399 		return (0);
1400 
1401 	switch (errno) {
1402 	case EBUSY:
1404 		/*
1405 		 * There are no other replicas of this device.
1406 		 */
1407 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1408 
1409 	default:
1410 		return (zpool_standard_error(hdl, errno, msg));
1411 	}
1412 }
1413 
1414 /*
1415  * Mark the given vdev faulted.
1416  */
1417 int
1418 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
1419 {
1420 	zfs_cmd_t zc = { 0 };
1421 	char msg[1024];
1422 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1423 
1424 	(void) snprintf(msg, sizeof (msg),
1425 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
1426 
1427 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1428 	zc.zc_guid = guid;
1429 	zc.zc_cookie = VDEV_STATE_FAULTED;
1430 
1431 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1432 		return (0);
1433 
1434 	switch (errno) {
1435 	case EBUSY:
1437 		/*
1438 		 * There are no other replicas of this device.
1439 		 */
1440 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1441 
1442 	default:
1443 		return (zpool_standard_error(hdl, errno, msg));
1444 	}
1446 }
1447 
1448 /*
1449  * Mark the given vdev degraded.
1450  */
1451 int
1452 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
1453 {
1454 	zfs_cmd_t zc = { 0 };
1455 	char msg[1024];
1456 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1457 
1458 	(void) snprintf(msg, sizeof (msg),
1459 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
1460 
1461 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1462 	zc.zc_guid = guid;
1463 	zc.zc_cookie = VDEV_STATE_DEGRADED;
1464 
1465 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1466 		return (0);
1467 
1468 	return (zpool_standard_error(hdl, errno, msg));
1469 }
1470 
1471 /*
1472  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1473  * a hot spare.
1474  */
1475 static boolean_t
1476 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1477 {
1478 	nvlist_t **child;
1479 	uint_t c, children;
1480 	char *type;
1481 
1482 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1483 	    &children) == 0) {
1484 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1485 		    &type) == 0);
1486 
1487 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1488 		    children == 2 && child[which] == tgt)
1489 			return (B_TRUE);
1490 
1491 		for (c = 0; c < children; c++)
1492 			if (is_replacing_spare(child[c], tgt, which))
1493 				return (B_TRUE);
1494 	}
1495 
1496 	return (B_FALSE);
1497 }
1498 
1499 /*
1500  * Attach new_disk (fully described by nvroot) to old_disk.
1501  * If 'replacing' is specified, the new disk will replace the old one.
1502  */
1503 int
1504 zpool_vdev_attach(zpool_handle_t *zhp,
1505     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1506 {
1507 	zfs_cmd_t zc = { 0 };
1508 	char msg[1024];
1509 	int ret;
1510 	nvlist_t *tgt;
1511 	boolean_t avail_spare, l2cache;
1512 	uint64_t val, is_log;
1513 	char *path;
1514 	nvlist_t **child;
1515 	uint_t children;
1516 	nvlist_t *config_root;
1517 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1518 
1519 	if (replacing)
1520 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1521 		    "cannot replace %s with %s"), old_disk, new_disk);
1522 	else
1523 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1524 		    "cannot attach %s to %s"), new_disk, old_disk);
1525 
1526 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1527 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache)) == 0)
1528 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1529 
1530 	if (avail_spare)
1531 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1532 
1533 	if (l2cache)
1534 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1535 
1536 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1537 	zc.zc_cookie = replacing;
1538 
1539 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1540 	    &child, &children) != 0 || children != 1) {
1541 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1542 		    "new device must be a single disk"));
1543 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1544 	}
1545 
1546 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1547 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1548 
1549 	/*
1550 	 * If the target is a hot spare that has been swapped in, we can only
1551 	 * replace it with another hot spare.
1552 	 */
1553 	if (replacing &&
1554 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1555 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1556 	    (zpool_find_vdev(zhp, path, &avail_spare, &l2cache) == NULL ||
1557 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1558 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1559 		    "can only be replaced by another hot spare"));
1560 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1561 	}
1562 
1563 	/*
1564 	 * If we are attempting to replace a spare, it cannot be applied to an
1565 	 * already spared device.
1566 	 */
1567 	if (replacing &&
1568 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1569 	    zpool_find_vdev(zhp, path, &avail_spare, &l2cache) != NULL &&
1570 	    avail_spare && is_replacing_spare(config_root, tgt, 0)) {
1571 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1572 		    "device has already been replaced with a spare"));
1573 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1574 	}
1575 
1576 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1577 		return (-1);
1578 
1579 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1580 
1581 	zcmd_free_nvlists(&zc);
1582 
1583 	if (ret == 0)
1584 		return (0);
1585 
1586 	switch (errno) {
1587 	case ENOTSUP:
1588 		/*
1589 		 * Can't attach to or replace this type of vdev.
1590 		 */
1591 		if (replacing) {
1592 			is_log = B_FALSE;
1593 			(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_LOG,
1594 			    &is_log);
1595 			if (is_log)
1596 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1597 				    "cannot replace a log with a spare"));
1598 			else
1599 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1600 				    "cannot replace a replacing device"));
1601 		} else {
1602 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1603 			    "can only attach to mirrors and top-level "
1604 			    "disks"));
1605 		}
1606 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1607 		break;
1608 
1609 	case EINVAL:
1610 		/*
1611 		 * The new device must be a single disk.
1612 		 */
1613 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1614 		    "new device must be a single disk"));
1615 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1616 		break;
1617 
1618 	case EBUSY:
1619 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1620 		    new_disk);
1621 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1622 		break;
1623 
1624 	case EOVERFLOW:
1625 		/*
1626 		 * The new device is too small.
1627 		 */
1628 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1629 		    "device is too small"));
1630 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1631 		break;
1632 
1633 	case EDOM:
1634 		/*
1635 		 * The new device has a different alignment requirement.
1636 		 */
1637 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1638 		    "devices have different sector alignment"));
1639 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1640 		break;
1641 
1642 	case ENAMETOOLONG:
1643 		/*
1644 		 * The resulting top-level vdev spec won't fit in the label.
1645 		 */
1646 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1647 		break;
1648 
1649 	default:
1650 		(void) zpool_standard_error(hdl, errno, msg);
1651 	}
1652 
1653 	return (-1);
1654 }
1655 
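/*
 * Example (sketch only): replacing "c1t0d0" with "c2t0d0", where 'nvroot'
 * describes the single new disk and is built the same way as in the
 * zpool_create() sketch above (a VDEV_TYPE_ROOT nvlist wrapping one disk):
 *
 *	(void) zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 1);
 */
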
1656 /*
1657  * Detach the specified device.
1658  */
1659 int
1660 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1661 {
1662 	zfs_cmd_t zc = { 0 };
1663 	char msg[1024];
1664 	nvlist_t *tgt;
1665 	boolean_t avail_spare, l2cache;
1666 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1667 
1668 	(void) snprintf(msg, sizeof (msg),
1669 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1670 
1671 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1672 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == 0)
1673 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1674 
1675 	if (avail_spare)
1676 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1677 
1678 	if (l2cache)
1679 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1680 
1681 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1682 
1683 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1684 		return (0);
1685 
1686 	switch (errno) {
1687 
1688 	case ENOTSUP:
1689 		/*
1690 		 * Can't detach from this type of vdev.
1691 		 */
1692 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1693 		    "applicable to mirror and replacing vdevs"));
1694 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1695 		break;
1696 
1697 	case EBUSY:
1698 		/*
1699 		 * There are no other replicas of this device.
1700 		 */
1701 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1702 		break;
1703 
1704 	default:
1705 		(void) zpool_standard_error(hdl, errno, msg);
1706 	}
1707 
1708 	return (-1);
1709 }
1710 
1711 /*
1712  * Remove the given device.  Currently, this is supported only for hot spares
1713  * and level 2 cache devices.
1714  */
1715 int
1716 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1717 {
1718 	zfs_cmd_t zc = { 0 };
1719 	char msg[1024];
1720 	nvlist_t *tgt;
1721 	boolean_t avail_spare, l2cache;
1722 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1723 
1724 	(void) snprintf(msg, sizeof (msg),
1725 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1726 
1727 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1728 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == 0)
1729 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1730 
1731 	if (!avail_spare && !l2cache) {
1732 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1733 		    "only inactive hot spares or cache devices "
1734 		    "can be removed"));
1735 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1736 	}
1737 
1738 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1739 
1740 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1741 		return (0);
1742 
1743 	return (zpool_standard_error(hdl, errno, msg));
1744 }
1745 
1746 /*
1747  * Clear the errors for the pool, or the particular device if specified.
1748  */
1749 int
1750 zpool_clear(zpool_handle_t *zhp, const char *path)
1751 {
1752 	zfs_cmd_t zc = { 0 };
1753 	char msg[1024];
1754 	nvlist_t *tgt;
1755 	boolean_t avail_spare, l2cache;
1756 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1757 
1758 	if (path)
1759 		(void) snprintf(msg, sizeof (msg),
1760 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1761 		    path);
1762 	else
1763 		(void) snprintf(msg, sizeof (msg),
1764 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1765 		    zhp->zpool_name);
1766 
1767 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1768 	if (path) {
1769 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
1770 		    &l2cache)) == 0)
1771 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1772 
1773 		/*
1774 		 * Don't allow error clearing for hot spares.  Do allow
1775 		 * error clearing for l2cache devices.
1776 		 */
1777 		if (avail_spare)
1778 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1779 
1780 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1781 		    &zc.zc_guid) == 0);
1782 	}
1783 
1784 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
1785 		return (0);
1786 
1787 	return (zpool_standard_error(hdl, errno, msg));
1788 }
1789 
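/*
 * Example: clear errors pool-wide, or for a single (hypothetical) device:
 *
 *	(void) zpool_clear(zhp, NULL);
 *	(void) zpool_clear(zhp, "c1t0d0");
 */
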
1790 /*
1791  * Similar to zpool_clear(), but takes a GUID (used by fmd).
1792  */
1793 int
1794 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
1795 {
1796 	zfs_cmd_t zc = { 0 };
1797 	char msg[1024];
1798 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1799 
1800 	(void) snprintf(msg, sizeof (msg),
1801 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
1802 	    (u_longlong_t)guid);
1803 
1804 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1805 	zc.zc_guid = guid;
1806 
1807 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1808 		return (0);
1809 
1810 	return (zpool_standard_error(hdl, errno, msg));
1811 }
1812 
1813 /*
1814  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1815  * hierarchy.
1816  */
1817 int
1818 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1819     void *data)
1820 {
1821 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1822 	char (*paths)[MAXPATHLEN];
1823 	size_t size = 4;
1824 	int curr, fd, base, ret = 0;
1825 	DIR *dirp;
1826 	struct dirent *dp;
1827 	struct stat st;
1828 
1829 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
1830 		return (errno == ENOENT ? 0 : -1);
1831 
1832 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
1833 		int err = errno;
1834 		(void) close(base);
1835 		return (err == ENOENT ? 0 : -1);
1836 	}
1837 
1838 	/*
1839 	 * Oddly this wasn't a directory -- ignore that failure since we
1840 	 * know there are no links lower in the (non-existent) hierarchy.
1841 	 */
1842 	if (!S_ISDIR(st.st_mode)) {
1843 		(void) close(base);
1844 		return (0);
1845 	}
1846 
1847 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1848 		(void) close(base);
1849 		return (-1);
1850 	}
1851 
1852 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1853 	curr = 0;
1854 
1855 	while (curr >= 0) {
1856 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
1857 			goto err;
1858 
1859 		if (S_ISDIR(st.st_mode)) {
1860 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
1861 				goto err;
1862 
1863 			if ((dirp = fdopendir(fd)) == NULL) {
1864 				(void) close(fd);
1865 				goto err;
1866 			}
1867 
1868 			while ((dp = readdir(dirp)) != NULL) {
1869 				if (dp->d_name[0] == '.')
1870 					continue;
1871 
1872 				if (curr + 1 == size) {
1873 					paths = zfs_realloc(hdl, paths,
1874 					    size * sizeof (paths[0]),
1875 					    size * 2 * sizeof (paths[0]));
1876 					if (paths == NULL) {
1877 						(void) closedir(dirp);
1878 						(void) close(fd);
1879 						goto err;
1880 					}
1881 
1882 					size *= 2;
1883 				}
1884 
1885 				(void) strlcpy(paths[curr + 1], paths[curr],
1886 				    sizeof (paths[curr + 1]));
1887 				(void) strlcat(paths[curr], "/",
1888 				    sizeof (paths[curr]));
1889 				(void) strlcat(paths[curr], dp->d_name,
1890 				    sizeof (paths[curr]));
1891 				curr++;
1892 			}
1893 
1894 			(void) closedir(dirp);
1895 
1896 		} else {
1897 			if ((ret = cb(paths[curr], data)) != 0)
1898 				break;
1899 		}
1900 
1901 		curr--;
1902 	}
1903 
1904 	free(paths);
1905 	(void) close(base);
1906 
1907 	return (ret);
1908 
1909 err:
1910 	free(paths);
1911 	(void) close(base);
1912 	return (-1);
1913 }
1914 
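/*
 * Example callback (hypothetical) that counts the zvol links under a pool:
 *
 *	static int
 *	count_zvol(const char *path, void *data)
 *	{
 *		(*(int *)data)++;
 *		return (0);
 *	}
 *
 *	int count = 0;
 *	(void) zpool_iter_zvol(zhp, count_zvol, &count);
 */
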
1915 typedef struct zvol_cb {
1916 	zpool_handle_t *zcb_pool;
1917 	boolean_t zcb_create;
1918 } zvol_cb_t;
1919 
1920 /*ARGSUSED*/
1921 static int
1922 do_zvol_create(zfs_handle_t *zhp, void *data)
1923 {
1924 	int ret = 0;
1925 
1926 	if (ZFS_IS_VOLUME(zhp)) {
1927 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1928 		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
1929 	}
1930 
1931 	if (ret == 0)
1932 		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
1933 
1934 	zfs_close(zhp);
1935 
1936 	return (ret);
1937 }
1938 
1939 /*
1940  * Iterate over all zvols in the pool and make any necessary minor nodes.
1941  */
1942 int
1943 zpool_create_zvol_links(zpool_handle_t *zhp)
1944 {
1945 	zfs_handle_t *zfp;
1946 	int ret;
1947 
1948 	/*
1949 	 * If the pool is unavailable, just return success.
1950 	 */
1951 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1952 	    zhp->zpool_name)) == NULL)
1953 		return (0);
1954 
1955 	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
1956 
1957 	zfs_close(zfp);
1958 	return (ret);
1959 }
1960 
1961 static int
1962 do_zvol_remove(const char *dataset, void *data)
1963 {
1964 	zpool_handle_t *zhp = data;
1965 
1966 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
1967 }
1968 
1969 /*
1970  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
1971  * by examining the /dev links so that a corrupted pool doesn't impede this
1972  * operation.
1973  */
1974 int
1975 zpool_remove_zvol_links(zpool_handle_t *zhp)
1976 {
1977 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
1978 }
1979 
1980 /*
1981  * Convert from a devid string to a path.
1982  */
1983 static char *
1984 devid_to_path(char *devid_str)
1985 {
1986 	ddi_devid_t devid;
1987 	char *minor;
1988 	char *path;
1989 	devid_nmlist_t *list = NULL;
1990 	int ret;
1991 
1992 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1993 		return (NULL);
1994 
1995 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1996 
1997 	devid_str_free(minor);
1998 	devid_free(devid);
1999 
2000 	if (ret != 0)
2001 		return (NULL);
2002 
2003 	path = strdup(list[0].devname);
2004 	devid_free_nmlist(list);
2005 	if (path == NULL)
2006 		return (NULL);
2007 
2008 	return (path);
2009 }
2010 
2011 /*
2012  * Convert from a path to a devid string.
2013  */
2014 static char *
2015 path_to_devid(const char *path)
2016 {
2017 	int fd;
2018 	ddi_devid_t devid;
2019 	char *minor, *ret;
2020 
2021 	if ((fd = open(path, O_RDONLY)) < 0)
2022 		return (NULL);
2023 
2024 	minor = NULL;
2025 	ret = NULL;
2026 	if (devid_get(fd, &devid) == 0) {
2027 		if (devid_get_minor_name(fd, &minor) == 0)
2028 			ret = devid_str_encode(devid, minor);
2029 		if (minor != NULL)
2030 			devid_str_free(minor);
2031 		devid_free(devid);
2032 	}
2033 	(void) close(fd);
2034 
2035 	return (ret);
2036 }
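
/*
 * Illustration only: a sketch of the devid round trip these two helpers
 * enable (it mirrors the check in zpool_vdev_name() below).  'stored' is
 * a stand-in for the devid string recorded in the vdev config:
 *
 *	char *cur = path_to_devid("/dev/dsk/c0t0d0s0");
 *	if (cur == NULL || strcmp(stored, cur) != 0) {
 *		char *newpath = devid_to_path(stored);
 *		if (newpath != NULL) {
 *			(the device moved; adopt newpath)
 *			free(newpath);
 *		}
 *	}
 *	if (cur != NULL)
 *		devid_str_free(cur);
 */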
2037 
2038 /*
2039  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2040  * ignore any failure here, since a common case is for an unprivileged user to
2041  * type 'zpool status', and we'll display the correct information anyway.
2042  */
2043 static void
2044 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2045 {
2046 	zfs_cmd_t zc = { 0 };
2047 
2048 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2049 	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
2050 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2051 	    &zc.zc_guid) == 0);
2052 
2053 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2054 }
2055 
2056 /*
2057  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2058  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2059  * We also check if this is a whole disk, in which case we strip off the
2060  * trailing 's0' slice name.
2061  *
2062  * This routine is also responsible for identifying when disks have been
2063  * reconfigured in a new location.  The kernel will have opened the device by
2064  * devid, but the path will still refer to the old location.  To catch this, we
2065  * first do a path -> devid translation (which is fast for the common case).  If
2066  * the devid matches, we're done.  If not, we do a reverse devid -> path
2067  * translation and issue the appropriate ioctl() to update the path of the vdev.
2068  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2069  * of these checks.
2070  */
2071 char *
2072 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2073 {
2074 	char *path, *devid;
2075 	uint64_t value;
2076 	char buf[64];
2077 	vdev_stat_t *vs;
2078 	uint_t vsc;
2079 
2080 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2081 	    &value) == 0) {
2082 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2083 		    &value) == 0);
2084 		(void) snprintf(buf, sizeof (buf), "%llu",
2085 		    (u_longlong_t)value);
2086 		path = buf;
2087 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2088 
2089 		/*
2090 		 * If the device is dead (faulted, offline, etc) then don't
2091 		 * bother opening it.  Otherwise we may be forcing the user to
2092 		 * open a misbehaving device, which can have undesirable
2093 		 * effects.
2094 		 */
2095 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2096 		    (uint64_t **)&vs, &vsc) != 0 ||
2097 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2098 		    zhp != NULL &&
2099 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2100 			/*
2101 			 * Determine if the current path is correct.
2102 			 */
2103 			char *newdevid = path_to_devid(path);
2104 
2105 			if (newdevid == NULL ||
2106 			    strcmp(devid, newdevid) != 0) {
2107 				char *newpath;
2108 
2109 				if ((newpath = devid_to_path(devid)) != NULL) {
2110 					/*
2111 					 * Update the path appropriately.
2112 					 */
2113 					set_path(zhp, nv, newpath);
2114 					if (nvlist_add_string(nv,
2115 					    ZPOOL_CONFIG_PATH, newpath) == 0)
2116 						verify(nvlist_lookup_string(nv,
2117 						    ZPOOL_CONFIG_PATH,
2118 						    &path) == 0);
2119 					free(newpath);
2120 				}
2121 			}
2122 
2123 			if (newdevid)
2124 				devid_str_free(newdevid);
2125 		}
2126 
2127 		if (strncmp(path, "/dev/dsk/", 9) == 0)
2128 			path += 9;
2129 
2130 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2131 		    &value) == 0 && value) {
2132 			char *tmp = zfs_strdup(hdl, path);
2133 			if (tmp == NULL)
2134 				return (NULL);
2135 			tmp[strlen(path) - 2] = '\0';
2136 			return (tmp);
2137 		}
2138 	} else {
2139 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2140 
2141 		/*
2142 		 * If it's a raidz device, we need to stick in the parity level.
2143 		 */
2144 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2145 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2146 			    &value) == 0);
2147 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
2148 			    (u_longlong_t)value);
2149 			path = buf;
2150 		}
2151 	}
2152 
2153 	return (zfs_strdup(hdl, path));
2154 }
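
/*
 * Illustration only: names produced by zpool_vdev_name() for a few
 * hypothetical configs, per the rules above:
 *
 *	path "/dev/dsk/c0t0d0s0", whole_disk set	->  "c0t0d0"
 *	path "/dev/dsk/c0t0d0s4"			->  "c0t0d0s4"
 *	no path, type "raidz", nparity 2		->  "raidz2"
 *	device not present, guid 9203945		->  "9203945"
 */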
2155 
2156 static int
2157 zbookmark_compare(const void *a, const void *b)
2158 {
2159 	return (memcmp(a, b, sizeof (zbookmark_t)));
2160 }
2161 
2162 /*
2163  * Retrieve the persistent error log, uniquify the members, and return to the
2164  * caller.
2165  */
2166 int
2167 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2168 {
2169 	zfs_cmd_t zc = { 0 };
2170 	uint64_t count;
2171 	zbookmark_t *zb = NULL;
2172 	int i;
2173 
2174 	/*
2175 	 * Retrieve the raw error list from the kernel.  If the number of errors
2176 	 * has increased, allocate more space and continue until we get the
2177 	 * entire list.
2178 	 */
2179 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2180 	    &count) == 0);
2181 	if (count == 0)
2182 		return (0);
2183 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2184 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2185 		return (-1);
2186 	zc.zc_nvlist_dst_size = count;
2187 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2188 	for (;;) {
2189 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2190 		    &zc) != 0) {
2191 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
2192 			if (errno == ENOMEM) {
2193 				count = zc.zc_nvlist_dst_size;
2194 				if ((zc.zc_nvlist_dst = (uintptr_t)
2195 				    zfs_alloc(zhp->zpool_hdl, count *
2196 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
2197 					return (-1);
2198 			} else {
2199 				return (-1);
2200 			}
2201 		} else {
2202 			break;
2203 		}
2204 	}
2205 
2206 	/*
2207 	 * Sort the resulting bookmarks.  This is a little confusing due to the
2208 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
2209  * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
2210  * _not_ copied as part of the process.  So we point the start of our
2211  * array appropriately and decrement the total number of elements.
2212 	 */
2213 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2214 	    zc.zc_nvlist_dst_size;
2215 	count -= zc.zc_nvlist_dst_size;
2216 
2217 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2218 
2219 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2220 
2221 	/*
2222 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2223 	 */
2224 	for (i = 0; i < count; i++) {
2225 		nvlist_t *nv;
2226 
2227 		/* ignoring zb_blkid and zb_level for now */
2228 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2229 		    zb[i-1].zb_object == zb[i].zb_object)
2230 			continue;
2231 
2232 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2233 			goto nomem;
2234 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2235 		    zb[i].zb_objset) != 0) {
2236 			nvlist_free(nv);
2237 			goto nomem;
2238 		}
2239 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2240 		    zb[i].zb_object) != 0) {
2241 			nvlist_free(nv);
2242 			goto nomem;
2243 		}
2244 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2245 			nvlist_free(nv);
2246 			goto nomem;
2247 		}
2248 		nvlist_free(nv);
2249 	}
2250 
2251 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2252 	return (0);
2253 
2254 nomem:
2255 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2256 	return (no_memory(zhp->zpool_hdl));
2257 }
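
/*
 * Illustration only: a sketch of walking the error list built above.
 * Each element is an anonymous nvlist carrying ZPOOL_ERR_DATASET and
 * ZPOOL_ERR_OBJECT, so the libnvpair iteration API applies:
 *
 *	nvlist_t *errlist, *nv;
 *	nvpair_t *elem = NULL;
 *	uint64_t dsobj, obj;
 *	char buf[MAXPATHLEN];
 *
 *	if (zpool_get_errlog(zhp, &errlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(errlist, elem)) != NULL) {
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_DATASET, &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_OBJECT, &obj) == 0);
 *			zpool_obj_to_path(zhp, dsobj, obj, buf, sizeof (buf));
 *		}
 *		nvlist_free(errlist);
 *	}
 */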
2258 
2259 /*
2260  * Upgrade a ZFS pool to the latest on-disk version.
2261  */
2262 int
2263 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2264 {
2265 	zfs_cmd_t zc = { 0 };
2266 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2267 
2268 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2269 	zc.zc_cookie = new_version;
2270 
2271 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2272 		return (zpool_standard_error_fmt(hdl, errno,
2273 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2274 		    zhp->zpool_name));
2275 	return (0);
2276 }
2277 
2278 void
2279 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2280     char *history_str)
2281 {
2282 	int i;
2283 
2284 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2285 	for (i = 1; i < argc; i++) {
2286 		if (strlen(history_str) + 1 + strlen(argv[i]) >=
2287 		    HIS_MAX_RECORD_LEN)
2288 			break;
2289 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2290 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2291 	}
2292 }
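
/*
 * Illustration only: given subcommand "create" and argv of
 * { "create", "tank", "mirror", "c0d0", "c1d0" }, the function above
 * produces "create tank mirror c0d0 c1d0" (argv[0] is skipped, and
 * appending stops once HIS_MAX_RECORD_LEN would be exceeded):
 *
 *	char hist[HIS_MAX_RECORD_LEN];
 *	zpool_set_history_str("create", argc, argv, hist);
 */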
2293 
2294 /*
2295  * Stage command history for logging.
2296  */
2297 int
2298 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2299 {
2300 	if (history_str == NULL)
2301 		return (EINVAL);
2302 
2303 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2304 		return (EINVAL);
2305 
2306 	if (hdl->libzfs_log_str != NULL)
2307 		free(hdl->libzfs_log_str);
2308 
2309 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2310 		return (no_memory(hdl));
2311 
2312 	return (0);
2313 }
2314 
2315 /*
2316  * Perform ioctl to get some command history of a pool.
2317  *
2318  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2319  * logical offset of the history buffer to start reading from.
2320  *
2321  * Upon return, 'off' is the next logical offset to read from and
2322  * 'len' is the actual amount of bytes read into 'buf'.
2323  */
2324 static int
2325 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2326 {
2327 	zfs_cmd_t zc = { 0 };
2328 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2329 
2330 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2331 
2332 	zc.zc_history = (uint64_t)(uintptr_t)buf;
2333 	zc.zc_history_len = *len;
2334 	zc.zc_history_offset = *off;
2335 
2336 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2337 		switch (errno) {
2338 		case EPERM:
2339 			return (zfs_error_fmt(hdl, EZFS_PERM,
2340 			    dgettext(TEXT_DOMAIN,
2341 			    "cannot show history for pool '%s'"),
2342 			    zhp->zpool_name));
2343 		case ENOENT:
2344 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2345 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2346 			    "'%s'"), zhp->zpool_name));
2347 		case ENOTSUP:
2348 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2349 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2350 			    "'%s', pool must be upgraded"), zhp->zpool_name));
2351 		default:
2352 			return (zpool_standard_error_fmt(hdl, errno,
2353 			    dgettext(TEXT_DOMAIN,
2354 			    "cannot get history for '%s'"), zhp->zpool_name));
2355 		}
2356 	}
2357 
2358 	*len = zc.zc_history_len;
2359 	*off = zc.zc_history_offset;
2360 
2361 	return (0);
2362 }
2363 
2364 /*
2365  * Process the buffer of nvlists, unpacking and storing each nvlist record
2366  * into 'records'.  'leftover' is set to the number of bytes that weren't
2367  * processed as there wasn't a complete record.
2368  */
2369 static int
2370 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2371     nvlist_t ***records, uint_t *numrecords)
2372 {
2373 	uint64_t reclen;
2374 	nvlist_t *nv;
2375 	int i;
2376 
2377 	while (bytes_read > sizeof (reclen)) {
2378 
2379 		/* get length of packed record (stored as little endian) */
2380 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2381 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2382 
2383 		if (bytes_read < sizeof (reclen) + reclen)
2384 			break;
2385 
2386 		/* unpack record */
2387 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2388 			return (ENOMEM);
2389 		bytes_read -= sizeof (reclen) + reclen;
2390 		buf += sizeof (reclen) + reclen;
2391 
2392 		/* add record to nvlist array */
2393 		(*numrecords)++;
2394 		if (ISP2(*numrecords + 1)) {
2395 			nvlist_t **tmp = realloc(*records,
2396 			    *numrecords * 2 * sizeof (nvlist_t *));
2397 			if (tmp == NULL) {
2398 				nvlist_free(nv);
2399 				return (ENOMEM);
2400 			}
2401 			*records = tmp;
2402 		}
2398 		(*records)[*numrecords - 1] = nv;
2399 	}
2400 
2401 	*leftover = bytes_read;
2402 	return (0);
2403 }
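
/*
 * Illustration only: the record framing decoded above.  Each history
 * record is a 64-bit little-endian byte count followed by that many
 * bytes of packed nvlist; a 42-byte record therefore begins
 *
 *	2a 00 00 00 00 00 00 00 <42 bytes of packed nvlist>
 *
 * and the byte-by-byte shift loop reconstructs reclen == 0x2a
 * regardless of host endianness.
 */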
2404 
2405 #define	HIS_BUF_LEN	(128*1024)
2406 
2407 /*
2408  * Retrieve the command history of a pool.
2409  */
2410 int
2411 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2412 {
2413 	char buf[HIS_BUF_LEN];
2414 	uint64_t off = 0;
2415 	nvlist_t **records = NULL;
2416 	uint_t numrecords = 0;
2417 	int err, i;
2418 
2419 	do {
2420 		uint64_t bytes_read = sizeof (buf);
2421 		uint64_t leftover;
2422 
2423 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2424 			break;
2425 
2426 		/* if nothing else was read in, we're at EOF, just return */
2427 		if (!bytes_read)
2428 			break;
2429 
2430 		if ((err = zpool_history_unpack(buf, bytes_read,
2431 		    &leftover, &records, &numrecords)) != 0)
2432 			break;
2433 		off -= leftover;
2434 
2435 		/* CONSTCOND */
2436 	} while (1);
2437 
2438 	if (!err) {
2439 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2440 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2441 		    records, numrecords) == 0);
2442 	}
2443 	for (i = 0; i < numrecords; i++)
2444 		nvlist_free(records[i]);
2445 	free(records);
2446 
2447 	return (err);
2448 }
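
/*
 * Illustration only: fetching and walking the history records (the
 * dump_nvlist() call is just one way a caller might render them):
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t nrec, i;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis,
 *		    ZPOOL_HIST_RECORD, &records, &nrec) == 0);
 *		for (i = 0; i < nrec; i++)
 *			dump_nvlist(records[i], 8);
 *		nvlist_free(nvhis);
 *	}
 */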
2449 
2450 void
2451 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2452     char *pathname, size_t len)
2453 {
2454 	zfs_cmd_t zc = { 0 };
2455 	boolean_t mounted = B_FALSE;
2456 	char *mntpnt = NULL;
2457 	char dsname[MAXNAMELEN];
2458 
2459 	if (dsobj == 0) {
2460 		/* special case for the MOS */
2461 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
2462 		return;
2463 	}
2464 
2465 	/* get the dataset's name */
2466 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2467 	zc.zc_obj = dsobj;
2468 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
2469 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2470 		/* just write out a path of two object numbers */
2471 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
2472 		    (u_longlong_t)dsobj, (u_longlong_t)obj);
2473 		return;
2474 	}
2475 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2476 
2477 	/* find out if the dataset is mounted */
2478 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2479 
2480 	/* get the corrupted object's path */
2481 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2482 	zc.zc_obj = obj;
2483 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2484 	    &zc) == 0) {
2485 		if (mounted) {
2486 			(void) snprintf(pathname, len, "%s%s", mntpnt,
2487 			    zc.zc_value);
2488 		} else {
2489 			(void) snprintf(pathname, len, "%s:%s",
2490 			    dsname, zc.zc_value);
2491 		}
2492 	} else {
2493 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, (u_longlong_t)obj);
2494 	}
2495 	free(mntpnt);
2496 }
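
/*
 * Illustration only: sample strings zpool_obj_to_path() writes for the
 * cases above (names and object numbers are made up):
 *
 *	MOS object (dsobj == 0)			"<metadata>:<0x1d>"
 *	dataset name unresolvable		"<0x21>:<0x1d>"
 *	dataset mounted at /tank/home		"/tank/home/file.txt"
 *	dataset not mounted			"tank/home:/file.txt"
 *	object path unresolvable		"tank/home:<0x1d>"
 */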
2497 
2498 #define	RDISK_ROOT	"/dev/rdsk"
2499 #define	BACKUP_SLICE	"s2"
2500 /*
2501  * Don't start the slice at the default block of 34; many storage devices
2502  * use a stripe width of 128k (256 512-byte sectors), so start there instead.
2503  */
2504 #define	NEW_START_BLOCK	256
2505 
2506 /*
2507  * Determine where a partition starts on a disk in the current
2508  * configuration.
2509  */
2510 static diskaddr_t
2511 find_start_block(nvlist_t *config)
2512 {
2513 	nvlist_t **child;
2514 	uint_t c, children;
2515 	char *path;
2516 	diskaddr_t sb = MAXOFFSET_T;
2517 	int fd;
2518 	char diskname[MAXPATHLEN];
2519 	uint64_t wholedisk;
2520 
2521 	if (nvlist_lookup_nvlist_array(config,
2522 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2523 		if (nvlist_lookup_uint64(config,
2524 		    ZPOOL_CONFIG_WHOLE_DISK,
2525 		    &wholedisk) != 0 || !wholedisk) {
2526 			return (MAXOFFSET_T);
2527 		}
2528 		if (nvlist_lookup_string(config,
2529 		    ZPOOL_CONFIG_PATH, &path) != 0) {
2530 			return (MAXOFFSET_T);
2531 		}
2532 
2533 		(void) snprintf(diskname, sizeof (diskname), "%s%s",
2534 		    RDISK_ROOT, strrchr(path, '/'));
2535 		if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2536 			struct dk_gpt *vtoc;
2537 			if (efi_alloc_and_read(fd, &vtoc) >= 0) {
2538 				sb = vtoc->efi_parts[0].p_start;
2539 				efi_free(vtoc);
2540 			}
2541 			(void) close(fd);
2542 		}
2543 		return (sb);
2544 	}
2545 
2546 	for (c = 0; c < children; c++) {
2547 		sb = find_start_block(child[c]);
2548 		if (sb != MAXOFFSET_T) {
2549 			return (sb);
2550 		}
2551 	}
2552 	return (MAXOFFSET_T);
2553 }
2554 
2555 /*
2556  * Label an individual disk.  The name provided is the short name,
2557  * stripped of any leading /dev path.
2558  */
2559 int
2560 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2561 {
2562 	char path[MAXPATHLEN];
2563 	struct dk_gpt *vtoc;
2564 	int fd;
2565 	size_t resv = EFI_MIN_RESV_SIZE;
2566 	uint64_t slice_size;
2567 	diskaddr_t start_block;
2568 	char errbuf[1024];
2569 
2570 	/* prepare an error message just in case */
2571 	(void) snprintf(errbuf, sizeof (errbuf),
2572 	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
2573 
2574 	if (zhp) {
2575 		nvlist_t *nvroot;
2576 
2577 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2578 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2579 
2580 		if (zhp->zpool_start_block == 0)
2581 			start_block = find_start_block(nvroot);
2582 		else
2583 			start_block = zhp->zpool_start_block;
2584 		zhp->zpool_start_block = start_block;
2585 	} else {
2586 		/* new pool */
2587 		start_block = NEW_START_BLOCK;
2588 	}
2589 
2590 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2591 	    BACKUP_SLICE);
2592 
2593 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2594 		/*
2595 		 * This shouldn't happen.  We've long since verified that this
2596 		 * is a valid device.
2597 		 */
2598 		zfs_error_aux(hdl,
2599 		    dgettext(TEXT_DOMAIN, "unable to open device"));
2600 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2601 	}
2602 
2603 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2604 		/*
2605 		 * The only way this can fail is if we run out of memory, or we
2606 		 * were unable to read the disk's capacity
2607 		 */
2608 		if (errno == ENOMEM)
2609 			(void) no_memory(hdl);
2610 
2611 		(void) close(fd);
2612 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2613 		    "unable to read disk capacity"), name);
2614 
2615 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2616 	}
2617 
2618 	slice_size = vtoc->efi_last_u_lba + 1;
2619 	slice_size -= EFI_MIN_RESV_SIZE;
2620 	if (start_block == MAXOFFSET_T)
2621 		start_block = NEW_START_BLOCK;
2622 	slice_size -= start_block;
2623 
2624 	vtoc->efi_parts[0].p_start = start_block;
2625 	vtoc->efi_parts[0].p_size = slice_size;
2626 
2627 	/*
2628 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2629 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2630 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2631 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2632 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2633 	 * can get, in the absence of V_OTHER.
2634 	 */
2635 	vtoc->efi_parts[0].p_tag = V_USR;
2636 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2637 
2638 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2639 	vtoc->efi_parts[8].p_size = resv;
2640 	vtoc->efi_parts[8].p_tag = V_RESERVED;
2641 
2642 	if (efi_write(fd, vtoc) != 0) {
2643 		/*
2644 		 * Some block drivers (like pcata) may not support EFI
2645 		 * GPT labels.  Print out a helpful error message
2646 		 * directing the user to manually label the disk and
2647 		 * give a specific slice.
2648 		 */
2649 		(void) close(fd);
2650 		efi_free(vtoc);
2651 
2652 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2653 		    "try using fdisk(1M) and then provide a specific slice"));
2654 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2655 	}
2656 
2657 	(void) close(fd);
2658 	efi_free(vtoc);
2659 	return (0);
2660 }
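
/*
 * Illustration only: the resulting EFI layout for a disk whose usable
 * LBAs run from 0 to efi_last_u_lba, with resv == EFI_MIN_RESV_SIZE:
 *
 *	slice 0 (V_USR, "zfs"):	 [start_block, efi_last_u_lba + 1 - resv)
 *	slice 8 (V_RESERVED):	 the final resv sectors
 */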
2661 
2662 static boolean_t
2663 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
2664 {
2665 	char *type;
2666 	nvlist_t **child;
2667 	uint_t children, c;
2668 
2669 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
2670 	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2671 	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
2672 	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
2673 	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
2674 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2675 		    "vdev type '%s' is not supported"), type);
2676 		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
2677 		return (B_FALSE);
2678 	}
2679 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
2680 	    &child, &children) == 0) {
2681 		for (c = 0; c < children; c++) {
2682 			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
2683 				return (B_FALSE);
2684 		}
2685 	}
2686 	return (B_TRUE);
2687 }
2688 
2689 /*
2690  * Check whether this zvol is allowable for use as a dump device: returns
2691  * 0 if it is, > 0 if it isn't, and < 0 if it isn't a zvol at all.
2692  */
2693 int
2694 zvol_check_dump_config(char *arg)
2695 {
2696 	zpool_handle_t *zhp = NULL;
2697 	nvlist_t *config, *nvroot;
2698 	char *p, *volname;
2699 	nvlist_t **top;
2700 	uint_t toplevels;
2701 	libzfs_handle_t *hdl;
2702 	char errbuf[1024];
2703 	char poolname[ZPOOL_MAXNAMELEN];
2704 	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
2705 	int ret = 1;
2706 
2707 	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen) != 0) {
2708 		return (-1);
2709 	}
2710 
2711 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2712 	    "dump is not supported on device '%s'"), arg);
2713 
2714 	if ((hdl = libzfs_init()) == NULL)
2715 		return (1);
2716 	libzfs_print_on_error(hdl, B_TRUE);
2717 
2718 	volname = arg + pathlen;
2719 
2720 	/* check the configuration of the pool */
2721 	if ((p = strchr(volname, '/')) == NULL) {
2722 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2723 		    "malformed dataset name"));
2724 		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
2725 		goto out;
2726 	} else if (p - volname >= ZFS_MAXNAMELEN) {
2727 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2728 		    "dataset name is too long"));
2729 		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
2730 		goto out;
2731 	} else {
2732 		(void) strncpy(poolname, volname, p - volname);
2733 		poolname[p - volname] = '\0';
2734 	}
2735 
2736 	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
2737 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2738 		    "could not open pool '%s'"), poolname);
2739 		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
2740 		goto out;
2741 	}
2742 	config = zpool_get_config(zhp, NULL);
2743 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2744 	    &nvroot) != 0) {
2745 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2746 		    "could not obtain vdev configuration for  '%s'"), poolname);
2747 		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
2748 		goto out;
2749 	}
2750 
2751 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2752 	    &top, &toplevels) == 0);
2753 	if (toplevels != 1) {
2754 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2755 		    "'%s' has multiple top level vdevs"), poolname);
2756 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
2757 		goto out;
2758 	}
2759 
2760 	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
2761 		goto out;
2762 	}
2763 	ret = 0;
2764 
2765 out:
2766 	if (zhp)
2767 		zpool_close(zhp);
2768 	libzfs_fini(hdl);
2769 	return (ret);
2770 }
2771
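
/*
 * Illustration only: a hypothetical caller vetting a candidate dump
 * device (the "/dev/zvol/dsk/" prefix is an assumption about
 * ZVOL_FULL_DEV_DIR):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/tank/dump") == 0)
 *		(the zvol sits on a single supported top-level vdev)
 */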