xref: /titanic_44/usr/src/lib/libzfs/common/libzfs_pool.c (revision fdd1ecae0dfe07e6aa8ee90687e2e91c876dc189)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <sys/zio.h>
45 #include <strings.h>
46 
47 #include "zfs_namecheck.h"
48 #include "zfs_prop.h"
49 #include "libzfs_impl.h"
50 
51 /*
52  * Validate the given pool name, optionally putting an extended error message in
53  * 'buf'.
54  */
55 static boolean_t
56 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
57 {
58 	namecheck_err_t why;
59 	char what;
60 	int ret;
61 
62 	ret = pool_namecheck(pool, &why, &what);
63 
64 	/*
65 	 * The rules for reserved pool names were extended at a later point.
66 	 * But we need to support users with existing pools that may now be
67 	 * invalid.  So we only check for this expanded set of names during a
68 	 * create (or import), and only in userland.
69 	 */
70 	if (ret == 0 && !isopen &&
71 	    (strncmp(pool, "mirror", 6) == 0 ||
72 	    strncmp(pool, "raidz", 5) == 0 ||
73 	    strncmp(pool, "spare", 5) == 0 ||
74 	    strcmp(pool, "log") == 0)) {
75 		zfs_error_aux(hdl,
76 		    dgettext(TEXT_DOMAIN, "name is reserved"));
77 		return (B_FALSE);
78 	}
79 
80 
81 	if (ret != 0) {
82 		if (hdl != NULL) {
83 			switch (why) {
84 			case NAME_ERR_TOOLONG:
85 				zfs_error_aux(hdl,
86 				    dgettext(TEXT_DOMAIN, "name is too long"));
87 				break;
88 
89 			case NAME_ERR_INVALCHAR:
90 				zfs_error_aux(hdl,
91 				    dgettext(TEXT_DOMAIN, "invalid character "
92 				    "'%c' in pool name"), what);
93 				break;
94 
95 			case NAME_ERR_NOLETTER:
96 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
97 				    "name must begin with a letter"));
98 				break;
99 
100 			case NAME_ERR_RESERVED:
101 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
102 				    "name is reserved"));
103 				break;
104 
105 			case NAME_ERR_DISKLIKE:
106 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
107 				    "pool name is reserved"));
108 				break;
109 
110 			case NAME_ERR_LEADING_SLASH:
111 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
112 				    "leading slash in name"));
113 				break;
114 
115 			case NAME_ERR_EMPTY_COMPONENT:
116 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
117 				    "empty component in name"));
118 				break;
119 
120 			case NAME_ERR_TRAILING_SLASH:
121 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
122 				    "trailing slash in name"));
123 				break;
124 
125 			case NAME_ERR_MULTIPLE_AT:
126 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
127 				    "multiple '@' delimiters in name"));
128 				break;
129 
130 			}
131 		}
132 		return (B_FALSE);
133 	}
134 
135 	return (B_TRUE);
136 }
137 
138 static int
139 zpool_get_all_props(zpool_handle_t *zhp)
140 {
141 	zfs_cmd_t zc = { 0 };
142 	libzfs_handle_t *hdl = zhp->zpool_hdl;
143 
144 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
145 
146 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
147 		return (-1);
148 
149 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
150 		if (errno == ENOMEM) {
151 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
152 				zcmd_free_nvlists(&zc);
153 				return (-1);
154 			}
155 		} else {
156 			zcmd_free_nvlists(&zc);
157 			return (-1);
158 		}
159 	}
160 
161 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
162 		zcmd_free_nvlists(&zc);
163 		return (-1);
164 	}
165 
166 	zcmd_free_nvlists(&zc);
167 
168 	return (0);
169 }
170 
171 /*
172  * Open a handle to the given pool, even if the pool is currently in the FAULTED
173  * state.
174  */
175 zpool_handle_t *
176 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
177 {
178 	zpool_handle_t *zhp;
179 	boolean_t missing;
180 
181 	/*
182 	 * Make sure the pool name is valid.
183 	 */
184 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
185 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
186 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
187 		    pool);
188 		return (NULL);
189 	}
190 
191 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
192 		return (NULL);
193 
194 	zhp->zpool_hdl = hdl;
195 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
196 
197 	if (zpool_refresh_stats(zhp, &missing) != 0) {
198 		zpool_close(zhp);
199 		return (NULL);
200 	}
201 
202 	if (missing) {
203 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
204 		    "no such pool"));
205 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
206 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
207 		    pool);
208 		zpool_close(zhp);
209 		return (NULL);
210 	}
211 
212 	return (zhp);
213 }
214 
215 /*
216  * Like the above, but silent on error.  Used when iterating over pools (because
217  * the configuration cache may be out of date).
218  */
219 int
220 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
221 {
222 	zpool_handle_t *zhp;
223 	boolean_t missing;
224 
225 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
226 		return (-1);
227 
228 	zhp->zpool_hdl = hdl;
229 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
230 
231 	if (zpool_refresh_stats(zhp, &missing) != 0) {
232 		zpool_close(zhp);
233 		return (-1);
234 	}
235 
236 	if (missing) {
237 		zpool_close(zhp);
238 		*ret = NULL;
239 		return (0);
240 	}
241 
242 	*ret = zhp;
243 	return (0);
244 }
245 
246 /*
247  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
248  * state.
249  */
250 zpool_handle_t *
251 zpool_open(libzfs_handle_t *hdl, const char *pool)
252 {
253 	zpool_handle_t *zhp;
254 
255 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
256 		return (NULL);
257 
258 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
259 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
260 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
261 		zpool_close(zhp);
262 		return (NULL);
263 	}
264 
265 	return (zhp);
266 }
267 
268 /*
269  * Close the handle.  Simply frees the memory associated with the handle.
270  */
271 void
272 zpool_close(zpool_handle_t *zhp)
273 {
274 	if (zhp->zpool_config)
275 		nvlist_free(zhp->zpool_config);
276 	if (zhp->zpool_old_config)
277 		nvlist_free(zhp->zpool_old_config);
278 	if (zhp->zpool_props)
279 		nvlist_free(zhp->zpool_props);
280 	free(zhp);
281 }
282 
283 /*
284  * Return the name of the pool.
285  */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	/*
	 * The returned pointer refers to storage inside the handle and is
	 * only valid until zpool_close() frees the handle.
	 */
	return (zhp->zpool_name);
}
291 
292 /*
293  * Return the GUID of the pool.
294  */
295 uint64_t
296 zpool_get_guid(zpool_handle_t *zhp)
297 {
298 	uint64_t guid;
299 
300 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
301 	    &guid) == 0);
302 	return (guid);
303 }
304 
305 /*
306  * Return the version of the pool.
307  */
308 uint64_t
309 zpool_get_version(zpool_handle_t *zhp)
310 {
311 	uint64_t version;
312 
313 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
314 	    &version) == 0);
315 
316 	return (version);
317 }
318 
319 /*
320  * Return the amount of space currently consumed by the pool.
321  */
322 uint64_t
323 zpool_get_space_used(zpool_handle_t *zhp)
324 {
325 	nvlist_t *nvroot;
326 	vdev_stat_t *vs;
327 	uint_t vsc;
328 
329 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
330 	    &nvroot) == 0);
331 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
332 	    (uint64_t **)&vs, &vsc) == 0);
333 
334 	return (vs->vs_alloc);
335 }
336 
337 /*
338  * Return the total space in the pool.
339  */
340 uint64_t
341 zpool_get_space_total(zpool_handle_t *zhp)
342 {
343 	nvlist_t *nvroot;
344 	vdev_stat_t *vs;
345 	uint_t vsc;
346 
347 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
348 	    &nvroot) == 0);
349 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
350 	    (uint64_t **)&vs, &vsc) == 0);
351 
352 	return (vs->vs_space);
353 }
354 
355 /*
356  * Return the alternate root for this pool, if any.
357  */
358 int
359 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
360 {
361 	zfs_cmd_t zc = { 0 };
362 
363 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
364 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
365 	    zc.zc_value[0] == '\0')
366 		return (-1);
367 
368 	(void) strlcpy(buf, zc.zc_value, buflen);
369 
370 	return (0);
371 }
372 
373 /*
374  * Return the state of the pool (ACTIVE or UNAVAILABLE)
375  */
int
zpool_get_state(zpool_handle_t *zhp)
{
	/* POOL_STATE_* value recorded on the handle. */
	return (zhp->zpool_state);
}
381 
382 /*
383  * Create the named pool, using the provided vdev list.  It is assumed
384  * that the consumer has already validated the contents of the nvlist, so we
385  * don't have to worry about error semantics.
386  */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];		/* leading text for all error reports below */

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	/* B_FALSE: apply the extended reserved-name checks on creation. */
	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	/* An alternate root must be an absolute path. */
	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error_fmt(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));

	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if (altroot != NULL)
		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc) != 0) {
		zcmd_free_nvlists(&zc);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}
	zcmd_free_nvlists(&zc);

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (altroot != NULL) {
		zfs_handle_t *zhp;

		/* The pool was just created, so opening it should not fail. */
		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

	return (0);
}
472 
473 /*
474  * Destroy the given pool.  It is up to the caller to ensure that there are no
475  * datasets left in the pool.
476  */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	/*
	 * For an active pool, grab a handle to the root filesystem first so
	 * its mountpoint can be cleaned up after the destroy succeeds.
	 */
	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	/* The pool is gone; remove the root dataset's mountpoint. */
	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
519 
520 /*
521  * Add the given vdevs to the pool.  The caller must have already performed the
522  * necessary verification to ensure that the vdev specification is well-formed.
523  */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares;
	uint_t nspares;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	/* Hot spares require at least the SPA_VERSION_SPARES pool version. */
	if (zpool_get_version(zhp) < SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
608 
609 /*
610  * Exports the pool from the system.  The caller must ensure that there are no
611  * mounted datasets in the pool.
612  */
613 int
614 zpool_export(zpool_handle_t *zhp)
615 {
616 	zfs_cmd_t zc = { 0 };
617 
618 	if (zpool_remove_zvol_links(zhp) != 0)
619 		return (-1);
620 
621 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
622 
623 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0)
624 		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
625 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
626 		    zhp->zpool_name));
627 	return (0);
628 }
629 
630 /*
631  * Import the given pool using the known configuration.  The configuration
632  * should have come from zpool_find_import().  The 'newname' and 'altroot'
633  * parameters control whether the pool is imported with a different name or with
634  * an alternate root, respectively.
635  */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *thename;
	char *origname;
	int ret;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	/* Import under the new name if given, else keep the original one. */
	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	/* An alternate root must be an absolute path. */
	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error_fmt(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
		    altroot));

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	if (altroot != NULL)
		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
	else
		zc.zc_value[0] = '\0';

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
		return (-1);

	ret = 0;
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
			ret = -1;
		} else if (zhp != NULL) {
			/* Recreate the /dev/zvol links for the new pool. */
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}

	}


	zcmd_free_nvlists(&zc);
	return (ret);
}
724 
725 /*
726  * Scrub the pool.
727  */
728 int
729 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
730 {
731 	zfs_cmd_t zc = { 0 };
732 	char msg[1024];
733 	libzfs_handle_t *hdl = zhp->zpool_hdl;
734 
735 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
736 	zc.zc_cookie = type;
737 
738 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
739 		return (0);
740 
741 	(void) snprintf(msg, sizeof (msg),
742 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
743 
744 	if (errno == EBUSY)
745 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
746 	else
747 		return (zpool_standard_error(hdl, errno, msg));
748 }
749 
750 /*
751  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
752  * spare; but FALSE if its an INUSE spare.
753  */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *avail_spare)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	/* No match at this vdev; recurse into regular children first ... */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++)
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    avail_spare)) != NULL)
			return (ret);

	/* ... then into the spares, flagging any match as an AVAIL spare. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
814 
815 nvlist_t *
816 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
817 {
818 	char buf[MAXPATHLEN];
819 	const char *search;
820 	char *end;
821 	nvlist_t *nvroot;
822 	uint64_t guid;
823 
824 	guid = strtoull(path, &end, 10);
825 	if (guid != 0 && *end == '\0') {
826 		search = NULL;
827 	} else if (path[0] != '/') {
828 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
829 		search = buf;
830 	} else {
831 		search = path;
832 	}
833 
834 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
835 	    &nvroot) == 0);
836 
837 	*avail_spare = B_FALSE;
838 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
839 }
840 
841 /*
842  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
843  */
844 static boolean_t
845 is_spare(zpool_handle_t *zhp, uint64_t guid)
846 {
847 	uint64_t spare_guid;
848 	nvlist_t *nvroot;
849 	nvlist_t **spares;
850 	uint_t nspares;
851 	int i;
852 
853 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
854 	    &nvroot) == 0);
855 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
856 	    &spares, &nspares) == 0) {
857 		for (i = 0; i < nspares; i++) {
858 			verify(nvlist_lookup_uint64(spares[i],
859 			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
860 			if (guid == spare_guid)
861 				return (B_TRUE);
862 		}
863 	}
864 
865 	return (B_FALSE);
866 }
867 
868 /*
869  * Bring the specified vdev online.   The 'flags' parameter is a set of the
870  * ZFS_ONLINE_* flags.
871  */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	/* Hot spares (whether AVAIL or INUSE) cannot be onlined. */
	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;


	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
		return (zpool_standard_error(hdl, errno, msg));

	/* The resulting vdev state comes back in zc_cookie. */
	*newstate = zc.zc_cookie;
	return (0);
}
904 
905 /*
906  * Take the specified vdev offline
907  */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	/* Hot spares (whether AVAIL or INUSE) cannot be taken offline. */
	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	/* istmp selects ZFS_OFFLINE_TEMPORARY per the flag's name. */
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
947 
948 /*
949  * Mark the given vdev faulted.
950  */
951 int
952 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
953 {
954 	zfs_cmd_t zc = { 0 };
955 	char msg[1024];
956 	libzfs_handle_t *hdl = zhp->zpool_hdl;
957 
958 	(void) snprintf(msg, sizeof (msg),
959 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
960 
961 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
962 	zc.zc_guid = guid;
963 	zc.zc_cookie = VDEV_STATE_FAULTED;
964 
965 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
966 		return (0);
967 
968 	switch (errno) {
969 	case EBUSY:
970 
971 		/*
972 		 * There are no other replicas of this device.
973 		 */
974 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
975 
976 	default:
977 		return (zpool_standard_error(hdl, errno, msg));
978 	}
979 
980 }
981 
982 /*
983  * Mark the given vdev degraded.
984  */
985 int
986 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
987 {
988 	zfs_cmd_t zc = { 0 };
989 	char msg[1024];
990 	libzfs_handle_t *hdl = zhp->zpool_hdl;
991 
992 	(void) snprintf(msg, sizeof (msg),
993 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
994 
995 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
996 	zc.zc_guid = guid;
997 	zc.zc_cookie = VDEV_STATE_DEGRADED;
998 
999 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1000 		return (0);
1001 
1002 	return (zpool_standard_error(hdl, errno, msg));
1003 }
1004 
1005 /*
1006  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1007  * a hot spare.
1008  */
1009 static boolean_t
1010 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1011 {
1012 	nvlist_t **child;
1013 	uint_t c, children;
1014 	char *type;
1015 
1016 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1017 	    &children) == 0) {
1018 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1019 		    &type) == 0);
1020 
1021 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1022 		    children == 2 && child[which] == tgt)
1023 			return (B_TRUE);
1024 
1025 		for (c = 0; c < children; c++)
1026 			if (is_replacing_spare(child[c], tgt, which))
1027 				return (B_TRUE);
1028 	}
1029 
1030 	return (B_FALSE);
1031 }
1032 
1033 /*
1034  * Attach new_disk (fully described by nvroot) to old_disk.
1035  * If 'replacing' is specified, the new disk will replace the old one.
1036  */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare;
	uint64_t val, is_log;
	char *path;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* An AVAIL hot spare cannot be attached to or replaced. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	/* The supplied vdev spec must describe exactly one new device. */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
		return (-1);

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			is_log = B_FALSE;
			(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_LOG,
			    &is_log);
			if (is_log)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
1186 
1187 /*
1188  * Detach the specified device.
1189  */
1190 int
1191 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1192 {
1193 	zfs_cmd_t zc = { 0 };
1194 	char msg[1024];
1195 	nvlist_t *tgt;
1196 	boolean_t avail_spare;
1197 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1198 
1199 	(void) snprintf(msg, sizeof (msg),
1200 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1201 
1202 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1203 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1204 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1205 
1206 	if (avail_spare)
1207 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1208 
1209 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1210 
1211 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1212 		return (0);
1213 
1214 	switch (errno) {
1215 
1216 	case ENOTSUP:
1217 		/*
1218 		 * Can't detach from this type of vdev.
1219 		 */
1220 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1221 		    "applicable to mirror and replacing vdevs"));
1222 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1223 		break;
1224 
1225 	case EBUSY:
1226 		/*
1227 		 * There are no other replicas of this device.
1228 		 */
1229 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1230 		break;
1231 
1232 	default:
1233 		(void) zpool_standard_error(hdl, errno, msg);
1234 	}
1235 
1236 	return (-1);
1237 }
1238 
1239 /*
1240  * Remove the given device.  Currently, this is supported only for hot spares.
1241  */
1242 int
1243 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1244 {
1245 	zfs_cmd_t zc = { 0 };
1246 	char msg[1024];
1247 	nvlist_t *tgt;
1248 	boolean_t avail_spare;
1249 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1250 
1251 	(void) snprintf(msg, sizeof (msg),
1252 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1253 
1254 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1255 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1256 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1257 
1258 	if (!avail_spare) {
1259 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1260 		    "only inactive hot spares can be removed"));
1261 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1262 	}
1263 
1264 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1265 
1266 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1267 		return (0);
1268 
1269 	return (zpool_standard_error(hdl, errno, msg));
1270 }
1271 
1272 /*
1273  * Clear the errors for the pool, or the particular device if specified.
1274  */
1275 int
1276 zpool_clear(zpool_handle_t *zhp, const char *path)
1277 {
1278 	zfs_cmd_t zc = { 0 };
1279 	char msg[1024];
1280 	nvlist_t *tgt;
1281 	boolean_t avail_spare;
1282 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1283 
1284 	if (path)
1285 		(void) snprintf(msg, sizeof (msg),
1286 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1287 		    path);
1288 	else
1289 		(void) snprintf(msg, sizeof (msg),
1290 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1291 		    zhp->zpool_name);
1292 
1293 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1294 	if (path) {
1295 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1296 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1297 
1298 		if (avail_spare)
1299 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1300 
1301 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1302 		    &zc.zc_guid) == 0);
1303 	}
1304 
1305 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
1306 		return (0);
1307 
1308 	return (zpool_standard_error(hdl, errno, msg));
1309 }
1310 
1311 /*
1312  * Similar to zpool_clear(), but takes a GUID (used by fmd).
1313  */
1314 int
1315 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
1316 {
1317 	zfs_cmd_t zc = { 0 };
1318 	char msg[1024];
1319 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1320 
1321 	(void) snprintf(msg, sizeof (msg),
1322 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
1323 	    guid);
1324 
1325 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1326 	zc.zc_guid = guid;
1327 
1328 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1329 		return (0);
1330 
1331 	return (zpool_standard_error(hdl, errno, msg));
1332 }
1333 
1334 /*
1335  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1336  * hierarchy.
1337  */
1338 int
1339 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1340     void *data)
1341 {
1342 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1343 	char (*paths)[MAXPATHLEN];
1344 	size_t size = 4;
1345 	int curr, fd, base, ret = 0;
1346 	DIR *dirp;
1347 	struct dirent *dp;
1348 	struct stat st;
1349 
1350 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
1351 		return (errno == ENOENT ? 0 : -1);
1352 
1353 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
1354 		int err = errno;
1355 		(void) close(base);
1356 		return (err == ENOENT ? 0 : -1);
1357 	}
1358 
1359 	/*
1360 	 * Oddly this wasn't a directory -- ignore that failure since we
1361 	 * know there are no links lower in the (non-existant) hierarchy.
1362 	 */
1363 	if (!S_ISDIR(st.st_mode)) {
1364 		(void) close(base);
1365 		return (0);
1366 	}
1367 
1368 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1369 		(void) close(base);
1370 		return (-1);
1371 	}
1372 
1373 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1374 	curr = 0;
1375 
1376 	while (curr >= 0) {
1377 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
1378 			goto err;
1379 
1380 		if (S_ISDIR(st.st_mode)) {
1381 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
1382 				goto err;
1383 
1384 			if ((dirp = fdopendir(fd)) == NULL) {
1385 				(void) close(fd);
1386 				goto err;
1387 			}
1388 
1389 			while ((dp = readdir(dirp)) != NULL) {
1390 				if (dp->d_name[0] == '.')
1391 					continue;
1392 
1393 				if (curr + 1 == size) {
1394 					paths = zfs_realloc(hdl, paths,
1395 					    size * sizeof (paths[0]),
1396 					    size * 2 * sizeof (paths[0]));
1397 					if (paths == NULL) {
1398 						(void) closedir(dirp);
1399 						(void) close(fd);
1400 						goto err;
1401 					}
1402 
1403 					size *= 2;
1404 				}
1405 
1406 				(void) strlcpy(paths[curr + 1], paths[curr],
1407 				    sizeof (paths[curr + 1]));
1408 				(void) strlcat(paths[curr], "/",
1409 				    sizeof (paths[curr]));
1410 				(void) strlcat(paths[curr], dp->d_name,
1411 				    sizeof (paths[curr]));
1412 				curr++;
1413 			}
1414 
1415 			(void) closedir(dirp);
1416 
1417 		} else {
1418 			if ((ret = cb(paths[curr], data)) != 0)
1419 				break;
1420 		}
1421 
1422 		curr--;
1423 	}
1424 
1425 	free(paths);
1426 	(void) close(base);
1427 
1428 	return (ret);
1429 
1430 err:
1431 	free(paths);
1432 	(void) close(base);
1433 	return (-1);
1434 }
1435 
/*
 * Callback state for zvol link iteration.
 * NOTE(review): not referenced within this portion of the file -- confirm
 * it is still used elsewhere before relying on these fields.
 */
typedef struct zvol_cb {
	zpool_handle_t *zcb_pool;	/* pool being walked */
	boolean_t zcb_create;		/* presumably B_TRUE when creating links -- verify */
} zvol_cb_t;
1440 
/*
 * zfs_iter_children() callback: create the /dev minor link for every zvol
 * in the subtree rooted at 'zhp'.  'data' is unused.
 */
/*ARGSUSED*/
static int
do_zvol_create(zfs_handle_t *zhp, void *data)
{
	int ret;

	/* Link creation failure is deliberately ignored (best effort). */
	if (ZFS_IS_VOLUME(zhp))
		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);

	ret = zfs_iter_children(zhp, do_zvol_create, NULL);

	/* This callback consumes (closes) the handle passed to it. */
	zfs_close(zhp);

	return (ret);
}
1456 
1457 /*
1458  * Iterate over all zvols in the pool and make any necessary minor nodes.
1459  */
1460 int
1461 zpool_create_zvol_links(zpool_handle_t *zhp)
1462 {
1463 	zfs_handle_t *zfp;
1464 	int ret;
1465 
1466 	/*
1467 	 * If the pool is unavailable, just return success.
1468 	 */
1469 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1470 	    zhp->zpool_name)) == NULL)
1471 		return (0);
1472 
1473 	ret = zfs_iter_children(zfp, do_zvol_create, NULL);
1474 
1475 	zfs_close(zfp);
1476 	return (ret);
1477 }
1478 
1479 static int
1480 do_zvol_remove(const char *dataset, void *data)
1481 {
1482 	zpool_handle_t *zhp = data;
1483 
1484 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
1485 }
1486 
1487 /*
1488  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
1489  * by examining the /dev links so that a corrupted pool doesn't impede this
1490  * operation.
1491  */
1492 int
1493 zpool_remove_zvol_links(zpool_handle_t *zhp)
1494 {
1495 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
1496 }
1497 
1498 /*
1499  * Convert from a devid string to a path.
1500  */
1501 static char *
1502 devid_to_path(char *devid_str)
1503 {
1504 	ddi_devid_t devid;
1505 	char *minor;
1506 	char *path;
1507 	devid_nmlist_t *list = NULL;
1508 	int ret;
1509 
1510 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1511 		return (NULL);
1512 
1513 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1514 
1515 	devid_str_free(minor);
1516 	devid_free(devid);
1517 
1518 	if (ret != 0)
1519 		return (NULL);
1520 
1521 	if ((path = strdup(list[0].devname)) == NULL)
1522 		return (NULL);
1523 
1524 	devid_free_nmlist(list);
1525 
1526 	return (path);
1527 }
1528 
1529 /*
1530  * Convert from a path to a devid string.
1531  */
1532 static char *
1533 path_to_devid(const char *path)
1534 {
1535 	int fd;
1536 	ddi_devid_t devid;
1537 	char *minor, *ret;
1538 
1539 	if ((fd = open(path, O_RDONLY)) < 0)
1540 		return (NULL);
1541 
1542 	minor = NULL;
1543 	ret = NULL;
1544 	if (devid_get(fd, &devid) == 0) {
1545 		if (devid_get_minor_name(fd, &minor) == 0)
1546 			ret = devid_str_encode(devid, minor);
1547 		if (minor != NULL)
1548 			devid_str_free(minor);
1549 		devid_free(devid);
1550 	}
1551 	(void) close(fd);
1552 
1553 	return (ret);
1554 }
1555 
1556 /*
1557  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1558  * ignore any failure here, since a common case is for an unprivileged user to
1559  * type 'zpool status', and we'll display the correct information anyway.
1560  */
1561 static void
1562 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1563 {
1564 	zfs_cmd_t zc = { 0 };
1565 
1566 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1567 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
1568 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1569 	    &zc.zc_guid) == 0);
1570 
1571 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1572 }
1573 
1574 /*
1575  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1576  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1577  * We also check if this is a whole disk, in which case we strip off the
1578  * trailing 's0' slice name.
1579  *
1580  * This routine is also responsible for identifying when disks have been
1581  * reconfigured in a new location.  The kernel will have opened the device by
1582  * devid, but the path will still refer to the old location.  To catch this, we
1583  * first do a path -> devid translation (which is fast for the common case).  If
1584  * the devid matches, we're done.  If not, we do a reverse devid -> path
1585  * translation and issue the appropriate ioctl() to update the path of the vdev.
1586  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1587  * of these checks.
1588  */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	/*
	 * A vdev that could not be opened at import time has no usable
	 * path; display its GUID instead.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				/* The disk moved; locate it by devid. */
				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately,
					 * both in the kernel and in the
					 * cached config nvlist.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		/* Strip the leading "/dev/dsk/" for display. */
		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		/*
		 * Whole disks are shown without the trailing slice name.
		 * NOTE(review): this assumes a two-character suffix
		 * ("s0") -- confirm that whole-disk paths always end
		 * that way.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(hdl, path);
			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		/* No path at all: fall back to the vdev type name. */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	/* The caller is responsible for freeing the returned copy. */
	return (zfs_strdup(hdl, path));
}
1673 
/*
 * qsort() comparator ordering zbookmark_t entries bytewise.
 * NOTE(review): memcmp() also compares any padding bytes; this assumes
 * zbookmark_t has none, or that entries arrive zero-filled from the
 * kernel -- confirm.
 */
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}
1679 
1680 /*
1681  * Retrieve the persistent error log, uniquify the members, and return to the
1682  * caller.
1683  */
1684 int
1685 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
1686 {
1687 	zfs_cmd_t zc = { 0 };
1688 	uint64_t count;
1689 	zbookmark_t *zb = NULL;
1690 	int i;
1691 
1692 	/*
1693 	 * Retrieve the raw error list from the kernel.  If the number of errors
1694 	 * has increased, allocate more space and continue until we get the
1695 	 * entire list.
1696 	 */
1697 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1698 	    &count) == 0);
1699 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1700 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
1701 		return (-1);
1702 	zc.zc_nvlist_dst_size = count;
1703 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1704 	for (;;) {
1705 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1706 		    &zc) != 0) {
1707 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
1708 			if (errno == ENOMEM) {
1709 				count = zc.zc_nvlist_dst_size;
1710 				if ((zc.zc_nvlist_dst = (uintptr_t)
1711 				    zfs_alloc(zhp->zpool_hdl, count *
1712 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
1713 					return (-1);
1714 			} else {
1715 				return (-1);
1716 			}
1717 		} else {
1718 			break;
1719 		}
1720 	}
1721 
1722 	/*
1723 	 * Sort the resulting bookmarks.  This is a little confusing due to the
1724 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
1725 	 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
1726 	 * _not_ copied as part of the process.  So we point the start of our
1727 	 * array appropriate and decrement the total number of elements.
1728 	 */
1729 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
1730 	    zc.zc_nvlist_dst_size;
1731 	count -= zc.zc_nvlist_dst_size;
1732 
1733 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1734 
1735 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
1736 
1737 	/*
1738 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
1739 	 */
1740 	for (i = 0; i < count; i++) {
1741 		nvlist_t *nv;
1742 
1743 		/* ignoring zb_blkid and zb_level for now */
1744 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
1745 		    zb[i-1].zb_object == zb[i].zb_object)
1746 			continue;
1747 
1748 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
1749 			goto nomem;
1750 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
1751 		    zb[i].zb_objset) != 0) {
1752 			nvlist_free(nv);
1753 			goto nomem;
1754 		}
1755 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
1756 		    zb[i].zb_object) != 0) {
1757 			nvlist_free(nv);
1758 			goto nomem;
1759 		}
1760 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
1761 			nvlist_free(nv);
1762 			goto nomem;
1763 		}
1764 		nvlist_free(nv);
1765 	}
1766 
1767 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1768 	return (0);
1769 
1770 nomem:
1771 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1772 	return (no_memory(zhp->zpool_hdl));
1773 }
1774 
1775 /*
1776  * Upgrade a ZFS pool to the latest on-disk version.
1777  */
1778 int
1779 zpool_upgrade(zpool_handle_t *zhp)
1780 {
1781 	zfs_cmd_t zc = { 0 };
1782 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1783 
1784 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1785 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1786 		return (zpool_standard_error_fmt(hdl, errno,
1787 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1788 		    zhp->zpool_name));
1789 	return (0);
1790 }
1791 
1792 /*
1793  * Log command history.
1794  *
1795  * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
1796  * otherwise ('zfs').  'pool_create' is B_TRUE if we are logging the creation
1797  * of the pool; B_FALSE otherwise.  'path' is the pathname containing the
1798  * poolname.  'argc' and 'argv' are used to construct the command string.
1799  */
1800 void
1801 zpool_stage_history(libzfs_handle_t *hdl, int argc, char **argv,
1802     boolean_t zfs_cmd, boolean_t pool_create)
1803 {
1804 	char *cmd_buf;
1805 	int i;
1806 
1807 	if (hdl->libzfs_log_str != NULL) {
1808 		free(hdl->libzfs_log_str);
1809 	}
1810 
1811 	if ((hdl->libzfs_log_str = zfs_alloc(hdl, HIS_MAX_RECORD_LEN)) == NULL)
1812 		return;
1813 
1814 	hdl->libzfs_log_type =
1815 	    (pool_create == B_TRUE) ? LOG_CMD_POOL_CREATE : LOG_CMD_NORMAL;
1816 	cmd_buf = hdl->libzfs_log_str;
1817 
1818 	/* construct the command string */
1819 	(void) strlcpy(cmd_buf, zfs_cmd ? "zfs" : "zpool",
1820 	    HIS_MAX_RECORD_LEN);
1821 	for (i = 1; i < argc; i++) {
1822 		if (strlen(cmd_buf) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
1823 			break;
1824 		(void) strlcat(cmd_buf, " ", HIS_MAX_RECORD_LEN);
1825 		(void) strlcat(cmd_buf, argv[i], HIS_MAX_RECORD_LEN);
1826 	}
1827 }
1828 
1829 /*
1830  * Perform ioctl to get some command history of a pool.
1831  *
1832  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
1833  * logical offset of the history buffer to start reading from.
1834  *
1835  * Upon return, 'off' is the next logical offset to read from and
1836  * 'len' is the actual amount of bytes read into 'buf'.
1837  */
1838 static int
1839 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
1840 {
1841 	zfs_cmd_t zc = { 0 };
1842 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1843 
1844 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1845 
1846 	zc.zc_history = (uint64_t)(uintptr_t)buf;
1847 	zc.zc_history_len = *len;
1848 	zc.zc_history_offset = *off;
1849 
1850 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
1851 		switch (errno) {
1852 		case EPERM:
1853 			return (zfs_error_fmt(hdl, EZFS_PERM,
1854 			    dgettext(TEXT_DOMAIN,
1855 			    "cannot show history for pool '%s'"),
1856 			    zhp->zpool_name));
1857 		case ENOENT:
1858 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
1859 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1860 			    "'%s'"), zhp->zpool_name));
1861 		case ENOTSUP:
1862 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
1863 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1864 			    "'%s', pool must be upgraded"), zhp->zpool_name));
1865 		default:
1866 			return (zpool_standard_error_fmt(hdl, errno,
1867 			    dgettext(TEXT_DOMAIN,
1868 			    "cannot get history for '%s'"), zhp->zpool_name));
1869 		}
1870 	}
1871 
1872 	*len = zc.zc_history_len;
1873 	*off = zc.zc_history_offset;
1874 
1875 	return (0);
1876 }
1877 
1878 /*
1879  * Process the buffer of nvlists, unpacking and storing each nvlist record
1880  * into 'records'.  'leftover' is set to the number of bytes that weren't
1881  * processed as there wasn't a complete record.
1882  */
1883 static int
1884 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
1885     nvlist_t ***records, uint_t *numrecords)
1886 {
1887 	uint64_t reclen;
1888 	nvlist_t *nv;
1889 	int i;
1890 
1891 	while (bytes_read > sizeof (reclen)) {
1892 
1893 		/* get length of packed record (stored as little endian) */
1894 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
1895 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
1896 
1897 		if (bytes_read < sizeof (reclen) + reclen)
1898 			break;
1899 
1900 		/* unpack record */
1901 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
1902 			return (ENOMEM);
1903 		bytes_read -= sizeof (reclen) + reclen;
1904 		buf += sizeof (reclen) + reclen;
1905 
1906 		/* add record to nvlist array */
1907 		(*numrecords)++;
1908 		if (ISP2(*numrecords + 1)) {
1909 			*records = realloc(*records,
1910 			    *numrecords * 2 * sizeof (nvlist_t *));
1911 		}
1912 		(*records)[*numrecords - 1] = nv;
1913 	}
1914 
1915 	*leftover = bytes_read;
1916 	return (0);
1917 }
1918 
1919 #define	HIS_BUF_LEN	(128*1024)
1920 
1921 /*
1922  * Retrieve the command history of a pool.
1923  */
1924 int
1925 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
1926 {
1927 	char buf[HIS_BUF_LEN];
1928 	uint64_t off = 0;
1929 	nvlist_t **records = NULL;
1930 	uint_t numrecords = 0;
1931 	int err, i;
1932 
1933 	do {
1934 		uint64_t bytes_read = sizeof (buf);
1935 		uint64_t leftover;
1936 
1937 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
1938 			break;
1939 
1940 		/* if nothing else was read in, we're at EOF, just return */
1941 		if (!bytes_read)
1942 			break;
1943 
1944 		if ((err = zpool_history_unpack(buf, bytes_read,
1945 		    &leftover, &records, &numrecords)) != 0)
1946 			break;
1947 		off -= leftover;
1948 
1949 		/* CONSTCOND */
1950 	} while (1);
1951 
1952 	if (!err) {
1953 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
1954 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
1955 		    records, numrecords) == 0);
1956 	}
1957 	for (i = 0; i < numrecords; i++)
1958 		nvlist_free(records[i]);
1959 	free(records);
1960 
1961 	return (err);
1962 }
1963 
/*
 * Translate a <dataset objset, object> pair into a human-readable pathname,
 * writing at most 'len' bytes into 'pathname'.  Falls back to raw object
 * numbers when the dataset or object cannot be resolved.
 */
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    dsobj, obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			/* Absolute path: mountpoint plus in-dataset path. */
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
	}
	/* is_mounted() may have allocated the mountpoint string. */
	free(mntpnt);
}
2011 
2012 #define	RDISK_ROOT	"/dev/rdsk"
2013 #define	BACKUP_SLICE	"s2"
2014 /*
2015  * Don't start the slice at the default block of 34; many storage
2016  * devices will use a stripe width of 128k, so start there instead.
2017  */
2018 #define	NEW_START_BLOCK	256
2019 
2020 /*
2021  * determine where a partition starts on a disk in the current
2022  * configuration
2023  */
2024 static diskaddr_t
2025 find_start_block(nvlist_t *config)
2026 {
2027 	nvlist_t **child;
2028 	uint_t c, children;
2029 	char *path;
2030 	diskaddr_t sb = MAXOFFSET_T;
2031 	int fd;
2032 	char diskname[MAXPATHLEN];
2033 	uint64_t wholedisk;
2034 
2035 	if (nvlist_lookup_nvlist_array(config,
2036 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2037 		if (nvlist_lookup_uint64(config,
2038 		    ZPOOL_CONFIG_WHOLE_DISK,
2039 		    &wholedisk) != 0 || !wholedisk) {
2040 			return (MAXOFFSET_T);
2041 		}
2042 		if (nvlist_lookup_string(config,
2043 		    ZPOOL_CONFIG_PATH, &path) != 0) {
2044 			return (MAXOFFSET_T);
2045 		}
2046 
2047 		(void) snprintf(diskname, sizeof (diskname), "%s%s",
2048 		    RDISK_ROOT, strrchr(path, '/'));
2049 		if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2050 			struct dk_gpt *vtoc;
2051 			if (efi_alloc_and_read(fd, &vtoc) >= 0) {
2052 				sb = vtoc->efi_parts[0].p_start;
2053 				efi_free(vtoc);
2054 			}
2055 			(void) close(fd);
2056 		}
2057 		return (sb);
2058 	}
2059 
2060 	for (c = 0; c < children; c++) {
2061 		sb = find_start_block(child[c]);
2062 		if (sb != MAXOFFSET_T) {
2063 			return (sb);
2064 		}
2065 	}
2066 	return (MAXOFFSET_T);
2067 }
2068 
2069 /*
2070  * Label an individual disk.  The name provided is the short name,
2071  * stripped of any leading /dev path.
2072  */
2073 int
2074 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2075 {
2076 	char path[MAXPATHLEN];
2077 	struct dk_gpt *vtoc;
2078 	int fd;
2079 	size_t resv = EFI_MIN_RESV_SIZE;
2080 	uint64_t slice_size;
2081 	diskaddr_t start_block;
2082 	char errbuf[1024];
2083 
2084 	if (zhp) {
2085 		nvlist_t *nvroot;
2086 
2087 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2088 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2089 
2090 		if (zhp->zpool_start_block == 0)
2091 			start_block = find_start_block(nvroot);
2092 		else
2093 			start_block = zhp->zpool_start_block;
2094 		zhp->zpool_start_block = start_block;
2095 	} else {
2096 		/* new pool */
2097 		start_block = NEW_START_BLOCK;
2098 	}
2099 
2100 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2101 	    BACKUP_SLICE);
2102 
2103 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2104 		/*
2105 		 * This shouldn't happen.  We've long since verified that this
2106 		 * is a valid device.
2107 		 */
2108 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2109 		    "label '%s': unable to open device"), name);
2110 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2111 	}
2112 
2113 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2114 		/*
2115 		 * The only way this can fail is if we run out of memory, or we
2116 		 * were unable to read the disk's capacity
2117 		 */
2118 		if (errno == ENOMEM)
2119 			(void) no_memory(hdl);
2120 
2121 		(void) close(fd);
2122 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2123 		    "label '%s': unable to read disk capacity"), name);
2124 
2125 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2126 	}
2127 
2128 	slice_size = vtoc->efi_last_u_lba + 1;
2129 	slice_size -= EFI_MIN_RESV_SIZE;
2130 	if (start_block == MAXOFFSET_T)
2131 		start_block = NEW_START_BLOCK;
2132 	slice_size -= start_block;
2133 
2134 	vtoc->efi_parts[0].p_start = start_block;
2135 	vtoc->efi_parts[0].p_size = slice_size;
2136 
2137 	/*
2138 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2139 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2140 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2141 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2142 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2143 	 * can get, in the absence of V_OTHER.
2144 	 */
2145 	vtoc->efi_parts[0].p_tag = V_USR;
2146 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2147 
2148 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2149 	vtoc->efi_parts[8].p_size = resv;
2150 	vtoc->efi_parts[8].p_tag = V_RESERVED;
2151 
2152 	if (efi_write(fd, vtoc) != 0) {
2153 		/*
2154 		 * Some block drivers (like pcata) may not support EFI
2155 		 * GPT labels.  Print out a helpful error message dir-
2156 		 * ecting the user to manually label the disk and give
2157 		 * a specific slice.
2158 		 */
2159 		(void) close(fd);
2160 		efi_free(vtoc);
2161 
2162 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2163 		    "cannot label '%s': try using fdisk(1M) and then "
2164 		    "provide a specific slice"), name);
2165 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2166 	}
2167 
2168 	(void) close(fd);
2169 	efi_free(vtoc);
2170 	return (0);
2171 }
2172 
2173 int
2174 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
2175 {
2176 	zfs_cmd_t zc = { 0 };
2177 	int ret = -1;
2178 	char errbuf[1024];
2179 	nvlist_t *nvl = NULL;
2180 	nvlist_t *realprops;
2181 
2182 	(void) snprintf(errbuf, sizeof (errbuf),
2183 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
2184 	    zhp->zpool_name);
2185 
2186 	if (zpool_get_version(zhp) < SPA_VERSION_BOOTFS) {
2187 		zfs_error_aux(zhp->zpool_hdl,
2188 		    dgettext(TEXT_DOMAIN, "pool must be "
2189 		    "upgraded to support pool properties"));
2190 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
2191 	}
2192 
2193 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2194 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
2195 
2196 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
2197 	    nvlist_add_string(nvl, propname, propval) != 0) {
2198 		return (no_memory(zhp->zpool_hdl));
2199 	}
2200 
2201 	if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
2202 	    zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
2203 		nvlist_free(nvl);
2204 		return (-1);
2205 	}
2206 
2207 	nvlist_free(nvl);
2208 	nvl = realprops;
2209 
2210 	/*
2211 	 * Execute the corresponding ioctl() to set this property.
2212 	 */
2213 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2214 
2215 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
2216 		return (-1);
2217 
2218 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
2219 	zcmd_free_nvlists(&zc);
2220 
2221 	if (ret)
2222 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
2223 
2224 	return (ret);
2225 }
2226 
2227 uint64_t
2228 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop)
2229 {
2230 	uint64_t value;
2231 	nvlist_t *nvp;
2232 
2233 	if (zpool_get_version(zhp) < SPA_VERSION_BOOTFS)
2234 		return (0);
2235 
2236 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2237 		return (zpool_prop_default_numeric(prop));
2238 
2239 	switch (prop) {
2240 	case ZPOOL_PROP_AUTOREPLACE:
2241 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2242 		    zpool_prop_to_name(prop), &nvp) != 0) {
2243 			value = zpool_prop_default_numeric(prop);
2244 		} else {
2245 			VERIFY(nvlist_lookup_uint64(nvp, ZFS_PROP_VALUE,
2246 			    &value) == 0);
2247 		}
2248 		return (value);
2249 		break;
2250 
2251 	default:
2252 		assert(0);
2253 	}
2254 
2255 	return (0);
2256 }
2257 
/*
 * Retrieve the given pool property as a printable string in 'propbuf'
 * (at most 'proplen' bytes, always NUL-terminated).  If 'srctype' is
 * non-NULL it receives where the value came from (default vs. local).
 * Returns 0 on success, -1 on failure (unknown property, value too
 * long, or an extended error recorded on the library handle).
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
    size_t proplen, zfs_source_t *srctype)
{
	uint64_t value;
	char msg[1024], *strvalue;
	nvlist_t *nvp;
	zfs_source_t src = ZFS_SRC_NONE;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot get property '%s'"), zpool_prop_to_name(prop));

	/* Pools older than SPA_VERSION_BOOTFS cannot store properties. */
	if (zpool_get_version(zhp) < SPA_VERSION_BOOTFS) {
		zfs_error_aux(zhp->zpool_hdl,
		    dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to support pool properties"));
		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
	}

	/*
	 * The pool name doesn't require the property nvlist, so a failed
	 * fetch is only fatal for other properties.
	 */
	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));

	switch (prop) {
	case ZPOOL_PROP_NAME:
		(void) strlcpy(propbuf, zhp->zpool_name, proplen);
		break;

	case ZPOOL_PROP_BOOTFS:
		/*
		 * A missing nvlist entry means the property was never set;
		 * report the default (or "-" when there is none).
		 */
		if (nvlist_lookup_nvlist(zhp->zpool_props,
		    zpool_prop_to_name(prop), &nvp) != 0) {
			strvalue = (char *)zfs_prop_default_string(prop);
			if (strvalue == NULL)
				strvalue = "-";
			src = ZFS_SRC_DEFAULT;
		} else {
			VERIFY(nvlist_lookup_uint64(nvp,
			    ZFS_PROP_SOURCE, &value) == 0);
			src = value;
			VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
			    &strvalue) == 0);
			/* Refuse to silently truncate the dataset name. */
			if (strlen(strvalue) >= proplen)
				return (-1);
		}
		(void) strlcpy(propbuf, strvalue, proplen);
		break;

	case ZPOOL_PROP_DELEGATION:
	case ZPOOL_PROP_AUTOREPLACE:
		/* Boolean properties: render the stored value as on/off. */
		if (nvlist_lookup_nvlist(zhp->zpool_props,
		    zpool_prop_to_name(prop), &nvp) != 0) {
			value = zpool_prop_default_numeric(prop);
			src = ZFS_SRC_DEFAULT;
		} else {
			VERIFY(nvlist_lookup_uint64(nvp,
			    ZFS_PROP_SOURCE, &value) == 0);
			src = value;
			VERIFY(nvlist_lookup_uint64(nvp, ZFS_PROP_VALUE,
			    &value) == 0);
		}
		(void) strlcpy(propbuf, value ? "on" : "off", proplen);
		break;

	default:
		return (-1);
	}
	if (srctype)
		*srctype = src;
	return (0);
}
2328 
2329 int
2330 zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
2331 {
2332 	return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
2333 }
2334 
2335 
2336 int
2337 zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
2338 {
2339 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2340 	zpool_proplist_t *entry;
2341 	char buf[ZFS_MAXPROPLEN];
2342 
2343 	if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
2344 		return (-1);
2345 
2346 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
2347 
2348 		if (entry->pl_fixed)
2349 			continue;
2350 
2351 		if (entry->pl_prop != ZFS_PROP_INVAL &&
2352 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
2353 		    NULL) == 0) {
2354 			if (strlen(buf) > entry->pl_width)
2355 				entry->pl_width = strlen(buf);
2356 		}
2357 	}
2358 
2359 	return (0);
2360 }
2361