xref: /illumos-gate/usr/src/lib/libzfs/common/libzfs_pool.c (revision 7247f8883be6bcac5fe4735b6f87f873387dbbef)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <sys/zio.h>
46 
47 #include "zfs_namecheck.h"
48 #include "zfs_prop.h"
49 #include "libzfs_impl.h"
50 
51 /*
52  * Validate the given pool name, optionally recording an extended error
53  * message on the handle 'hdl'.
54  */
55 static boolean_t
56 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
57 {
58 	namecheck_err_t why;
59 	char what;
60 	int ret;
61 
62 	ret = pool_namecheck(pool, &why, &what);
63 
64 	/*
65 	 * The rules for reserved pool names were extended at a later point.
66 	 * But we need to support users with existing pools that may now be
67 	 * invalid.  So we only check for this expanded set of names during a
68 	 * create (or import), and only in userland.
69 	 */
70 	if (ret == 0 && !isopen &&
71 	    (strncmp(pool, "mirror", 6) == 0 ||
72 	    strncmp(pool, "raidz", 5) == 0 ||
73 	    strncmp(pool, "spare", 5) == 0 ||
74 	    strcmp(pool, "log") == 0)) {
75 		zfs_error_aux(hdl,
76 		    dgettext(TEXT_DOMAIN, "name is reserved"));
77 		return (B_FALSE);
78 	}
79 
81 	if (ret != 0) {
82 		if (hdl != NULL) {
83 			switch (why) {
84 			case NAME_ERR_TOOLONG:
85 				zfs_error_aux(hdl,
86 				    dgettext(TEXT_DOMAIN, "name is too long"));
87 				break;
88 
89 			case NAME_ERR_INVALCHAR:
90 				zfs_error_aux(hdl,
91 				    dgettext(TEXT_DOMAIN, "invalid character "
92 				    "'%c' in pool name"), what);
93 				break;
94 
95 			case NAME_ERR_NOLETTER:
96 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
97 				    "name must begin with a letter"));
98 				break;
99 
100 			case NAME_ERR_RESERVED:
101 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
102 				    "name is reserved"));
103 				break;
104 
105 			case NAME_ERR_DISKLIKE:
106 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
107 				    "pool name is reserved"));
108 				break;
109 
110 			case NAME_ERR_LEADING_SLASH:
111 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
112 				    "leading slash in name"));
113 				break;
114 
115 			case NAME_ERR_EMPTY_COMPONENT:
116 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
117 				    "empty component in name"));
118 				break;
119 
120 			case NAME_ERR_TRAILING_SLASH:
121 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
122 				    "trailing slash in name"));
123 				break;
124 
125 			case NAME_ERR_MULTIPLE_AT:
126 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
127 				    "multiple '@' delimiters in name"));
128 				break;
129 
130 			}
131 		}
132 		return (B_FALSE);
133 	}
134 
135 	return (B_TRUE);
136 }
137 
138 static int
139 zpool_get_all_props(zpool_handle_t *zhp)
140 {
141 	zfs_cmd_t zc = { 0 };
142 	libzfs_handle_t *hdl = zhp->zpool_hdl;
143 
144 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
145 
146 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
147 		return (-1);
148 
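	/*
	 * If the destination buffer is too small for the packed property
	 * nvlist, the ioctl fails with ENOMEM; expand the buffer and retry.
	 */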
149 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
150 		if (errno == ENOMEM) {
151 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
152 				zcmd_free_nvlists(&zc);
153 				return (-1);
154 			}
155 		} else {
156 			zcmd_free_nvlists(&zc);
157 			return (-1);
158 		}
159 	}
160 
161 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
162 		zcmd_free_nvlists(&zc);
163 		return (-1);
164 	}
165 
166 	zcmd_free_nvlists(&zc);
167 
168 	return (0);
169 }
170 
171 /*
172  * Open a handle to the given pool, even if the pool is currently in the FAULTED
173  * state.
174  */
175 zpool_handle_t *
176 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
177 {
178 	zpool_handle_t *zhp;
179 	boolean_t missing;
180 
181 	/*
182 	 * Make sure the pool name is valid.
183 	 */
184 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
185 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
186 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
187 		    pool);
188 		return (NULL);
189 	}
190 
191 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
192 		return (NULL);
193 
194 	zhp->zpool_hdl = hdl;
195 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
196 
197 	if (zpool_refresh_stats(zhp, &missing) != 0) {
198 		zpool_close(zhp);
199 		return (NULL);
200 	}
201 
202 	if (missing) {
203 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
204 		    "no such pool"));
205 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
206 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
207 		    pool);
208 		zpool_close(zhp);
209 		return (NULL);
210 	}
211 
212 	return (zhp);
213 }
214 
215 /*
216  * Like the above, but silent on error.  Used when iterating over pools (because
217  * the configuration cache may be out of date).
218  */
219 int
220 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
221 {
222 	zpool_handle_t *zhp;
223 	boolean_t missing;
224 
225 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
226 		return (-1);
227 
228 	zhp->zpool_hdl = hdl;
229 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
230 
231 	if (zpool_refresh_stats(zhp, &missing) != 0) {
232 		zpool_close(zhp);
233 		return (-1);
234 	}
235 
236 	if (missing) {
237 		zpool_close(zhp);
238 		*ret = NULL;
239 		return (0);
240 	}
241 
242 	*ret = zhp;
243 	return (0);
244 }
245 
246 /*
247  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
248  * state.
249  */
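/*
 * Typical usage, sketched here for illustration only ("tank" is a
 * hypothetical pool name):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		... operate on the pool ...
 *		zpool_close(zhp);
 *	}
 */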
250 zpool_handle_t *
251 zpool_open(libzfs_handle_t *hdl, const char *pool)
252 {
253 	zpool_handle_t *zhp;
254 
255 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
256 		return (NULL);
257 
258 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
259 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
260 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
261 		zpool_close(zhp);
262 		return (NULL);
263 	}
264 
265 	return (zhp);
266 }
267 
268 /*
269  * Close the handle.  Simply frees the memory associated with the handle.
270  */
271 void
272 zpool_close(zpool_handle_t *zhp)
273 {
274 	if (zhp->zpool_config)
275 		nvlist_free(zhp->zpool_config);
276 	if (zhp->zpool_old_config)
277 		nvlist_free(zhp->zpool_old_config);
278 	if (zhp->zpool_props)
279 		nvlist_free(zhp->zpool_props);
280 	free(zhp);
281 }
282 
283 /*
284  * Return the name of the pool.
285  */
286 const char *
287 zpool_get_name(zpool_handle_t *zhp)
288 {
289 	return (zhp->zpool_name);
290 }
291 
292 /*
293  * Return the GUID of the pool.
294  */
295 uint64_t
296 zpool_get_guid(zpool_handle_t *zhp)
297 {
298 	uint64_t guid;
299 
300 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
301 	    &guid) == 0);
302 	return (guid);
303 }
304 
305 /*
306  * Return the version of the pool.
307  */
308 uint64_t
309 zpool_get_version(zpool_handle_t *zhp)
310 {
311 	uint64_t version;
312 
313 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
314 	    &version) == 0);
315 
316 	return (version);
317 }
318 
319 /*
320  * Return the amount of space currently consumed by the pool.
321  */
322 uint64_t
323 zpool_get_space_used(zpool_handle_t *zhp)
324 {
325 	nvlist_t *nvroot;
326 	vdev_stat_t *vs;
327 	uint_t vsc;
328 
329 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
330 	    &nvroot) == 0);
331 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
332 	    (uint64_t **)&vs, &vsc) == 0);
333 
334 	return (vs->vs_alloc);
335 }
336 
337 /*
338  * Return the total space in the pool.
339  */
340 uint64_t
341 zpool_get_space_total(zpool_handle_t *zhp)
342 {
343 	nvlist_t *nvroot;
344 	vdev_stat_t *vs;
345 	uint_t vsc;
346 
347 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
348 	    &nvroot) == 0);
349 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
350 	    (uint64_t **)&vs, &vsc) == 0);
351 
352 	return (vs->vs_space);
353 }
354 
355 /*
356  * Return the alternate root for this pool, if any.
357  */
358 int
359 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
360 {
361 	zfs_cmd_t zc = { 0 };
362 
363 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
364 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
365 	    zc.zc_value[0] == '\0')
366 		return (-1);
367 
368 	(void) strlcpy(buf, zc.zc_value, buflen);
369 
370 	return (0);
371 }
372 
373 /*
374  * Return the state of the pool (ACTIVE or UNAVAILABLE)
375  */
376 int
377 zpool_get_state(zpool_handle_t *zhp)
378 {
379 	return (zhp->zpool_state);
380 }
381 
382 /*
383  * Create the named pool, using the provided vdev list.  It is assumed
384  * that the consumer has already validated the contents of the nvlist, so we
385  * don't have to worry about error semantics.
386  */
387 int
388 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
389     const char *altroot)
390 {
391 	zfs_cmd_t zc = { 0 };
392 	char msg[1024];
393 
394 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
395 	    "cannot create '%s'"), pool);
396 
397 	if (!zpool_name_valid(hdl, B_FALSE, pool))
398 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
399 
400 	if (altroot != NULL && altroot[0] != '/')
401 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
402 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
403 
404 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
405 		return (-1);
406 
407 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
408 
409 	if (altroot != NULL)
410 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
411 
412 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc) != 0) {
413 		zcmd_free_nvlists(&zc);
414 
415 		switch (errno) {
416 		case EBUSY:
417 			/*
418 			 * This can happen if the user has specified the same
419 			 * device multiple times.  We can't reliably detect this
420 			 * until we try to add it and see we already have a
421 			 * label.
422 			 */
423 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
424 			    "one or more vdevs refer to the same device"));
425 			return (zfs_error(hdl, EZFS_BADDEV, msg));
426 
427 		case EOVERFLOW:
428 			/*
429 			 * This occurs when one of the devices is below
430 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
431 			 * device was the problem device since there's no
432 			 * reliable way to determine device size from userland.
433 			 */
434 			{
435 				char buf[64];
436 
437 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
438 
439 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
440 				    "one or more devices is less than the "
441 				    "minimum size (%s)"), buf);
442 			}
443 			return (zfs_error(hdl, EZFS_BADDEV, msg));
444 
445 		case ENOSPC:
446 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
447 			    "one or more devices is out of space"));
448 			return (zfs_error(hdl, EZFS_BADDEV, msg));
449 
450 		default:
451 			return (zpool_standard_error(hdl, errno, msg));
452 		}
453 	}
454 	zcmd_free_nvlists(&zc);
455 
456 	/*
457 	 * If this is an alternate root pool, then we automatically set the
458 	 * mountpoint of the root dataset to be '/'.
459 	 */
460 	if (altroot != NULL) {
461 		zfs_handle_t *zhp;
462 
463 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
464 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
465 		    "/") == 0);
466 
467 		zfs_close(zhp);
468 	}
469 
470 	return (0);
471 }
472 
473 /*
474  * Destroy the given pool.  It is up to the caller to ensure that there are no
475  * datasets left in the pool.
476  */
477 int
478 zpool_destroy(zpool_handle_t *zhp)
479 {
480 	zfs_cmd_t zc = { 0 };
481 	zfs_handle_t *zfp = NULL;
482 	libzfs_handle_t *hdl = zhp->zpool_hdl;
483 	char msg[1024];
484 
485 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
486 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
487 	    ZFS_TYPE_FILESYSTEM)) == NULL)
488 		return (-1);
489 
490 	if (zpool_remove_zvol_links(zhp) != 0)
491 		return (-1);
492 
493 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
494 
495 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
496 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
497 		    "cannot destroy '%s'"), zhp->zpool_name);
498 
499 		if (errno == EROFS) {
500 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 			    "one or more devices is read only"));
502 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
503 		} else {
504 			(void) zpool_standard_error(hdl, errno, msg);
505 		}
506 
507 		if (zfp)
508 			zfs_close(zfp);
509 		return (-1);
510 	}
511 
512 	if (zfp) {
513 		remove_mountpoint(zfp);
514 		zfs_close(zfp);
515 	}
516 
517 	return (0);
518 }
519 
520 /*
521  * Add the given vdevs to the pool.  The caller must have already performed the
522  * necessary verification to ensure that the vdev specification is well-formed.
523  */
524 int
525 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
526 {
527 	zfs_cmd_t zc = { 0 };
528 	int ret;
529 	libzfs_handle_t *hdl = zhp->zpool_hdl;
530 	char msg[1024];
531 	nvlist_t **spares;
532 	uint_t nspares;
533 
534 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
535 	    "cannot add to '%s'"), zhp->zpool_name);
536 
537 	if (zpool_get_version(zhp) < SPA_VERSION_SPARES &&
538 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
539 	    &spares, &nspares) == 0) {
540 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
541 		    "upgraded to add hot spares"));
542 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
543 	}
544 
545 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
546 		return (-1);
547 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
548 
549 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
550 		switch (errno) {
551 		case EBUSY:
552 			/*
553 			 * This can happen if the user has specified the same
554 			 * device multiple times.  We can't reliably detect this
555 			 * until we try to add it and see we already have a
556 			 * label.
557 			 */
558 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559 			    "one or more vdevs refer to the same device"));
560 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
561 			break;
562 
563 		case EOVERFLOW:
564 			/*
565 			 * This occurs when one of the devices is below
566 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
567 			 * device was the problem device since there's no
568 			 * reliable way to determine device size from userland.
569 			 */
570 			{
571 				char buf[64];
572 
573 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
574 
575 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
576 				    "device is less than the minimum "
577 				    "size (%s)"), buf);
578 			}
579 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
580 			break;
581 
582 		case ENOTSUP:
583 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
584 			    "pool must be upgraded to add these vdevs"));
585 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
586 			break;
587 
588 		case EDOM:
589 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
590 			    "root pool can not have multiple vdevs"
591 			    " or separate logs"));
592 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
593 			break;
594 
595 		default:
596 			(void) zpool_standard_error(hdl, errno, msg);
597 		}
598 
599 		ret = -1;
600 	} else {
601 		ret = 0;
602 	}
603 
604 	zcmd_free_nvlists(&zc);
605 
606 	return (ret);
607 }
608 
609 /*
610  * Exports the pool from the system.  The caller must ensure that there are no
611  * mounted datasets in the pool.
612  */
613 int
614 zpool_export(zpool_handle_t *zhp)
615 {
616 	zfs_cmd_t zc = { 0 };
617 
618 	if (zpool_remove_zvol_links(zhp) != 0)
619 		return (-1);
620 
621 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
622 
623 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0)
624 		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
625 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
626 		    zhp->zpool_name));
627 	return (0);
628 }
629 
630 /*
631  * Import the given pool using the known configuration.  The configuration
632  * should have come from zpool_find_import().  The 'newname' and 'altroot'
633  * parameters control whether the pool is imported with a different name or with
634  * an alternate root, respectively.
635  */
636 int
637 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
638     const char *altroot)
639 {
640 	zfs_cmd_t zc = { 0 };
641 	char *thename;
642 	char *origname;
643 	int ret;
644 
645 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
646 	    &origname) == 0);
647 
648 	if (newname != NULL) {
649 		if (!zpool_name_valid(hdl, B_FALSE, newname))
650 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
651 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
652 			    newname));
653 		thename = (char *)newname;
654 	} else {
655 		thename = origname;
656 	}
657 
658 	if (altroot != NULL && altroot[0] != '/')
659 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
660 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
661 		    altroot));
662 
663 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
664 
665 	if (altroot != NULL)
666 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
667 	else
668 		zc.zc_value[0] = '\0';
669 
670 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
671 	    &zc.zc_guid) == 0);
672 
673 	if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
674 		return (-1);
675 
676 	ret = 0;
677 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
678 		char desc[1024];
679 		if (newname == NULL)
680 			(void) snprintf(desc, sizeof (desc),
681 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
682 			    thename);
683 		else
684 			(void) snprintf(desc, sizeof (desc),
685 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
686 			    origname, thename);
687 
688 		switch (errno) {
689 		case ENOTSUP:
690 			/*
691 			 * Unsupported version.
692 			 */
693 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
694 			break;
695 
696 		case EINVAL:
697 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
698 			break;
699 
700 		default:
701 			(void) zpool_standard_error(hdl, errno, desc);
702 		}
703 
704 		ret = -1;
705 	} else {
706 		zpool_handle_t *zhp;
707 
708 		/*
709 		 * This should never fail, but play it safe anyway.
710 		 */
711 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
712 			ret = -1;
713 		} else if (zhp != NULL) {
714 			ret = zpool_create_zvol_links(zhp);
715 			zpool_close(zhp);
716 		}
718 	}
719 
721 	zcmd_free_nvlists(&zc);
722 	return (ret);
723 }
724 
725 /*
726  * Scrub the pool.
727  */
728 int
729 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
730 {
731 	zfs_cmd_t zc = { 0 };
732 	char msg[1024];
733 	libzfs_handle_t *hdl = zhp->zpool_hdl;
734 
735 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
736 	zc.zc_cookie = type;
737 
738 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
739 		return (0);
740 
741 	(void) snprintf(msg, sizeof (msg),
742 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
743 
744 	if (errno == EBUSY)
745 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
746 	else
747 		return (zpool_standard_error(hdl, errno, msg));
748 }
749 
750 /*
751  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
752  * spare, but FALSE if it's an INUSE spare.
753  */
754 static nvlist_t *
755 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
756     boolean_t *avail_spare)
757 {
758 	uint_t c, children;
759 	nvlist_t **child;
760 	uint64_t theguid, present;
761 	char *path;
762 	uint64_t wholedisk = 0;
763 	nvlist_t *ret;
764 
765 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
766 
767 	if (search == NULL &&
768 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
769 		/*
770 		 * If the device has never been present since import, the only
771 		 * reliable way to match the vdev is by GUID.
772 		 */
773 		if (theguid == guid)
774 			return (nv);
775 	} else if (search != NULL &&
776 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
777 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
778 		    &wholedisk);
779 		if (wholedisk) {
780 			/*
781 			 * For whole disks, the internal path has 's0', but the
782 			 * path passed in by the user doesn't.
783 			 */
784 			if (strlen(search) == strlen(path) - 2 &&
785 			    strncmp(search, path, strlen(search)) == 0)
786 				return (nv);
787 		} else if (strcmp(search, path) == 0) {
788 			return (nv);
789 		}
790 	}
791 
792 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
793 	    &child, &children) != 0)
794 		return (NULL);
795 
796 	for (c = 0; c < children; c++)
797 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
798 		    avail_spare)) != NULL)
799 			return (ret);
800 
801 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
802 	    &child, &children) == 0) {
803 		for (c = 0; c < children; c++) {
804 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
805 			    avail_spare)) != NULL) {
806 				*avail_spare = B_TRUE;
807 				return (ret);
808 			}
809 		}
810 	}
811 
812 	return (NULL);
813 }
814 
815 nvlist_t *
816 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
817 {
818 	char buf[MAXPATHLEN];
819 	const char *search;
820 	char *end;
821 	nvlist_t *nvroot;
822 	uint64_t guid;
823 
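	/*
	 * A purely numeric (non-zero) name is interpreted as a vdev GUID;
	 * otherwise it is a device path, and relative names get "/dev/dsk/"
	 * prepended.
	 */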
824 	guid = strtoull(path, &end, 10);
825 	if (guid != 0 && *end == '\0') {
826 		search = NULL;
827 	} else if (path[0] != '/') {
828 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
829 		search = buf;
830 	} else {
831 		search = path;
832 	}
833 
834 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
835 	    &nvroot) == 0);
836 
837 	*avail_spare = B_FALSE;
838 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
839 }
840 
841 /*
842  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
843  */
844 static boolean_t
845 is_spare(zpool_handle_t *zhp, uint64_t guid)
846 {
847 	uint64_t spare_guid;
848 	nvlist_t *nvroot;
849 	nvlist_t **spares;
850 	uint_t nspares;
851 	int i;
852 
853 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
854 	    &nvroot) == 0);
855 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
856 	    &spares, &nspares) == 0) {
857 		for (i = 0; i < nspares; i++) {
858 			verify(nvlist_lookup_uint64(spares[i],
859 			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
860 			if (guid == spare_guid)
861 				return (B_TRUE);
862 		}
863 	}
864 
865 	return (B_FALSE);
866 }
867 
868 /*
869  * Bring the specified vdev online.  The 'flags' parameter is a set of the
870  * ZFS_ONLINE_* flags.
871  */
872 int
873 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
874     vdev_state_t *newstate)
875 {
876 	zfs_cmd_t zc = { 0 };
877 	char msg[1024];
878 	nvlist_t *tgt;
879 	boolean_t avail_spare;
880 	libzfs_handle_t *hdl = zhp->zpool_hdl;
881 
882 	(void) snprintf(msg, sizeof (msg),
883 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
884 
885 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
886 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
887 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
888 
889 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
890 
891 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
892 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
893 
894 	zc.zc_cookie = VDEV_STATE_ONLINE;
895 	zc.zc_obj = flags;
896 
898 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
899 		return (zpool_standard_error(hdl, errno, msg));
900 
901 	*newstate = zc.zc_cookie;
902 	return (0);
903 }
904 
905 /*
906  * Take the specified vdev offline.
907  */
908 int
909 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
910 {
911 	zfs_cmd_t zc = { 0 };
912 	char msg[1024];
913 	nvlist_t *tgt;
914 	boolean_t avail_spare;
915 	libzfs_handle_t *hdl = zhp->zpool_hdl;
916 
917 	(void) snprintf(msg, sizeof (msg),
918 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
919 
920 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
921 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
922 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
923 
924 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
925 
926 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
927 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
928 
929 	zc.zc_cookie = VDEV_STATE_OFFLINE;
930 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
931 
932 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
933 		return (0);
934 
935 	switch (errno) {
936 	case EBUSY:
938 		/*
939 		 * There are no other replicas of this device.
940 		 */
941 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
942 
943 	default:
944 		return (zpool_standard_error(hdl, errno, msg));
945 	}
946 }
947 
948 /*
949  * Mark the given vdev faulted.
950  */
951 int
952 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
953 {
954 	zfs_cmd_t zc = { 0 };
955 	char msg[1024];
956 	libzfs_handle_t *hdl = zhp->zpool_hdl;
957 
958 	(void) snprintf(msg, sizeof (msg),
959 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
960 
961 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
962 	zc.zc_guid = guid;
963 	zc.zc_cookie = VDEV_STATE_FAULTED;
964 
965 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
966 		return (0);
967 
968 	switch (errno) {
969 	case EBUSY:
971 		/*
972 		 * There are no other replicas of this device.
973 		 */
974 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
975 
976 	default:
977 		return (zpool_standard_error(hdl, errno, msg));
978 	}
980 }
981 
982 /*
983  * Mark the given vdev degraded.
984  */
985 int
986 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
987 {
988 	zfs_cmd_t zc = { 0 };
989 	char msg[1024];
990 	libzfs_handle_t *hdl = zhp->zpool_hdl;
991 
992 	(void) snprintf(msg, sizeof (msg),
993 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
994 
995 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
996 	zc.zc_guid = guid;
997 	zc.zc_cookie = VDEV_STATE_DEGRADED;
998 
999 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1000 		return (0);
1001 
1002 	return (zpool_standard_error(hdl, errno, msg));
1003 }
1004 
1005 /*
1006  * Returns TRUE if 'tgt' appears as child number 'which' of a spare vdev
1007  * anywhere beneath 'search' (child 0 is the original device; child 1 is the
1008  * hot spare that was swapped in).
1008  */
1009 static boolean_t
1010 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1011 {
1012 	nvlist_t **child;
1013 	uint_t c, children;
1014 	char *type;
1015 
1016 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1017 	    &children) == 0) {
1018 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1019 		    &type) == 0);
1020 
1021 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1022 		    children == 2 && child[which] == tgt)
1023 			return (B_TRUE);
1024 
1025 		for (c = 0; c < children; c++)
1026 			if (is_replacing_spare(child[c], tgt, which))
1027 				return (B_TRUE);
1028 	}
1029 
1030 	return (B_FALSE);
1031 }
1032 
1033 /*
1034  * Attach new_disk (fully described by nvroot) to old_disk.
1035  * If 'replacing' is specified, the new disk will replace the old one.
1036  */
1037 int
1038 zpool_vdev_attach(zpool_handle_t *zhp,
1039     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1040 {
1041 	zfs_cmd_t zc = { 0 };
1042 	char msg[1024];
1043 	int ret;
1044 	nvlist_t *tgt;
1045 	boolean_t avail_spare;
1046 	uint64_t val, is_log;
1047 	char *path;
1048 	nvlist_t **child;
1049 	uint_t children;
1050 	nvlist_t *config_root;
1051 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1052 
1053 	if (replacing)
1054 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1055 		    "cannot replace %s with %s"), old_disk, new_disk);
1056 	else
1057 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1058 		    "cannot attach %s to %s"), new_disk, old_disk);
1059 
1060 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1061 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
1062 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1063 
1064 	if (avail_spare)
1065 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1066 
1067 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
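	/* zc_cookie is non-zero for a replace, zero for a plain attach. */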
1068 	zc.zc_cookie = replacing;
1069 
1070 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1071 	    &child, &children) != 0 || children != 1) {
1072 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1073 		    "new device must be a single disk"));
1074 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1075 	}
1076 
1077 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1078 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1079 
1080 	/*
1081 	 * If the target is a hot spare that has been swapped in, we can only
1082 	 * replace it with another hot spare.
1083 	 */
1084 	if (replacing &&
1085 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1086 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1087 	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
1088 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1089 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1090 		    "can only be replaced by another hot spare"));
1091 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1092 	}
1093 
1094 	/*
1095 	 * If we are attempting to replace a device with a spare, the spare
1096 	 * cannot be applied to a device that has already been spared.
1097 	 */
1098 	if (replacing &&
1099 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1100 	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1101 	    is_replacing_spare(config_root, tgt, 0)) {
1102 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1103 		    "device has already been replaced with a spare"));
1104 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1105 	}
1106 
1107 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
1108 		return (-1);
1109 
1110 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1111 
1112 	zcmd_free_nvlists(&zc);
1113 
1114 	if (ret == 0)
1115 		return (0);
1116 
1117 	switch (errno) {
1118 	case ENOTSUP:
1119 		/*
1120 		 * Can't attach to or replace this type of vdev.
1121 		 */
1122 		if (replacing) {
1123 			is_log = B_FALSE;
1124 			(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_LOG,
1125 			    &is_log);
1126 			if (is_log)
1127 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1128 				    "cannot replace a log with a spare"));
1129 			else
1130 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1131 				    "cannot replace a replacing device"));
1132 		} else {
1133 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1134 			    "can only attach to mirrors and top-level "
1135 			    "disks"));
1136 		}
1137 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1138 		break;
1139 
1140 	case EINVAL:
1141 		/*
1142 		 * The new device must be a single disk.
1143 		 */
1144 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1145 		    "new device must be a single disk"));
1146 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1147 		break;
1148 
1149 	case EBUSY:
1150 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1151 		    new_disk);
1152 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1153 		break;
1154 
1155 	case EOVERFLOW:
1156 		/*
1157 		 * The new device is too small.
1158 		 */
1159 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1160 		    "device is too small"));
1161 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1162 		break;
1163 
1164 	case EDOM:
1165 		/*
1166 		 * The new device has a different alignment requirement.
1167 		 */
1168 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1169 		    "devices have different sector alignment"));
1170 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1171 		break;
1172 
1173 	case ENAMETOOLONG:
1174 		/*
1175 		 * The resulting top-level vdev spec won't fit in the label.
1176 		 */
1177 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1178 		break;
1179 
1180 	default:
1181 		(void) zpool_standard_error(hdl, errno, msg);
1182 	}
1183 
1184 	return (-1);
1185 }
1186 
1187 /*
1188  * Detach the specified device.
1189  */
1190 int
1191 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1192 {
1193 	zfs_cmd_t zc = { 0 };
1194 	char msg[1024];
1195 	nvlist_t *tgt;
1196 	boolean_t avail_spare;
1197 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1198 
1199 	(void) snprintf(msg, sizeof (msg),
1200 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1201 
1202 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1203 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1204 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1205 
1206 	if (avail_spare)
1207 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1208 
1209 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1210 
1211 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1212 		return (0);
1213 
1214 	switch (errno) {
1215 
1216 	case ENOTSUP:
1217 		/*
1218 		 * Can't detach from this type of vdev.
1219 		 */
1220 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1221 		    "applicable to mirror and replacing vdevs"));
1222 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1223 		break;
1224 
1225 	case EBUSY:
1226 		/*
1227 		 * There are no other replicas of this device.
1228 		 */
1229 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1230 		break;
1231 
1232 	default:
1233 		(void) zpool_standard_error(hdl, errno, msg);
1234 	}
1235 
1236 	return (-1);
1237 }
1238 
1239 /*
1240  * Remove the given device.  Currently, this is supported only for hot spares.
1241  */
1242 int
1243 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1244 {
1245 	zfs_cmd_t zc = { 0 };
1246 	char msg[1024];
1247 	nvlist_t *tgt;
1248 	boolean_t avail_spare;
1249 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1250 
1251 	(void) snprintf(msg, sizeof (msg),
1252 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1253 
1254 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1255 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1256 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1257 
1258 	if (!avail_spare) {
1259 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1260 		    "only inactive hot spares can be removed"));
1261 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1262 	}
1263 
1264 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1265 
1266 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1267 		return (0);
1268 
1269 	return (zpool_standard_error(hdl, errno, msg));
1270 }
1271 
1272 /*
1273  * Clear the errors for the pool, or the particular device if specified.
1274  */
1275 int
1276 zpool_clear(zpool_handle_t *zhp, const char *path)
1277 {
1278 	zfs_cmd_t zc = { 0 };
1279 	char msg[1024];
1280 	nvlist_t *tgt;
1281 	boolean_t avail_spare;
1282 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1283 
1284 	if (path)
1285 		(void) snprintf(msg, sizeof (msg),
1286 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1287 		    path);
1288 	else
1289 		(void) snprintf(msg, sizeof (msg),
1290 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1291 		    zhp->zpool_name);
1292 
1293 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1294 	if (path) {
1295 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1296 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1297 
1298 		if (avail_spare)
1299 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1300 
1301 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1302 		    &zc.zc_guid) == 0);
1303 	}
1304 
1305 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
1306 		return (0);
1307 
1308 	return (zpool_standard_error(hdl, errno, msg));
1309 }
1310 
1311 /*
1312  * Similar to zpool_clear(), but takes a GUID (used by fmd).
1313  */
1314 int
1315 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
1316 {
1317 	zfs_cmd_t zc = { 0 };
1318 	char msg[1024];
1319 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1320 
1321 	(void) snprintf(msg, sizeof (msg),
1322 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
1323 	    guid);
1324 
1325 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1326 	zc.zc_guid = guid;
1327 
1328 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1329 		return (0);
1330 
1331 	return (zpool_standard_error(hdl, errno, msg));
1332 }
1333 
1334 /*
1335  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1336  * hierarchy.
1337  */
1338 int
1339 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1340     void *data)
1341 {
1342 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1343 	char (*paths)[MAXPATHLEN];
1344 	size_t size = 4;
1345 	int curr, fd, base, ret = 0;
1346 	DIR *dirp;
1347 	struct dirent *dp;
1348 	struct stat st;
1349 
1350 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
1351 		return (errno == ENOENT ? 0 : -1);
1352 
1353 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
1354 		int err = errno;
1355 		(void) close(base);
1356 		return (err == ENOENT ? 0 : -1);
1357 	}
1358 
1359 	/*
1360 	 * Oddly, this wasn't a directory -- ignore that failure, since we
1361 	 * know there are no links lower in the (non-existent) hierarchy.
1362 	 */
1363 	if (!S_ISDIR(st.st_mode)) {
1364 		(void) close(base);
1365 		return (0);
1366 	}
1367 
1368 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1369 		(void) close(base);
1370 		return (-1);
1371 	}
1372 
1373 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1374 	curr = 0;
1375 
1376 	while (curr >= 0) {
1377 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
1378 			goto err;
1379 
1380 		if (S_ISDIR(st.st_mode)) {
1381 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
1382 				goto err;
1383 
1384 			if ((dirp = fdopendir(fd)) == NULL) {
1385 				(void) close(fd);
1386 				goto err;
1387 			}
1388 
1389 			while ((dp = readdir(dirp)) != NULL) {
1390 				if (dp->d_name[0] == '.')
1391 					continue;
1392 
1393 				if (curr + 1 == size) {
1394 					paths = zfs_realloc(hdl, paths,
1395 					    size * sizeof (paths[0]),
1396 					    size * 2 * sizeof (paths[0]));
1397 					if (paths == NULL) {
1398 						(void) closedir(dirp);
1399 						(void) close(fd);
1400 						goto err;
1401 					}
1402 
1403 					size *= 2;
1404 				}
1405 
1406 				(void) strlcpy(paths[curr + 1], paths[curr],
1407 				    sizeof (paths[curr + 1]));
1408 				(void) strlcat(paths[curr], "/",
1409 				    sizeof (paths[curr]));
1410 				(void) strlcat(paths[curr], dp->d_name,
1411 				    sizeof (paths[curr]));
1412 				curr++;
1413 			}
1414 
1415 			(void) closedir(dirp);
1416 
1417 		} else {
1418 			if ((ret = cb(paths[curr], data)) != 0)
1419 				break;
1420 		}
1421 
1422 		curr--;
1423 	}
1424 
1425 	free(paths);
1426 	(void) close(base);
1427 
1428 	return (ret);
1429 
1430 err:
1431 	free(paths);
1432 	(void) close(base);
1433 	return (-1);
1434 }
1435 
1436 typedef struct zvol_cb {
1437 	zpool_handle_t *zcb_pool;
1438 	boolean_t zcb_create;
1439 } zvol_cb_t;
1440 
1441 /*ARGSUSED*/
1442 static int
1443 do_zvol_create(zfs_handle_t *zhp, void *data)
1444 {
1445 	int ret = 0;
1446 
1447 	if (ZFS_IS_VOLUME(zhp)) {
1448 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1449 		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
1450 	}
1451 
1452 	if (ret == 0)
1453 		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
1454 
1455 	zfs_close(zhp);
1456 
1457 	return (ret);
1458 }
1459 
1460 /*
1461  * Iterate over all zvols in the pool and make any necessary minor nodes.
1462  */
1463 int
1464 zpool_create_zvol_links(zpool_handle_t *zhp)
1465 {
1466 	zfs_handle_t *zfp;
1467 	int ret;
1468 
1469 	/*
1470 	 * If the pool is unavailable, just return success.
1471 	 */
1472 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1473 	    zhp->zpool_name)) == NULL)
1474 		return (0);
1475 
1476 	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
1477 
1478 	zfs_close(zfp);
1479 	return (ret);
1480 }
1481 
1482 static int
1483 do_zvol_remove(const char *dataset, void *data)
1484 {
1485 	zpool_handle_t *zhp = data;
1486 
1487 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
1488 }
1489 
1490 /*
1491  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
1492  * by examining the /dev links so that a corrupted pool doesn't impede this
1493  * operation.
1494  */
1495 int
1496 zpool_remove_zvol_links(zpool_handle_t *zhp)
1497 {
1498 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
1499 }
1500 
1501 /*
1502  * Convert from a devid string to a path.
1503  */
1504 static char *
1505 devid_to_path(char *devid_str)
1506 {
1507 	ddi_devid_t devid;
1508 	char *minor;
1509 	char *path;
1510 	devid_nmlist_t *list = NULL;
1511 	int ret;
1512 
1513 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1514 		return (NULL);
1515 
1516 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1517 
1518 	devid_str_free(minor);
1519 	devid_free(devid);
1520 
1521 	if (ret != 0)
1522 		return (NULL);
1523 
1524 	path = strdup(list[0].devname);
1525 	devid_free_nmlist(list);
1526 
1527 	return (path);
1530 }
1531 
1532 /*
1533  * Convert from a path to a devid string.
1534  */
1535 static char *
1536 path_to_devid(const char *path)
1537 {
1538 	int fd;
1539 	ddi_devid_t devid;
1540 	char *minor, *ret;
1541 
1542 	if ((fd = open(path, O_RDONLY)) < 0)
1543 		return (NULL);
1544 
1545 	minor = NULL;
1546 	ret = NULL;
1547 	if (devid_get(fd, &devid) == 0) {
1548 		if (devid_get_minor_name(fd, &minor) == 0)
1549 			ret = devid_str_encode(devid, minor);
1550 		if (minor != NULL)
1551 			devid_str_free(minor);
1552 		devid_free(devid);
1553 	}
1554 	(void) close(fd);
1555 
1556 	return (ret);
1557 }
1558 
1559 /*
1560  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1561  * ignore any failure here, since a common case is for an unprivileged user to
1562  * type 'zpool status', and we'll display the correct information anyway.
1563  */
1564 static void
1565 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1566 {
1567 	zfs_cmd_t zc = { 0 };
1568 
1569 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1570 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
1571 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1572 	    &zc.zc_guid) == 0);
1573 
1574 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1575 }
1576 
1577 /*
1578  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1579  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1580  * We also check if this is a whole disk, in which case we strip off the
1581  * trailing 's0' slice name.
1582  *
1583  * This routine is also responsible for identifying when disks have been
1584  * reconfigured in a new location.  The kernel will have opened the device by
1585  * devid, but the path will still refer to the old location.  To catch this, we
1586  * first do a path -> devid translation (which is fast for the common case).  If
1587  * the devid matches, we're done.  If not, we do a reverse devid -> path
1588  * translation and issue the appropriate ioctl() to update the path of the vdev.
1589  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1590  * of these checks.
1591  */
1592 char *
1593 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
1594 {
1595 	char *path, *devid;
1596 	uint64_t value;
1597 	char buf[64];
1598 	vdev_stat_t *vs;
1599 	uint_t vsc;
1600 
1601 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1602 	    &value) == 0) {
1603 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1604 		    &value) == 0);
1605 		(void) snprintf(buf, sizeof (buf), "%llu",
1606 		    (u_longlong_t)value);
1607 		path = buf;
1608 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1609 
1610 		/*
1611 		 * If the device is dead (faulted, offline, etc) then don't
1612 		 * bother opening it.  Otherwise we may be forcing the user to
1613 		 * open a misbehaving device, which can have undesirable
1614 		 * effects.
1615 		 */
1616 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
1617 		    (uint64_t **)&vs, &vsc) != 0 ||
1618 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
1619 		    zhp != NULL &&
1620 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
1621 			/*
1622 			 * Determine if the current path is correct.
1623 			 */
1624 			char *newdevid = path_to_devid(path);
1625 
1626 			if (newdevid == NULL ||
1627 			    strcmp(devid, newdevid) != 0) {
1628 				char *newpath;
1629 
1630 				if ((newpath = devid_to_path(devid)) != NULL) {
1631 					/*
1632 					 * Update the path appropriately.
1633 					 */
1634 					set_path(zhp, nv, newpath);
1635 					if (nvlist_add_string(nv,
1636 					    ZPOOL_CONFIG_PATH, newpath) == 0)
1637 						verify(nvlist_lookup_string(nv,
1638 						    ZPOOL_CONFIG_PATH,
1639 						    &path) == 0);
1640 					free(newpath);
1641 				}
1642 			}
1643 
1644 			if (newdevid)
1645 				devid_str_free(newdevid);
1646 		}
1647 
1648 		if (strncmp(path, "/dev/dsk/", 9) == 0)
1649 			path += 9;
1650 
1651 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1652 		    &value) == 0 && value) {
1653 			char *tmp = zfs_strdup(hdl, path);
1654 			if (tmp == NULL)
1655 				return (NULL);
1656 			tmp[strlen(path) - 2] = '\0';
1657 			return (tmp);
1658 		}
1659 	} else {
1660 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
1661 
1662 		/*
1663 		 * If it's a raidz device, we need to stick in the parity level.
1664 		 */
1665 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
1666 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
1667 			    &value) == 0);
1668 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
1669 			    (u_longlong_t)value);
1670 			path = buf;
1671 		}
1672 	}
1673 
1674 	return (zfs_strdup(hdl, path));
1675 }
1676 
1677 static int
1678 zbookmark_compare(const void *a, const void *b)
1679 {
1680 	return (memcmp(a, b, sizeof (zbookmark_t)));
1681 }
1682 
1683 /*
1684  * Retrieve the persistent error log, uniquify the members, and return to the
1685  * caller.
1686  */
1687 int
1688 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
1689 {
1690 	zfs_cmd_t zc = { 0 };
1691 	uint64_t count;
1692 	zbookmark_t *zb = NULL;
1693 	int i;
1694 
1695 	/*
1696 	 * Retrieve the raw error list from the kernel.  If the number of errors
1697 	 * has increased, allocate more space and continue until we get the
1698 	 * entire list.
1699 	 */
1700 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1701 	    &count) == 0);
1702 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1703 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
1704 		return (-1);
1705 	zc.zc_nvlist_dst_size = count;
1706 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1707 	for (;;) {
1708 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1709 		    &zc) != 0) {
1710 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
1711 			if (errno == ENOMEM) {
1712 				count = zc.zc_nvlist_dst_size;
1713 				if ((zc.zc_nvlist_dst = (uintptr_t)
1714 				    zfs_alloc(zhp->zpool_hdl, count *
1715 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
1716 					return (-1);
1717 			} else {
1718 				return (-1);
1719 			}
1720 		} else {
1721 			break;
1722 		}
1723 	}
1724 
1725 	/*
1726 	 * Sort the resulting bookmarks.  This is a little confusing due to the
1727 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
1728 	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
1729 	 * _not_ copied as part of the process.  So we point the start of our
1730 	 * array appropriately and decrement the total number of elements.
1731 	 */
1732 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
1733 	    zc.zc_nvlist_dst_size;
1734 	count -= zc.zc_nvlist_dst_size;
1735 
1736 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1737 
1738 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
1739 
1740 	/*
1741 	 * Fill in the nverrlistp with nvlists of dataset and object numbers.
1742 	 */
1743 	for (i = 0; i < count; i++) {
1744 		nvlist_t *nv;
1745 
1746 		/* ignoring zb_blkid and zb_level for now */
1747 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
1748 		    zb[i-1].zb_object == zb[i].zb_object)
1749 			continue;
1750 
1751 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
1752 			goto nomem;
1753 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
1754 		    zb[i].zb_objset) != 0) {
1755 			nvlist_free(nv);
1756 			goto nomem;
1757 		}
1758 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
1759 		    zb[i].zb_object) != 0) {
1760 			nvlist_free(nv);
1761 			goto nomem;
1762 		}
1763 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
1764 			nvlist_free(nv);
1765 			goto nomem;
1766 		}
1767 		nvlist_free(nv);
1768 	}
1769 
1770 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1771 	return (0);
1772 
1773 nomem:
1774 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1775 	return (no_memory(zhp->zpool_hdl));
1776 }
1777 
1778 /*
1779  * Upgrade a ZFS pool to the latest on-disk version.
1780  */
1781 int
1782 zpool_upgrade(zpool_handle_t *zhp)
1783 {
1784 	zfs_cmd_t zc = { 0 };
1785 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1786 
1787 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1788 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1789 		return (zpool_standard_error_fmt(hdl, errno,
1790 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1791 		    zhp->zpool_name));
1792 	return (0);
1793 }
1794 
1795 /*
1796  * Log command history.
1797  *
1798  * 'zfs_cmd' is B_TRUE if we are logging a command for 'zfs'; B_FALSE if it
1799  * is for 'zpool'.  'pool_create' is B_TRUE if we are logging the creation of
1800  * the pool; B_FALSE otherwise.  'argc' and 'argv' are used to construct the
1801  * command string.
1802  */
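/*
 * For example (hypothetical invocation): with zfs_cmd == B_FALSE and argv of
 * { "zpool", "create", "tank", "mirror", "c0d0", "c1d0" }, the staged string
 * becomes "zpool create tank mirror c0d0 c1d0".
 */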
1803 void
1804 zpool_stage_history(libzfs_handle_t *hdl, int argc, char **argv,
1805     boolean_t zfs_cmd, boolean_t pool_create)
1806 {
1807 	char *cmd_buf;
1808 	int i;
1809 
1810 	if (hdl->libzfs_log_str != NULL) {
1811 		free(hdl->libzfs_log_str);
1812 	}
1813 
1814 	if ((hdl->libzfs_log_str = zfs_alloc(hdl, HIS_MAX_RECORD_LEN)) == NULL)
1815 		return;
1816 
1817 	hdl->libzfs_log_type =
1818 	    (pool_create == B_TRUE) ? LOG_CMD_POOL_CREATE : LOG_CMD_NORMAL;
1819 	cmd_buf = hdl->libzfs_log_str;
1820 
1821 	/* construct the command string */
1822 	(void) strlcpy(cmd_buf, zfs_cmd ? "zfs" : "zpool",
1823 	    HIS_MAX_RECORD_LEN);
1824 	for (i = 1; i < argc; i++) {
1825 		if (strlen(cmd_buf) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
1826 			break;
1827 		(void) strlcat(cmd_buf, " ", HIS_MAX_RECORD_LEN);
1828 		(void) strlcat(cmd_buf, argv[i], HIS_MAX_RECORD_LEN);
1829 	}
1830 }
1831 
1832 /*
1833  * Perform ioctl to get some command history of a pool.
1834  *
1835  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
1836  * logical offset of the history buffer to start reading from.
1837  *
1838  * Upon return, 'off' is the next logical offset to read from and
1839  * 'len' is the actual amount of bytes read into 'buf'.
1840  */
1841 static int
1842 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
1843 {
1844 	zfs_cmd_t zc = { 0 };
1845 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1846 
1847 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1848 
1849 	zc.zc_history = (uint64_t)(uintptr_t)buf;
1850 	zc.zc_history_len = *len;
1851 	zc.zc_history_offset = *off;
1852 
1853 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
1854 		switch (errno) {
1855 		case EPERM:
1856 			return (zfs_error_fmt(hdl, EZFS_PERM,
1857 			    dgettext(TEXT_DOMAIN,
1858 			    "cannot show history for pool '%s'"),
1859 			    zhp->zpool_name));
1860 		case ENOENT:
1861 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
1862 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1863 			    "'%s'"), zhp->zpool_name));
1864 		case ENOTSUP:
1865 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
1866 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1867 			    "'%s', pool must be upgraded"), zhp->zpool_name));
1868 		default:
1869 			return (zpool_standard_error_fmt(hdl, errno,
1870 			    dgettext(TEXT_DOMAIN,
1871 			    "cannot get history for '%s'"), zhp->zpool_name));
1872 		}
1873 	}
1874 
1875 	*len = zc.zc_history_len;
1876 	*off = zc.zc_history_offset;
1877 
1878 	return (0);
1879 }
1880 
1881 /*
1882  * Process the buffer of nvlists, unpacking and storing each nvlist record
1883  * into 'records'.  'leftover' is set to the number of bytes that weren't
1884  * processed as there wasn't a complete record.
1885  */
1886 static int
1887 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
1888     nvlist_t ***records, uint_t *numrecords)
1889 {
1890 	uint64_t reclen;
1891 	nvlist_t *nv;
1892 	int i;
1893 
1894 	while (bytes_read > sizeof (reclen)) {
1895 
1896 		/* get length of packed record (stored as little endian) */
1897 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
1898 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
1899 
1900 		if (bytes_read < sizeof (reclen) + reclen)
1901 			break;
1902 
1903 		/* unpack record */
1904 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
1905 			return (ENOMEM);
1906 		bytes_read -= sizeof (reclen) + reclen;
1907 		buf += sizeof (reclen) + reclen;
1908 
1909 		/* add record to nvlist array */
1910 		(*numrecords)++;
1911 		if (ISP2(*numrecords + 1)) {
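		/*
		 * Grow the array geometrically: when the new count is one
		 * below a power of two, double the allocation.
		 */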
1912 			*records = realloc(*records,
1913 			    *numrecords * 2 * sizeof (nvlist_t *));
1914 		}
1915 		(*records)[*numrecords - 1] = nv;
1916 	}
1917 
1918 	*leftover = bytes_read;
1919 	return (0);
1920 }
1921 
1922 #define	HIS_BUF_LEN	(128*1024)
1923 
1924 /*
1925  * Retrieve the command history of a pool.
1926  */
1927 int
1928 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
1929 {
1930 	char buf[HIS_BUF_LEN];
1931 	uint64_t off = 0;
1932 	nvlist_t **records = NULL;
1933 	uint_t numrecords = 0;
1934 	int err, i;
1935 
1936 	do {
1937 		uint64_t bytes_read = sizeof (buf);
1938 		uint64_t leftover;
1939 
1940 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
1941 			break;
1942 
1943 		/* if nothing else was read in, we're at EOF, just return */
1944 		if (!bytes_read)
1945 			break;
1946 
1947 		if ((err = zpool_history_unpack(buf, bytes_read,
1948 		    &leftover, &records, &numrecords)) != 0)
1949 			break;
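		/*
		 * Back the offset up over any partial record at the end of
		 * the buffer so it is re-read on the next pass.
		 */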
1950 		off -= leftover;
1951 
1952 		/* CONSTCOND */
1953 	} while (1);
1954 
1955 	if (!err) {
1956 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
1957 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
1958 		    records, numrecords) == 0);
1959 	}
1960 	for (i = 0; i < numrecords; i++)
1961 		nvlist_free(records[i]);
1962 	free(records);
1963 
1964 	return (err);
1965 }
1966 
1967 void
1968 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
1969     char *pathname, size_t len)
1970 {
1971 	zfs_cmd_t zc = { 0 };
1972 	boolean_t mounted = B_FALSE;
1973 	char *mntpnt = NULL;
1974 	char dsname[MAXNAMELEN];
1975 
1976 	if (dsobj == 0) {
1977 		/* special case for the MOS */
1978 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
1979 		return;
1980 	}
1981 
1982 	/* get the dataset's name */
1983 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1984 	zc.zc_obj = dsobj;
1985 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
1986 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
1987 		/* just write out a path of two object numbers */
1988 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
1989 		    dsobj, obj);
1990 		return;
1991 	}
1992 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
1993 
1994 	/* find out if the dataset is mounted */
1995 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
1996 
1997 	/* get the corrupted object's path */
1998 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
1999 	zc.zc_obj = obj;
2000 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2001 	    &zc) == 0) {
2002 		if (mounted) {
2003 			(void) snprintf(pathname, len, "%s%s", mntpnt,
2004 			    zc.zc_value);
2005 		} else {
2006 			(void) snprintf(pathname, len, "%s:%s",
2007 			    dsname, zc.zc_value);
2008 		}
2009 	} else {
2010 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
2011 	}
2012 	free(mntpnt);
2013 }
2014 
2015 #define	RDISK_ROOT	"/dev/rdsk"
2016 #define	BACKUP_SLICE	"s2"
2017 /*
2018  * Don't start the slice at the default block of 34; many storage
2019  * devices will use a stripe width of 128k, so start there instead.
2020  */
2021 #define	NEW_START_BLOCK	256
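/*
 * With the usual 512-byte sectors, 256 blocks works out to 128K, matching
 * the stripe width mentioned above.
 */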
2022 
2023 /*
2024  * Determine where a partition starts on a disk in the current
2025  * configuration.
2026  */
2027 static diskaddr_t
2028 find_start_block(nvlist_t *config)
2029 {
2030 	nvlist_t **child;
2031 	uint_t c, children;
2032 	char *path;
2033 	diskaddr_t sb = MAXOFFSET_T;
2034 	int fd;
2035 	char diskname[MAXPATHLEN];
2036 	uint64_t wholedisk;
2037 
2038 	if (nvlist_lookup_nvlist_array(config,
2039 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2040 		if (nvlist_lookup_uint64(config,
2041 		    ZPOOL_CONFIG_WHOLE_DISK,
2042 		    &wholedisk) != 0 || !wholedisk) {
2043 			return (MAXOFFSET_T);
2044 		}
2045 		if (nvlist_lookup_string(config,
2046 		    ZPOOL_CONFIG_PATH, &path) != 0) {
2047 			return (MAXOFFSET_T);
2048 		}
2049 
2050 		(void) snprintf(diskname, sizeof (diskname), "%s%s",
2051 		    RDISK_ROOT, strrchr(path, '/'));
2052 		if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2053 			struct dk_gpt *vtoc;
2054 			if (efi_alloc_and_read(fd, &vtoc) >= 0) {
2055 				sb = vtoc->efi_parts[0].p_start;
2056 				efi_free(vtoc);
2057 			}
2058 			(void) close(fd);
2059 		}
2060 		return (sb);
2061 	}
2062 
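	/* this vdev has children; recurse until one yields a start block */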
2063 	for (c = 0; c < children; c++) {
2064 		sb = find_start_block(child[c]);
2065 		if (sb != MAXOFFSET_T) {
2066 			return (sb);
2067 		}
2068 	}
2069 	return (MAXOFFSET_T);
2070 }
2071 
2072 /*
2073  * Label an individual disk.  The name provided is the short name,
2074  * stripped of any leading /dev path.
2075  */
2076 int
2077 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2078 {
2079 	char path[MAXPATHLEN];
2080 	struct dk_gpt *vtoc;
2081 	int fd;
2082 	size_t resv = EFI_MIN_RESV_SIZE;
2083 	uint64_t slice_size;
2084 	diskaddr_t start_block;
2085 	char errbuf[1024];
2086 
	/* prepare an error message in case the labeling below fails */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

2087 	if (zhp) {
2088 		nvlist_t *nvroot;
2089 
2090 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2091 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2092 
2093 		if (zhp->zpool_start_block == 0)
2094 			start_block = find_start_block(nvroot);
2095 		else
2096 			start_block = zhp->zpool_start_block;
2097 		zhp->zpool_start_block = start_block;
2098 	} else {
2099 		/* new pool */
2100 		start_block = NEW_START_BLOCK;
2101 	}
2102 
2103 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2104 	    BACKUP_SLICE);
2105 
2106 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2107 		/*
2108 		 * This shouldn't happen.  We've long since verified that this
2109 		 * is a valid device.
2110 		 */
2111 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2112 		    "label '%s': unable to open device"), name);
2113 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2114 	}
2115 
2116 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2117 		/*
2118 		 * The only way this can fail is if we run out of memory, or we
2119 		 * were unable to read the disk's capacity
2120 		 */
2121 		if (errno == ENOMEM)
2122 			(void) no_memory(hdl);
2123 
2124 		(void) close(fd);
2125 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2126 		    "label '%s': unable to read disk capacity"), name);
2127 
2128 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2129 	}
2130 
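	/*
	 * Slice 0 spans everything from the chosen start block up to the
	 * reserved area at the end of the disk.
	 */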
2131 	slice_size = vtoc->efi_last_u_lba + 1;
2132 	slice_size -= EFI_MIN_RESV_SIZE;
2133 	if (start_block == MAXOFFSET_T)
2134 		start_block = NEW_START_BLOCK;
2135 	slice_size -= start_block;
2136 
2137 	vtoc->efi_parts[0].p_start = start_block;
2138 	vtoc->efi_parts[0].p_size = slice_size;
2139 
2140 	/*
2141 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2142 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2143 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2144 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2145 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2146 	 * can get, in the absence of V_OTHER.
2147 	 */
2148 	vtoc->efi_parts[0].p_tag = V_USR;
2149 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2150 
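	/* slice 8 holds the EFI reserved area at the end of the disk */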
2151 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2152 	vtoc->efi_parts[8].p_size = resv;
2153 	vtoc->efi_parts[8].p_tag = V_RESERVED;
2154 
2155 	if (efi_write(fd, vtoc) != 0) {
2156 		/*
2157 		 * Some block drivers (like pcata) may not support EFI
2158 		 * GPT labels.  Print out a helpful error message directing
2159 		 * the user to manually label the disk and provide a
2160 		 * specific slice.
2161 		 */
2162 		(void) close(fd);
2163 		efi_free(vtoc);
2164 
2165 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2166 		    "cannot label '%s': try using fdisk(1M) and then "
2167 		    "provide a specific slice"), name);
2168 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2169 	}
2170 
2171 	(void) close(fd);
2172 	efi_free(vtoc);
2173 	return (0);
2174 }
2175 
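/*
 * Set the given pool property to the requested value.  The property is
 * validated first, then applied with the pool properties ioctl.
 */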
2176 int
2177 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
2178 {
2179 	zfs_cmd_t zc = { 0 };
2180 	int ret = -1;
2181 	char errbuf[1024];
2182 	nvlist_t *nvl = NULL;
2183 	nvlist_t *realprops;
2184 
2185 	(void) snprintf(errbuf, sizeof (errbuf),
2186 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
2187 	    zhp->zpool_name);
2188 
2189 	if (zpool_get_version(zhp) < SPA_VERSION_BOOTFS) {
2190 		zfs_error_aux(zhp->zpool_hdl,
2191 		    dgettext(TEXT_DOMAIN, "pool must be "
2192 		    "upgraded to support pool properties"));
2193 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
2194 	}
2195 
2196 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2197 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
2198 
2199 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
2200 	    nvlist_add_string(nvl, propname, propval) != 0) {
2201 		return (no_memory(zhp->zpool_hdl));
2202 	}
2203 
2204 	if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
2205 	    zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
2206 		nvlist_free(nvl);
2207 		return (-1);
2208 	}
2209 
2210 	nvlist_free(nvl);
2211 	nvl = realprops;
2212 
2213 	/*
2214 	 * Execute the corresponding ioctl() to set this property.
2215 	 */
2216 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2217 
2218 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
2219 		return (-1);
2220 
2221 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
2222 	zcmd_free_nvlists(&zc);
2223 
2224 	if (ret)
2225 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
2226 
2227 	return (ret);
2228 }
2229 
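/*
 * Return the numeric value of the given pool property.  Only the
 * properties handled in the switch below are supported; asking for
 * anything else is a programming error.
 */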
2230 uint64_t
2231 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop)
2232 {
2233 	uint64_t value;
2234 	nvlist_t *nvp;
2235 
2236 	if (zpool_get_version(zhp) < SPA_VERSION_BOOTFS)
2237 		return (0);
2238 
2239 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2240 		return (zpool_prop_default_numeric(prop));
2241 
2242 	switch (prop) {
2243 	case ZPOOL_PROP_AUTOREPLACE:
2244 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2245 		    zpool_prop_to_name(prop), &nvp) != 0) {
2246 			value = zpool_prop_default_numeric(prop);
2247 		} else {
2248 			VERIFY(nvlist_lookup_uint64(nvp, ZFS_PROP_VALUE,
2249 			    &value) == 0);
2250 		}
2251 		return (value);
2252 		break;
2253 
2254 	default:
2255 		assert(0);
2256 	}
2257 
2258 	return (0);
2259 }
2260 
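/*
 * Retrieve the given pool property as a printable string in 'propbuf',
 * optionally reporting where the value came from through 'srctype'.
 */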
2261 int
2262 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
2263     size_t proplen, zfs_source_t *srctype)
2264 {
2265 	uint64_t value;
2266 	char msg[1024], *strvalue;
2267 	nvlist_t *nvp;
2268 	zfs_source_t src = ZFS_SRC_NONE;
2269 
2270 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2271 	    "cannot get property '%s'"), zpool_prop_to_name(prop));
2272 
2273 	if (zpool_get_version(zhp) < SPA_VERSION_BOOTFS) {
2274 		zfs_error_aux(zhp->zpool_hdl,
2275 		    dgettext(TEXT_DOMAIN, "pool must be "
2276 		    "upgraded to support pool properties"));
2277 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
2278 	}
2279 
2280 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
2281 	    prop != ZPOOL_PROP_NAME)
2282 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));
2283 
2284 	switch (prop) {
2285 	case ZPOOL_PROP_NAME:
2286 		(void) strlcpy(propbuf, zhp->zpool_name, proplen);
2287 		break;
2288 
2289 	case ZPOOL_PROP_BOOTFS:
2290 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2291 		    zpool_prop_to_name(prop), &nvp) != 0) {
2292 			strvalue = (char *)zfs_prop_default_string(prop);
2293 			if (strvalue == NULL)
2294 				strvalue = "-";
2295 			src = ZFS_SRC_DEFAULT;
2296 		} else {
2297 			VERIFY(nvlist_lookup_uint64(nvp,
2298 			    ZFS_PROP_SOURCE, &value) == 0);
2299 			src = value;
2300 			VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
2301 			    &strvalue) == 0);
2302 			if (strlen(strvalue) >= proplen)
2303 				return (-1);
2304 		}
2305 		(void) strlcpy(propbuf, strvalue, proplen);
2306 		break;
2307 
2308 	case ZPOOL_PROP_DELEGATION:
2309 	case ZPOOL_PROP_AUTOREPLACE:
2310 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2311 		    zpool_prop_to_name(prop), &nvp) != 0) {
2312 			value = zpool_prop_default_numeric(prop);
2313 			src = ZFS_SRC_DEFAULT;
2314 		} else {
2315 			VERIFY(nvlist_lookup_uint64(nvp,
2316 			    ZFS_PROP_SOURCE, &value) == 0);
2317 			src = value;
2318 			VERIFY(nvlist_lookup_uint64(nvp, ZFS_PROP_VALUE,
2319 			    &value) == 0);
2320 		}
2321 		(void) strlcpy(propbuf, value ? "on" : "off", proplen);
2322 		break;
2323 
2324 	default:
2325 		return (-1);
2326 	}
2327 	if (srctype)
2328 		*srctype = src;
2329 	return (0);
2330 }
2331 
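/*
 * Build a property list from the comma-separated names in 'fields',
 * restricted to pool properties.
 */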
2332 int
2333 zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
2334 {
2335 	return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
2336 }
2337 
2338 
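/*
 * Expand the property list and widen each entry's column to fit this
 * pool's current value, so that tabular output lines up.
 */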
2339 int
2340 zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
2341 {
2342 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2343 	zpool_proplist_t *entry;
2344 	char buf[ZFS_MAXPROPLEN];
2345 
2346 	if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
2347 		return (-1);
2348 
2349 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
2350 
2351 		if (entry->pl_fixed)
2352 			continue;
2353 
2354 		if (entry->pl_prop != ZFS_PROP_INVAL &&
2355 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
2356 		    NULL) == 0) {
2357 			if (strlen(buf) > entry->pl_width)
2358 				entry->pl_width = strlen(buf);
2359 		}
2360 	}
2361 
2362 	return (0);
2363 }
2364