xref: /titanic_44/usr/src/lib/libzfs/common/libzfs_pool.c (revision 587032cf0967234b39ccb50adca936a367841063)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <assert.h>
29 #include <ctype.h>
30 #include <errno.h>
31 #include <devid.h>
32 #include <fcntl.h>
33 #include <libintl.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <unistd.h>
38 #include <sys/zfs_ioctl.h>
39 #include <sys/zio.h>
40 
41 #include "zfs_namecheck.h"
42 #include "libzfs_impl.h"
43 
44 /*
45  * Validate the given pool name, optionally putting an extended error message in
46  * 'buf'.
47  */
48 static int
49 zpool_name_valid(const char *pool, boolean_t isopen, char *buf, size_t buflen)
50 {
51 	namecheck_err_t why;
52 	char what;
53 	int ret;
54 
55 	ret = pool_namecheck(pool, &why, &what);
56 
57 	/*
58 	 * The rules for reserved pool names were extended at a later point.
59 	 * But we need to support users with existing pools that may now be
60 	 * invalid.  So we only check for this expanded set of names during a
61 	 * create (or import), and only in userland.
62 	 */
63 	if (ret == 0 && !isopen &&
64 	    (strncmp(pool, "mirror", 6) == 0 ||
65 	    strncmp(pool, "raidz", 5) == 0 ||
66 	    strncmp(pool, "spare", 5) == 0)) {
67 		ret = -1;
68 		why = NAME_ERR_RESERVED;
69 	}
70 
71 
72 	if (ret != 0) {
73 		if (buf != NULL) {
74 			switch (why) {
75 			case NAME_ERR_TOOLONG:
76 				(void) snprintf(buf, buflen,
77 				    dgettext(TEXT_DOMAIN, "name is too long"));
78 				break;
79 
80 			case NAME_ERR_INVALCHAR:
81 				(void) snprintf(buf, buflen,
82 				    dgettext(TEXT_DOMAIN, "invalid character "
83 				    "'%c' in pool name"), what);
84 				break;
85 
86 			case NAME_ERR_NOLETTER:
87 				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
88 				    "name must begin with a letter"), buflen);
89 				break;
90 
91 			case NAME_ERR_RESERVED:
92 				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
93 				    "name is reserved\n"
94 				    "pool name may have been omitted"), buflen);
95 				break;
96 
97 			case NAME_ERR_DISKLIKE:
98 				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
99 				    "pool name is reserved\n"
100 				    "pool name may have been omitted"), buflen);
101 				break;
102 			}
103 		}
104 		return (FALSE);
105 	}
106 
107 	return (TRUE);
108 }
109 
110 /*
111  * Set the pool-wide health based on the vdev state of the root vdev.
112  */
113 void
114 set_pool_health(nvlist_t *config)
115 {
116 	nvlist_t *nvroot;
117 	vdev_stat_t *vs;
118 	uint_t vsc;
119 	char *health;
120 
121 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
122 	    &nvroot) == 0);
123 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
124 	    (uint64_t **)&vs, &vsc) == 0);
125 
126 	switch (vs->vs_state) {
127 
128 	case VDEV_STATE_CLOSED:
129 	case VDEV_STATE_CANT_OPEN:
130 	case VDEV_STATE_OFFLINE:
131 		health = dgettext(TEXT_DOMAIN, "FAULTED");
132 		break;
133 
134 	case VDEV_STATE_DEGRADED:
135 		health = dgettext(TEXT_DOMAIN, "DEGRADED");
136 		break;
137 
138 	case VDEV_STATE_HEALTHY:
139 		health = dgettext(TEXT_DOMAIN, "ONLINE");
140 		break;
141 
142 	default:
143 		zfs_baderror(vs->vs_state);
144 	}
145 
146 	verify(nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH,
147 	    health) == 0);
148 }
149 
150 /*
151  * Open a handle to the given pool, even if the pool is currently in the FAULTED
152  * state.
153  */
154 zpool_handle_t *
155 zpool_open_canfail(const char *pool)
156 {
157 	zpool_handle_t *zhp;
158 	int error;
159 
160 	/*
161 	 * Make sure the pool name is valid.
162 	 */
163 	if (!zpool_name_valid(pool, B_TRUE, NULL, 0)) {
164 		zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': invalid "
165 		    "pool name"), pool);
166 		return (NULL);
167 	}
168 
169 	zhp = zfs_malloc(sizeof (zpool_handle_t));
170 
171 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
172 
173 	if ((error = zpool_refresh_stats(zhp)) != 0) {
174 		if (error == ENOENT || error == EINVAL) {
175 			zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': no "
176 			    "such pool"), pool);
177 			free(zhp);
178 			return (NULL);
179 		} else {
180 			zhp->zpool_state = POOL_STATE_UNAVAIL;
181 		}
182 	} else {
183 		zhp->zpool_state = POOL_STATE_ACTIVE;
184 	}
185 
186 	return (zhp);
187 }
188 
189 /*
190  * Like the above, but silent on error.  Used when iterating over pools (because
191  * the configuration cache may be out of date).
192  */
193 zpool_handle_t *
194 zpool_open_silent(const char *pool)
195 {
196 	zpool_handle_t *zhp;
197 	int error;
198 
199 	zhp = zfs_malloc(sizeof (zpool_handle_t));
200 
201 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
202 
203 	if ((error = zpool_refresh_stats(zhp)) != 0) {
204 		if (error == ENOENT || error == EINVAL) {
205 			free(zhp);
206 			return (NULL);
207 		} else {
208 			zhp->zpool_state = POOL_STATE_UNAVAIL;
209 		}
210 	} else {
211 		zhp->zpool_state = POOL_STATE_ACTIVE;
212 	}
213 
214 	return (zhp);
215 }
216 
217 /*
218  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
219  * state.
220  */
221 zpool_handle_t *
222 zpool_open(const char *pool)
223 {
224 	zpool_handle_t *zhp;
225 
226 	if ((zhp = zpool_open_canfail(pool)) == NULL)
227 		return (NULL);
228 
229 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
230 		zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': pool is "
231 		    "currently unavailable"), zhp->zpool_name);
232 		zfs_error(dgettext(TEXT_DOMAIN, "run 'zpool status %s' for "
233 		    "detailed information"), zhp->zpool_name);
234 		zpool_close(zhp);
235 		return (NULL);
236 	}
237 
238 	return (zhp);
239 }
240 
241 /*
242  * Close the handle.  Simply frees the memory associated with the handle.
243  */
244 void
245 zpool_close(zpool_handle_t *zhp)
246 {
247 	if (zhp->zpool_config)
248 		nvlist_free(zhp->zpool_config);
249 	if (zhp->zpool_old_config)
250 		nvlist_free(zhp->zpool_old_config);
251 	if (zhp->zpool_error_log) {
252 		int i;
253 		for (i = 0; i < zhp->zpool_error_count; i++)
254 			free(zhp->zpool_error_log[i]);
255 		free(zhp->zpool_error_log);
256 	}
257 	free(zhp);
258 }
259 
/*
 * Return the name of the pool.  The string is owned by the handle and remains
 * valid until zpool_close().
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	/* The name is cached in the handle at open time; no ioctl needed. */
	return (zhp->zpool_name);
}
268 
/*
 * Return the GUID of the pool, as recorded in the handle's cached config.
 */
uint64_t
zpool_get_guid(zpool_handle_t *zhp)
{
	uint64_t guid;

	/* Every pool config is expected to carry the pool GUID. */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	return (guid);
}
281 
/*
 * Return the amount of space currently consumed by the pool, taken from the
 * root vdev's cached statistics (vs_alloc).
 */
uint64_t
zpool_get_space_used(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	/* Extract the vdev_stat_t array embedded in the root vdev nvlist. */
	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_alloc);
}
299 
/*
 * Return the total space in the pool, taken from the root vdev's cached
 * statistics (vs_space).
 */
uint64_t
zpool_get_space_total(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	/* Extract the vdev_stat_t array embedded in the root vdev nvlist. */
	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_space);
}
317 
318 /*
319  * Return the alternate root for this pool, if any.
320  */
321 int
322 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
323 {
324 	zfs_cmd_t zc = { 0 };
325 
326 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
327 	if (zfs_ioctl(ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
328 	    zc.zc_root[0] == '\0')
329 		return (-1);
330 
331 	(void) strlcpy(buf, zc.zc_root, buflen);
332 
333 	return (0);
334 }
335 
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	/* State is established when the handle is opened or refreshed. */
	return (zhp->zpool_state);
}
344 
345 /*
346  * Create the named pool, using the provided vdev list.  It is assumed
347  * that the consumer has already validated the contents of the nvlist, so we
348  * don't have to worry about error semantics.
349  */
350 int
351 zpool_create(const char *pool, nvlist_t *nvroot, const char *altroot)
352 {
353 	zfs_cmd_t zc = { 0 };
354 	char *packed;
355 	size_t len;
356 	int err;
357 	char reason[64];
358 
359 	if (!zpool_name_valid(pool, B_FALSE, reason, sizeof (reason))) {
360 		zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': %s"),
361 		    pool, reason);
362 		return (-1);
363 	}
364 
365 	if (altroot != NULL && altroot[0] != '/') {
366 		zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': alternate "
367 		    "root '%s' must be a complete path"), pool, altroot);
368 		return (-1);
369 	}
370 
371 	if ((err = nvlist_size(nvroot, &len, NV_ENCODE_NATIVE)) != 0)
372 		zfs_baderror(err);
373 
374 	packed = zfs_malloc(len);
375 
376 	if ((err = nvlist_pack(nvroot, &packed, &len,
377 	    NV_ENCODE_NATIVE, 0)) != 0)
378 		zfs_baderror(err);
379 
380 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
381 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
382 	zc.zc_config_src_size = len;
383 
384 	if (altroot != NULL)
385 		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
386 
387 	if (zfs_ioctl(ZFS_IOC_POOL_CREATE, &zc) != 0) {
388 		switch (errno) {
389 		case EEXIST:
390 			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
391 			    "pool exists"), pool);
392 			break;
393 
394 		case EPERM:
395 			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
396 			    "permission denied"), pool);
397 			break;
398 
399 		case EBUSY:
400 			/*
401 			 * This can happen if the user has specified the same
402 			 * device multiple times.  We can't reliably detect this
403 			 * until we try to add it and see we already have a
404 			 * label.
405 			 */
406 			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
407 			    "one or more vdevs refer to the same device"),
408 			    pool);
409 			break;
410 
411 		case EOVERFLOW:
412 			/*
413 			 * This occurrs when one of the devices is below
414 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
415 			 * device was the problem device since there's no
416 			 * reliable way to determine device size from userland.
417 			 */
418 			{
419 				char buf[64];
420 
421 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
422 
423 				zfs_error(dgettext(TEXT_DOMAIN, "cannot "
424 				    "create '%s': one or more devices is less "
425 				    "than the minimum size (%s)"), pool,
426 				    buf);
427 			}
428 			break;
429 
430 		case ENAMETOOLONG:
431 			/*
432 			 * One of the vdevs has exceeded VDEV_SPEC_MAX length in
433 			 * its plaintext representation.
434 			 */
435 			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
436 			    "too many devices in a single vdev"), pool);
437 			break;
438 
439 		case EIO:
440 			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
441 			    "I/O error on one or more devices"), pool);
442 			break;
443 
444 		case ENXIO:
445 			/*
446 			 * This is unlikely to happen since we've verified that
447 			 * all the devices can be opened from userland, but it's
448 			 * still possible in some circumstances.
449 			 */
450 			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
451 			    "one or more devices is unavailable"), pool);
452 			break;
453 
454 		case ENOSPC:
455 			/*
456 			 * This can occur if we were incapable of writing to a
457 			 * file vdev because the underlying filesystem is out of
458 			 * space.  This is very similar to EOVERFLOW, but we'll
459 			 * produce a slightly different message.
460 			 */
461 			zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
462 			    "one or more devices is out of space"), pool);
463 			break;
464 
465 		default:
466 			zfs_baderror(errno);
467 		}
468 
469 		return (-1);
470 	}
471 
472 	free(packed);
473 
474 	/*
475 	 * If this is an alternate root pool, then we automatically set the
476 	 * moutnpoint of the root dataset to be '/'.
477 	 */
478 	if (altroot != NULL) {
479 		zfs_handle_t *zhp;
480 
481 		verify((zhp = zfs_open(pool, ZFS_TYPE_ANY)) != NULL);
482 		verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);
483 
484 		zfs_close(zhp);
485 	}
486 
487 	return (0);
488 }
489 
490 /*
491  * Destroy the given pool.  It is up to the caller to ensure that there are no
492  * datasets left in the pool.
493  */
494 int
495 zpool_destroy(zpool_handle_t *zhp)
496 {
497 	zfs_cmd_t zc = { 0 };
498 	zfs_handle_t *zfp = NULL;
499 
500 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
501 	    (zfp = zfs_open(zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
502 		return (-1);
503 
504 	if (zpool_remove_zvol_links(zhp) != NULL)
505 		return (-1);
506 
507 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
508 
509 	if (zfs_ioctl(ZFS_IOC_POOL_DESTROY, &zc) != 0) {
510 		switch (errno) {
511 		case EPERM:
512 			zfs_error(dgettext(TEXT_DOMAIN,
513 			    "cannot destroy '%s': permission denied"),
514 			    zhp->zpool_name);
515 			break;
516 
517 		case EBUSY:
518 			zfs_error(dgettext(TEXT_DOMAIN,
519 			    "cannot destroy '%s': pool busy"),
520 			    zhp->zpool_name);
521 			break;
522 
523 		case ENOENT:
524 			zfs_error(dgettext(TEXT_DOMAIN,
525 			    "cannot destroy '%s': no such pool"),
526 			    zhp->zpool_name);
527 			break;
528 
529 		case EROFS:
530 			zfs_error(dgettext(TEXT_DOMAIN,
531 			    "cannot destroy '%s': one or more devices is "
532 			    "read only, or '/' is mounted read only"),
533 			    zhp->zpool_name);
534 			break;
535 
536 		default:
537 			zfs_baderror(errno);
538 		}
539 
540 		if (zfp)
541 			zfs_close(zfp);
542 		return (-1);
543 	}
544 
545 	if (zfp) {
546 		remove_mountpoint(zfp);
547 		zfs_close(zfp);
548 	}
549 
550 	return (0);
551 }
552 
553 /*
554  * Add the given vdevs to the pool.  The caller must have already performed the
555  * necessary verification to ensure that the vdev specification is well-formed.
556  */
557 int
558 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
559 {
560 	char *packed;
561 	size_t len;
562 	zfs_cmd_t zc;
563 
564 	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);
565 
566 	packed = zfs_malloc(len);
567 
568 	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
569 
570 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
571 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
572 	zc.zc_config_src_size = len;
573 
574 	if (zfs_ioctl(ZFS_IOC_VDEV_ADD, &zc) != 0) {
575 		switch (errno) {
576 		case EPERM:
577 			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
578 			    "permission denied"), zhp->zpool_name);
579 			break;
580 
581 		case EBUSY:
582 			/*
583 			 * This can happen if the user has specified the same
584 			 * device multiple times.  We can't reliably detect this
585 			 * until we try to add it and see we already have a
586 			 * label.
587 			 */
588 			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
589 			    "one or more vdevs refer to the same device"),
590 			    zhp->zpool_name);
591 			break;
592 
593 		case ENAMETOOLONG:
594 			/*
595 			 * One of the vdevs has exceeded VDEV_SPEC_MAX length in
596 			 * its plaintext representation.
597 			 */
598 			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
599 			    "too many devices in a single vdev"),
600 			    zhp->zpool_name);
601 			break;
602 
603 		case ENXIO:
604 			/*
605 			 * This is unlikely to happen since we've verified that
606 			 * all the devices can be opened from userland, but it's
607 			 * still possible in some circumstances.
608 			 */
609 			zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
610 			    "one or more devices is unavailable"),
611 			    zhp->zpool_name);
612 			break;
613 
614 		case EOVERFLOW:
615 			/*
616 			 * This occurrs when one of the devices is below
617 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
618 			 * device was the problem device since there's no
619 			 * reliable way to determine device size from userland.
620 			 */
621 			{
622 				char buf[64];
623 
624 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
625 
626 				zfs_error(dgettext(TEXT_DOMAIN, "cannot "
627 				    "add to '%s': one or more devices is less "
628 				    "than the minimum size (%s)"),
629 				    zhp->zpool_name, buf);
630 			}
631 			break;
632 
633 		default:
634 			zfs_baderror(errno);
635 		}
636 
637 		return (-1);
638 	}
639 
640 	free(packed);
641 
642 	return (0);
643 }
644 
645 /*
646  * Exports the pool from the system.  The caller must ensure that there are no
647  * mounted datasets in the pool.
648  */
649 int
650 zpool_export(zpool_handle_t *zhp)
651 {
652 	zfs_cmd_t zc = { 0 };
653 
654 	if (zpool_remove_zvol_links(zhp) != 0)
655 		return (-1);
656 
657 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
658 
659 	if (zfs_ioctl(ZFS_IOC_POOL_EXPORT, &zc) != 0) {
660 		switch (errno) {
661 		case EPERM:
662 			zfs_error(dgettext(TEXT_DOMAIN,
663 			    "cannot export '%s': permission denied"),
664 			    zhp->zpool_name);
665 			break;
666 
667 		case EBUSY:
668 			zfs_error(dgettext(TEXT_DOMAIN,
669 			    "cannot export '%s': pool is in use"),
670 			    zhp->zpool_name);
671 			break;
672 
673 		case ENOENT:
674 			zfs_error(dgettext(TEXT_DOMAIN,
675 			    "cannot export '%s': no such pool"),
676 			    zhp->zpool_name);
677 			break;
678 
679 		default:
680 			zfs_baderror(errno);
681 		}
682 
683 		return (-1);
684 	}
685 
686 	return (0);
687 }
688 
689 /*
690  * Import the given pool using the known configuration.  The configuration
691  * should have come from zpool_find_import().  The 'newname' and 'altroot'
692  * parameters control whether the pool is imported with a different name or with
693  * an alternate root, respectively.
694  */
695 int
696 zpool_import(nvlist_t *config, const char *newname, const char *altroot)
697 {
698 	zfs_cmd_t zc;
699 	char *packed;
700 	size_t len;
701 	char *thename;
702 	char *origname;
703 	int ret;
704 
705 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
706 	    &origname) == 0);
707 
708 	if (newname != NULL) {
709 		if (!zpool_name_valid(newname, B_FALSE, NULL, 0)) {
710 			zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': "
711 			    "invalid pool name"), newname);
712 			return (-1);
713 		}
714 		thename = (char *)newname;
715 	} else {
716 		thename = origname;
717 	}
718 
719 	if (altroot != NULL && altroot[0] != '/') {
720 		zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': alternate "
721 		    "root '%s' must be a complete path"), thename,
722 		    altroot);
723 		return (-1);
724 	}
725 
726 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
727 
728 	if (altroot != NULL)
729 		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
730 	else
731 		zc.zc_root[0] = '\0';
732 
733 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
734 	    &zc.zc_guid) == 0);
735 
736 	verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);
737 
738 	packed = zfs_malloc(len);
739 
740 	verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
741 
742 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
743 	zc.zc_config_src_size = len;
744 
745 	ret = 0;
746 	if (zfs_ioctl(ZFS_IOC_POOL_IMPORT, &zc) != 0) {
747 		char desc[1024];
748 		if (newname == NULL)
749 			(void) snprintf(desc, sizeof (desc),
750 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
751 			    thename);
752 		else
753 			(void) snprintf(desc, sizeof (desc),
754 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
755 			    origname, thename);
756 
757 		switch (errno) {
758 		case EEXIST:
759 			/*
760 			 * A pool with that name already exists.
761 			 */
762 			zfs_error(dgettext(TEXT_DOMAIN, "%s: pool exists"),
763 			    desc);
764 			break;
765 
766 		case EPERM:
767 			/*
768 			 * The user doesn't have permission to create pools.
769 			 */
770 			zfs_error(dgettext(TEXT_DOMAIN, "%s: permission "
771 			    "denied"), desc);
772 			break;
773 
774 		case ENXIO:
775 		case EDOM:
776 			/*
777 			 * Device is unavailable, or vdev sum didn't match.
778 			 */
779 			zfs_error(dgettext(TEXT_DOMAIN, "%s: one or more "
780 			    "devices is unavailable"),
781 			    desc);
782 			break;
783 
784 		case ENOTSUP:
785 			/*
786 			 * Unsupported version.
787 			 */
788 			zfs_error(dgettext(TEXT_DOMAIN,
789 			    "%s: unsupported version"), desc);
790 			break;
791 
792 		default:
793 			zfs_baderror(errno);
794 		}
795 
796 		ret = -1;
797 	} else {
798 		zpool_handle_t *zhp;
799 		/*
800 		 * This should never fail, but play it safe anyway.
801 		 */
802 		if ((zhp = zpool_open_silent(thename)) != NULL) {
803 			ret = zpool_create_zvol_links(zhp);
804 			zpool_close(zhp);
805 		}
806 	}
807 
808 	free(packed);
809 	return (ret);
810 }
811 
812 /*
813  * Scrub the pool.
814  */
815 int
816 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
817 {
818 	zfs_cmd_t zc = { 0 };
819 	char msg[1024];
820 
821 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
822 	zc.zc_cookie = type;
823 
824 	if (zfs_ioctl(ZFS_IOC_POOL_SCRUB, &zc) == 0)
825 		return (0);
826 
827 	(void) snprintf(msg, sizeof (msg),
828 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
829 
830 	switch (errno) {
831 	    case EPERM:
832 		/*
833 		 * No permission to scrub this pool.
834 		 */
835 		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
836 		break;
837 
838 	    case EBUSY:
839 		/*
840 		 * Resilver in progress.
841 		 */
842 		zfs_error(dgettext(TEXT_DOMAIN, "%s: currently resilvering"),
843 		    msg);
844 		break;
845 
846 	    default:
847 		zfs_baderror(errno);
848 	}
849 	return (-1);
850 }
851 
/*
 * Recursively walk the vdev tree rooted at 'nv' looking for the vdev named by
 * 'search' (a /dev/dsk path), or, when 'search' is NULL, for the vdev whose
 * GUID equals 'guid'.  Returns the matching vdev's GUID, or 0 if no vdev in
 * this subtree matches.
 */
static uint64_t
vdev_to_guid(nvlist_t *nv, const char *search, uint64_t guid)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t ret, present;
	char *path;
	uint64_t wholedisk = 0;

	/* Every vdev nvlist carries its own GUID. */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &ret) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (ret == guid)
			return (ret);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		/* If the lookup fails, wholedisk keeps its default of 0. */
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (ret);
		} else if (strcmp(search, path) == 0) {
			return (ret);
		}
	}

	/* Leaf vdevs have no children; nothing more to search here. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (0);

	/* Depth-first search of the children; the first match wins. */
	for (c = 0; c < children; c++)
		if ((ret = vdev_to_guid(child[c], search, guid)) != 0)
			return (ret);

	return (0);
}
898 
899 /*
900  * Given a string describing a vdev, returns the matching GUID, or 0 if none.
901  */
902 uint64_t
903 zpool_vdev_to_guid(zpool_handle_t *zhp, const char *path)
904 {
905 	char buf[MAXPATHLEN];
906 	const char *search;
907 	char *end;
908 	nvlist_t *nvroot;
909 	uint64_t guid;
910 
911 	guid = strtoull(path, &end, 10);
912 	if (guid != 0 && *end == '\0') {
913 		search = NULL;
914 	} else if (path[0] != '/') {
915 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
916 		search = buf;
917 	} else {
918 		search = path;
919 	}
920 
921 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
922 	    &nvroot) == 0);
923 
924 	return (vdev_to_guid(nvroot, search, guid));
925 }
926 
927 /*
928  * Bring the specified vdev online
929  */
930 int
931 zpool_vdev_online(zpool_handle_t *zhp, const char *path)
932 {
933 	zfs_cmd_t zc = { 0 };
934 	char msg[1024];
935 
936 	(void) snprintf(msg, sizeof (msg),
937 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
938 
939 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
940 	if ((zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
941 		zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
942 		    msg);
943 		return (-1);
944 	}
945 
946 	if (zfs_ioctl(ZFS_IOC_VDEV_ONLINE, &zc) == 0)
947 		return (0);
948 
949 	switch (errno) {
950 	    case ENODEV:
951 		/*
952 		 * Device doesn't exist
953 		 */
954 		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
955 		break;
956 
957 	    case EPERM:
958 		/*
959 		 * No permission to bring this vdev online.
960 		 */
961 		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
962 		break;
963 
964 	    default:
965 		zfs_baderror(errno);
966 	}
967 	return (-1);
968 }
969 
970 /*
971  * Take the specified vdev offline
972  */
973 int
974 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
975 {
976 	zfs_cmd_t zc = { 0 };
977 	char msg[1024];
978 
979 	(void) snprintf(msg, sizeof (msg),
980 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
981 
982 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
983 	if ((zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
984 		zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
985 		    msg);
986 		return (-1);
987 	}
988 
989 	zc.zc_cookie = istmp;
990 
991 	if (zfs_ioctl(ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
992 		return (0);
993 
994 	switch (errno) {
995 	    case ENODEV:
996 		/*
997 		 * Device doesn't exist
998 		 */
999 		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
1000 		break;
1001 
1002 	    case EPERM:
1003 		/*
1004 		 * No permission to take this vdev offline.
1005 		 */
1006 		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
1007 		break;
1008 
1009 	    case EBUSY:
1010 		/*
1011 		 * There are no other replicas of this device.
1012 		 */
1013 		zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
1014 		break;
1015 
1016 	    default:
1017 		zfs_baderror(errno);
1018 	}
1019 	return (-1);
1020 }
1021 
1022 /*
1023  * Attach new_disk (fully described by nvroot) to old_disk.
1024  * If 'replacing' is specified, tne new disk will replace the old one.
1025  */
1026 int
1027 zpool_vdev_attach(zpool_handle_t *zhp,
1028     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1029 {
1030 	zfs_cmd_t zc = { 0 };
1031 	char msg[1024];
1032 	char *packed;
1033 	int ret;
1034 	size_t len;
1035 
1036 	if (replacing)
1037 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1038 		    "cannot replace %s with %s"), old_disk, new_disk);
1039 	else
1040 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1041 		    "cannot attach %s to %s"), new_disk, old_disk);
1042 
1043 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1044 	if ((zc.zc_guid = zpool_vdev_to_guid(zhp, old_disk)) == 0) {
1045 		zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
1046 		    msg);
1047 		return (-1);
1048 	}
1049 	zc.zc_cookie = replacing;
1050 
1051 	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);
1052 
1053 	packed = zfs_malloc(len);
1054 
1055 	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
1056 
1057 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
1058 	zc.zc_config_src_size = len;
1059 
1060 	ret = zfs_ioctl(ZFS_IOC_VDEV_ATTACH, &zc);
1061 
1062 	free(packed);
1063 
1064 	if (ret == 0)
1065 		return (0);
1066 
1067 	switch (errno) {
1068 	case EPERM:
1069 		/*
1070 		 * No permission to mess with the config.
1071 		 */
1072 		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
1073 		break;
1074 
1075 	case ENODEV:
1076 		/*
1077 		 * Device doesn't exist.
1078 		 */
1079 		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s not in pool"),
1080 		    msg, old_disk);
1081 		break;
1082 
1083 	case ENOTSUP:
1084 		/*
1085 		 * Can't attach to or replace this type of vdev.
1086 		 */
1087 		if (replacing)
1088 			zfs_error(dgettext(TEXT_DOMAIN,
1089 			    "%s: cannot replace a replacing device"), msg);
1090 		else
1091 			zfs_error(dgettext(TEXT_DOMAIN,
1092 			    "%s: attach is only applicable to mirrors"), msg);
1093 		break;
1094 
1095 	case EINVAL:
1096 		/*
1097 		 * The new device must be a single disk.
1098 		 */
1099 		zfs_error(dgettext(TEXT_DOMAIN,
1100 		    "%s: <new_device> must be a single disk"), msg);
1101 		break;
1102 
1103 	case ENXIO:
1104 		/*
1105 		 * This is unlikely to happen since we've verified that
1106 		 * all the devices can be opened from userland, but it's
1107 		 * still possible in some circumstances.
1108 		 */
1109 		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is unavailable"),
1110 		    msg, new_disk);
1111 		break;
1112 
1113 	case EBUSY:
1114 		/*
1115 		 * The new device is is use.
1116 		 */
1117 		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s busy"), msg, new_disk);
1118 		break;
1119 
1120 	case EOVERFLOW:
1121 		/*
1122 		 * The new device is too small.
1123 		 */
1124 		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is too small"),
1125 		    msg, new_disk);
1126 		break;
1127 
1128 	case EDOM:
1129 		/*
1130 		 * The new device has a different alignment requirement.
1131 		 */
1132 		zfs_error(dgettext(TEXT_DOMAIN,
1133 		    "%s: devices have different sector alignment"), msg);
1134 		break;
1135 
1136 	case ENAMETOOLONG:
1137 		/*
1138 		 * The resulting top-level vdev spec won't fit in the label.
1139 		 */
1140 		zfs_error(dgettext(TEXT_DOMAIN,
1141 		    "%s: too many devices in a single vdev"), msg);
1142 		break;
1143 
1144 	default:
1145 		zfs_baderror(errno);
1146 	}
1147 
1148 	return (1);
1149 }
1150 
1151 /*
1152  * Detach the specified device.
1153  */
1154 int
1155 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1156 {
1157 	zfs_cmd_t zc = { 0 };
1158 	char msg[1024];
1159 
1160 	(void) snprintf(msg, sizeof (msg),
1161 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1162 
1163 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1164 	if ((zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
1165 		zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
1166 		    msg);
1167 		return (-1);
1168 	}
1169 
1170 	if (zfs_ioctl(ZFS_IOC_VDEV_DETACH, &zc) == 0)
1171 		return (0);
1172 
1173 	switch (errno) {
1174 	case EPERM:
1175 		/*
1176 		 * No permission to mess with the config.
1177 		 */
1178 		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
1179 		break;
1180 
1181 	case ENODEV:
1182 		/*
1183 		 * Device doesn't exist.
1184 		 */
1185 		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
1186 		break;
1187 
1188 	case ENOTSUP:
1189 		/*
1190 		 * Can't detach from this type of vdev.
1191 		 */
1192 		zfs_error(dgettext(TEXT_DOMAIN,
1193 		    "%s: only applicable to mirror and replacing vdevs"), msg);
1194 		break;
1195 
1196 	case EBUSY:
1197 		/*
1198 		 * There are no other replicas of this device.
1199 		 */
1200 		zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
1201 		break;
1202 
1203 	default:
1204 		zfs_baderror(errno);
1205 	}
1206 
1207 	return (1);
1208 }
1209 
1210 /*
1211  * Clear the errors for the pool, or the particular device if specified.
1212  */
1213 int
1214 zpool_clear(zpool_handle_t *zhp, const char *path)
1215 {
1216 	zfs_cmd_t zc = { 0 };
1217 	char msg[1024];
1218 
1219 	if (path)
1220 		(void) snprintf(msg, sizeof (msg),
1221 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1222 		    zc.zc_prop_value);
1223 	else
1224 		(void) snprintf(msg, sizeof (msg),
1225 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1226 		    zhp->zpool_name);
1227 
1228 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1229 	if (path && (zc.zc_guid = zpool_vdev_to_guid(zhp, path)) == 0) {
1230 		zfs_error(dgettext(TEXT_DOMAIN, "%s: no such device in pool"),
1231 		    msg);
1232 		return (-1);
1233 	}
1234 
1235 	if (zfs_ioctl(ZFS_IOC_CLEAR, &zc) == 0)
1236 		return (0);
1237 
1238 	switch (errno) {
1239 	case EPERM:
1240 		/*
1241 		 * No permission to mess with the config.
1242 		 */
1243 		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
1244 		break;
1245 
1246 	case ENODEV:
1247 		/*
1248 		 * Device doesn't exist.
1249 		 */
1250 		zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
1251 		break;
1252 
1253 	default:
1254 		zfs_baderror(errno);
1255 	}
1256 
1257 	return (1);
1258 }
1259 
1260 static int
1261 do_zvol(zfs_handle_t *zhp, void *data)
1262 {
1263 	int linktype = (int)(uintptr_t)data;
1264 	int ret;
1265 
1266 	/*
1267 	 * We check for volblocksize intead of ZFS_TYPE_VOLUME so that we
1268 	 * correctly handle snapshots of volumes.
1269 	 */
1270 	if (zhp->zfs_volblocksize != 0) {
1271 		if (linktype)
1272 			ret = zvol_create_link(zhp->zfs_name);
1273 		else
1274 			ret = zvol_remove_link(zhp->zfs_name);
1275 	}
1276 
1277 	ret = zfs_iter_children(zhp, do_zvol, data);
1278 
1279 	zfs_close(zhp);
1280 	return (ret);
1281 }
1282 
1283 /*
1284  * Iterate over all zvols in the pool and make any necessary minor nodes.
1285  */
1286 int
1287 zpool_create_zvol_links(zpool_handle_t *zhp)
1288 {
1289 	zfs_handle_t *zfp;
1290 	int ret;
1291 
1292 	/*
1293 	 * If the pool is unavailable, just return success.
1294 	 */
1295 	if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
1296 		return (0);
1297 
1298 	ret = zfs_iter_children(zfp, do_zvol, (void *)TRUE);
1299 
1300 	zfs_close(zfp);
1301 	return (ret);
1302 }
1303 
1304 /*
 * Iterate over all zvols in the pool and remove any minor nodes.
1306  */
1307 int
1308 zpool_remove_zvol_links(zpool_handle_t *zhp)
1309 {
1310 	zfs_handle_t *zfp;
1311 	int ret;
1312 
1313 	/*
1314 	 * If the pool is unavailable, just return success.
1315 	 */
1316 	if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
1317 		return (0);
1318 
1319 	ret = zfs_iter_children(zfp, do_zvol, (void *)FALSE);
1320 
1321 	zfs_close(zfp);
1322 	return (ret);
1323 }
1324 
1325 /*
1326  * Convert from a devid string to a path.
1327  */
1328 static char *
1329 devid_to_path(char *devid_str)
1330 {
1331 	ddi_devid_t devid;
1332 	char *minor;
1333 	char *path;
1334 	devid_nmlist_t *list = NULL;
1335 	int ret;
1336 
1337 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1338 		return (NULL);
1339 
1340 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1341 
1342 	devid_str_free(minor);
1343 	devid_free(devid);
1344 
1345 	if (ret != 0)
1346 		return (NULL);
1347 
1348 	path = zfs_strdup(list[0].devname);
1349 	devid_free_nmlist(list);
1350 
1351 	return (path);
1352 }
1353 
1354 /*
1355  * Convert from a path to a devid string.
1356  */
1357 static char *
1358 path_to_devid(const char *path)
1359 {
1360 	int fd;
1361 	ddi_devid_t devid;
1362 	char *minor, *ret;
1363 
1364 	if ((fd = open(path, O_RDONLY)) < 0)
1365 		return (NULL);
1366 
1367 	minor = NULL;
1368 	ret = NULL;
1369 	if (devid_get(fd, &devid) == 0) {
1370 		if (devid_get_minor_name(fd, &minor) == 0)
1371 			ret = devid_str_encode(devid, minor);
1372 		if (minor != NULL)
1373 			devid_str_free(minor);
1374 		devid_free(devid);
1375 	}
1376 	(void) close(fd);
1377 
1378 	return (ret);
1379 }
1380 
1381 /*
1382  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1383  * ignore any failure here, since a common case is for an unprivileged user to
1384  * type 'zpool status', and we'll display the correct information anyway.
1385  */
1386 static void
1387 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1388 {
1389 	zfs_cmd_t zc = { 0 };
1390 
1391 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1392 	(void) strncpy(zc.zc_prop_value, path, sizeof (zc.zc_prop_value));
1393 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1394 	    &zc.zc_guid) == 0);
1395 
1396 	(void) zfs_ioctl(ZFS_IOC_VDEV_SETPATH, &zc);
1397 }
1398 
1399 /*
1400  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1401  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1402  * We also check if this is a whole disk, in which case we strip off the
1403  * trailing 's0' slice name.
1404  *
1405  * This routine is also responsible for identifying when disks have been
1406  * reconfigured in a new location.  The kernel will have opened the device by
1407  * devid, but the path will still refer to the old location.  To catch this, we
1408  * first do a path -> devid translation (which is fast for the common case).  If
1409  * the devid matches, we're done.  If not, we do a reverse devid -> path
1410  * translation and issue the appropriate ioctl() to update the path of the vdev.
1411  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1412  * of these checks.
1413  */
char *
zpool_vdev_name(zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		/*
		 * The device is not present; display its GUID (as a decimal
		 * string) instead of a path.
		 */
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu", value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * Only perform the devid reconciliation for live pools
		 * (zhp != NULL); exported pools are displayed as-is.
		 */
		if (zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately, both
					 * in the kernel (best effort, via
					 * set_path()) and in the in-memory
					 * config, then re-lookup 'path' so it
					 * points at the nvlist-owned copy.
					 */
					set_path(zhp, nv, newpath);
					verify(nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0);
					free(newpath);
					verify(nvlist_lookup_string(nv,
					    ZPOOL_CONFIG_PATH, &path) == 0);
				}

				if (newdevid)
					devid_str_free(newdevid);
			}

		}

		/* Strip the leading "/dev/dsk/" prefix for display. */
		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			/*
			 * Whole disk: drop the trailing two-character slice
			 * suffix ("s0").  NOTE(review): assumes the path
			 * always ends in a two-character slice name — confirm
			 * for paths with multi-digit slice numbers.
			 */
			char *tmp = zfs_strdup(path);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		/* No path at all; fall back to the vdev type name. */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
	}

	/* Caller owns the returned string. */
	return (zfs_strdup(path));
}
1473 
1474 static int
1475 zbookmark_compare(const void *a, const void *b)
1476 {
1477 	return (memcmp(a, b, sizeof (zbookmark_t)));
1478 }
1479 
1480 /*
1481  * Retrieve the persistent error log, uniquify the members, and return to the
1482  * caller.
1483  */
1484 int
1485 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
1486 {
1487 	zfs_cmd_t zc = { 0 };
1488 	uint64_t count;
1489 	zbookmark_t *zb;
1490 	int i, j;
1491 
1492 	if (zhp->zpool_error_log != NULL) {
1493 		*list = zhp->zpool_error_log;
1494 		*nelem = zhp->zpool_error_count;
1495 		return (0);
1496 	}
1497 
1498 	/*
1499 	 * Retrieve the raw error list from the kernel.  If the number of errors
1500 	 * has increased, allocate more space and continue until we get the
1501 	 * entire list.
1502 	 */
1503 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1504 	    &count) == 0);
1505 	zc.zc_config_dst = (uintptr_t)zfs_malloc(count * sizeof (zbookmark_t));
1506 	zc.zc_config_dst_size = count;
1507 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1508 	for (;;) {
1509 		if (zfs_ioctl(ZFS_IOC_ERROR_LOG, &zc) != 0) {
1510 			if (errno == ENOMEM) {
1511 				free((void *)(uintptr_t)zc.zc_config_dst);
1512 				zc.zc_config_dst = (uintptr_t)
1513 				    zfs_malloc(zc.zc_config_dst_size);
1514 			} else {
1515 				return (-1);
1516 			}
1517 		} else {
1518 			break;
1519 		}
1520 	}
1521 
1522 	/*
1523 	 * Sort the resulting bookmarks.  This is a little confusing due to the
1524 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
1525 	 * to first, and 'zc_config_dst_size' indicates the number of boomarks
1526 	 * _not_ copied as part of the process.  So we point the start of our
1527 	 * array appropriate and decrement the total number of elements.
1528 	 */
1529 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_config_dst) +
1530 	    zc.zc_config_dst_size;
1531 	count -= zc.zc_config_dst_size;
1532 
1533 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1534 
1535 	/*
1536 	 * Count the number of unique elements
1537 	 */
1538 	j = 0;
1539 	for (i = 0; i < count; i++) {
1540 		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
1541 		    sizeof (zbookmark_t)) == 0)
1542 			continue;
1543 		j++;
1544 	}
1545 
1546 	/*
1547 	 * If the user has only requested the number of items, return it now
1548 	 * without bothering with the extra work.
1549 	 */
1550 	if (list == NULL) {
1551 		*nelem = j;
1552 		return (0);
1553 	}
1554 
1555 	zhp->zpool_error_count = j;
1556 
1557 	/*
1558 	 * Allocate an array of nvlists to hold the results
1559 	 */
1560 	zhp->zpool_error_log = zfs_malloc(j * sizeof (nvlist_t *));
1561 
1562 	/*
1563 	 * Fill in the results with names from the kernel.
1564 	 */
1565 	j = 0;
1566 	for (i = 0; i < count; i++) {
1567 		char buf[64];
1568 		nvlist_t *nv;
1569 
1570 		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
1571 		    sizeof (zbookmark_t)) == 0)
1572 			continue;
1573 
1574 		verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
1575 		    0) == 0);
1576 		zhp->zpool_error_log[j] = nv;
1577 
1578 		zc.zc_bookmark = zb[i];
1579 		if (zfs_ioctl(ZFS_IOC_BOOKMARK_NAME, &zc) == 0) {
1580 			verify(nvlist_add_string(nv, ZPOOL_ERR_DATASET,
1581 			    zc.zc_prop_name) == 0);
1582 			verify(nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
1583 			    zc.zc_prop_value) == 0);
1584 			verify(nvlist_add_string(nv, ZPOOL_ERR_RANGE,
1585 			    zc.zc_filename) == 0);
1586 		} else {
1587 			(void) snprintf(buf, sizeof (buf), "%llx",
1588 			    zb[i].zb_objset);
1589 			verify(nvlist_add_string(nv,
1590 			    ZPOOL_ERR_DATASET, buf) == 0);
1591 			(void) snprintf(buf, sizeof (buf), "%llx",
1592 			    zb[i].zb_object);
1593 			verify(nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
1594 			    buf) == 0);
1595 			(void) snprintf(buf, sizeof (buf), "lvl=%u blkid=%llu",
1596 			    (int)zb[i].zb_level, (long long)zb[i].zb_blkid);
1597 			verify(nvlist_add_string(nv, ZPOOL_ERR_RANGE,
1598 			    buf) == 0);
1599 		}
1600 
1601 		j++;
1602 	}
1603 
1604 	*list = zhp->zpool_error_log;
1605 	*nelem = zhp->zpool_error_count;
1606 
1607 	free((void *)(uintptr_t)zc.zc_config_dst);
1608 
1609 	return (0);
1610 }
1611 
1612 /*
1613  * Upgrade a ZFS pool to the latest on-disk version.
1614  */
1615 int
1616 zpool_upgrade(zpool_handle_t *zhp)
1617 {
1618 	zfs_cmd_t zc = { 0 };
1619 
1620 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1621 	if (zfs_ioctl(ZFS_IOC_POOL_UPGRADE, &zc) != 0) {
1622 		switch (errno) {
1623 		case EPERM:
1624 			zfs_error(dgettext(TEXT_DOMAIN, "cannot upgrade '%s': "
1625 			    "permission denied"), zhp->zpool_name);
1626 			break;
1627 		default:
1628 			zfs_baderror(errno);
1629 		}
1630 
1631 		return (-1);
1632 	}
1633 
1634 	return (0);
1635 }
1636