xref: /titanic_41/usr/src/lib/libzfs/common/libzfs_pool.c (revision 62e6146930fb254c3db3332a4597f6fffe081b2c)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"

/*
 * Validate the given pool name, optionally setting an extended error message
 * via zfs_error_aux() on 'hdl' if it is non-NULL.
 */
static boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
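	 *
	 * For example (illustrative): creating or importing a pool named
	 * "mirror2" or "spare1" is rejected by the check below, while an
	 * existing pool with such a name can still be opened.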
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Set the pool-wide health based on the vdev state of the root vdev.
 */
int
set_pool_health(nvlist_t *config)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	char *health;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	switch (vs->vs_state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_CANT_OPEN:
	case VDEV_STATE_OFFLINE:
		health = dgettext(TEXT_DOMAIN, "FAULTED");
		break;

	case VDEV_STATE_DEGRADED:
		health = dgettext(TEXT_DOMAIN, "DEGRADED");
		break;

	case VDEV_STATE_HEALTHY:
		health = dgettext(TEXT_DOMAIN, "ONLINE");
		break;

	default:
		abort();
	}

	return (nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH, health));
}

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "no such pool"));
		(void) zfs_error(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
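
/*
 * Typical consumer usage (an illustrative sketch only; 'g_zfs' is assumed
 * to be a libzfs handle obtained from libzfs_init()):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(g_zfs, "tank")) == NULL)
 *		return (1);
 *	(void) printf("pool %s, version %llu\n", zpool_get_name(zhp),
 *	    (u_longlong_t)zpool_get_version(zhp));
 *	zpool_close(zhp);
 */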

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_error_log) {
		int i;
		for (i = 0; i < zhp->zpool_error_count; i++)
			nvlist_free(zhp->zpool_error_log[i]);
		free(zhp->zpool_error_log);
	}
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the GUID of the pool.
 */
uint64_t
zpool_get_guid(zpool_handle_t *zhp)
{
	uint64_t guid;

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	return (guid);
}

/*
 * Return the version of the pool.
 */
uint64_t
zpool_get_version(zpool_handle_t *zhp)
{
	uint64_t version;

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);

	return (version);
}

/*
 * Return the amount of space currently consumed by the pool.
 */
uint64_t
zpool_get_space_used(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_alloc);
}

/*
 * Return the total space in the pool.
 */
uint64_t
zpool_get_space_total(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_space);
}
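
/*
 * For example (illustrative), the capacity percentage that zpool(1M)
 * reports can be derived from the two functions above:
 *
 *	uint64_t used = zpool_get_space_used(zhp);
 *	uint64_t total = zpool_get_space_total(zhp);
 *	uint64_t capacity = (used * 100) / total;
 */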

/*
 * Return the alternate root for this pool, if any.
 */
int
zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
	    zc.zc_root[0] == '\0')
		return (-1);

	(void) strlcpy(buf, zc.zc_root, buflen);

	return (0);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *packed;
	size_t len;
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));

	if (nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) != 0)
		return (no_memory(hdl));

	if ((packed = zfs_alloc(hdl, len)) == NULL)
		return (-1);

	if (nvlist_pack(nvroot, &packed, &len,
	    NV_ENCODE_NATIVE, 0) != 0) {
		free(packed);
		return (no_memory(hdl));
	}

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
		free(packed);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	free(packed);

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (altroot != NULL) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
		verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);

		zfs_close(zhp);
	}

	return (0);
}
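
/*
 * A minimal, illustrative sketch of building the 'nvroot' argument for
 * zpool_create() (zpool(1M) constructs this from its command line; the
 * device path "/dev/dsk/c0t0d0s0" below is hypothetical):
 *
 *	nvlist_t *root, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t0d0s0") == 0);
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *	error = zpool_create(hdl, "tank", root, NULL);
 */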

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0) {
		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	char *packed;
	size_t len;
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares;
	uint_t nspares;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

	if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
		return (-1);

	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add raidz2 vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	free(packed);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
		return (zpool_standard_error(zhp->zpool_hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
		    zhp->zpool_name));

	return (0);
}

/*
 * Import the given pool using the known configuration.  The configuration
 * should have come from zpool_find_import().  The 'newname' and 'altroot'
 * parameters control whether the pool is imported with a different name or
 * with an alternate root, respectively.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *packed;
	size_t len;
	char *thename;
	char *origname;
	int ret;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
		    altroot));

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
	else
		zc.zc_root[0] = '\0';

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);

	if ((packed = zfs_alloc(hdl, len)) == NULL)
		return (-1);

	verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	ret = 0;
	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;
		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
			ret = -1;
		} else if (zhp != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}
	}

	free(packed);
	return (ret);
}

/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}
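
/*
 * For example (illustrative), 'zpool scrub tank' corresponds to
 * zpool_scrub(zhp, POOL_SCRUB_EVERYTHING), while 'zpool scrub -s tank'
 * corresponds to zpool_scrub(zhp, POOL_SCRUB_NONE) to stop a scrub in
 * progress.
 */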

static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *isspare)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
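			 * For example (illustrative), a stored path of
			 * "/dev/dsk/c0t0d0s0" matches a search string of
			 * "/dev/dsk/c0t0d0".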
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++)
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    isspare)) != NULL)
			return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    isspare)) != NULL) {
				*isspare = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

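/*
 * Locate the vdev (or hot spare) matching 'path' in the pool's config.
 * As handled below, 'path' may take three forms, illustrated by example:
 * a vdev GUID such as "9203738283008532310", a short device name such as
 * "c0t0d0" (to which "/dev/dsk/" is prepended), or a full path such as
 * "/dev/dsk/c0t0d0".  '*isspare' is set if the match is a hot spare.
 */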
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *isspare)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*isspare = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, isspare));
}

/*
 * Bring the specified vdev online.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t isspare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (isspare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Take the specified vdev offline.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t isspare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (isspare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	zc.zc_cookie = istmp;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Returns B_TRUE if 'tgt' appears as child 'which' (0 for the original
 * device, 1 for the active spare) of a 'spare' vdev anywhere under 'search';
 * that is, whether the device was originally swapped in as a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	char *packed;
	int ret;
	size_t len;
	nvlist_t *tgt;
	boolean_t isspare;
	uint64_t val;
	char *path;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &isspare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (isspare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    (zpool_find_vdev(zhp, path, &isspare) == NULL || !isspare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace with a spare, it cannot be applied
	 * to a device that has already been replaced by a spare.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, path, &isspare) != NULL && isspare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

	if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
		return (-1);

	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);

	free(packed);

	if (ret == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing)
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot replace a replacing device"));
		else
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t isspare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (isspare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Remove the given device.  Currently, this is supported only for hot spares.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t isspare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (!isspare) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only hot spares can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t isspare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		if (isspare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

static int
do_zvol(zfs_handle_t *zhp, void *data)
{
	int linktype = (int)(uintptr_t)data;
	int ret;

	/*
	 * We check for volblocksize instead of ZFS_TYPE_VOLUME so that we
	 * correctly handle snapshots of volumes.
	 */
	if (zhp->zfs_volblocksize != 0) {
		if (linktype)
			ret = zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
		else
			ret = zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name);
		if (ret != 0) {
			zfs_close(zhp);
			return (ret);
		}
	}

	ret = zfs_iter_children(zhp, do_zvol, data);

	zfs_close(zhp);
	return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol, (void *)B_TRUE);

	zfs_close(zfp);
	return (ret);
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol, (void *)B_FALSE);

	zfs_close(zfp);
	return (ret);
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * Free the nmlist before checking the strdup() result so the list
	 * isn't leaked if the allocation fails.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_prop_value, path, sizeof (zc.zc_prop_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
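 * (For example, a whole disk stored as "/dev/dsk/c0t0d0s0" is displayed
 * simply as "c0t0d0".)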
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		if (zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(hdl, path);
			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}

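/*
 * Compare two zbookmark_t values with memcmp().  The resulting order is
 * arbitrary but total, which is all the qsort() in zpool_get_errlog() needs
 * in order to bring duplicate bookmarks next to each other for uniquifying.
 */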
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb;
	int i, j;

	if (zhp->zpool_error_log != NULL) {
		*list = zhp->zpool_error_log;
		*nelem = zhp->zpool_error_count;
		return (0);
	}

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if ((zc.zc_config_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == 0)
		return (-1);
	zc.zc_config_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_config_dst);
			if (errno == ENOMEM) {
				count = zc.zc_config_dst_size;
				if ((zc.zc_config_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == 0)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_config_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
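	 * For example, if the buffer had room for 100 bookmarks and 25 slots
	 * were left unfilled, the valid entries occupy indices 25..99 and
	 * 'count' becomes 75.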
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_config_dst) +
	    zc.zc_config_dst_size;
	count -= zc.zc_config_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	/*
	 * Count the number of unique elements.
	 */
	j = 0;
	for (i = 0; i < count; i++) {
		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
		    sizeof (zbookmark_t)) == 0)
			continue;
		j++;
	}

	/*
	 * If the user has only requested the number of items, return it now
	 * without bothering with the extra work.
	 */
	if (list == NULL) {
		*nelem = j;
		free((void *)(uintptr_t)zc.zc_config_dst);
		return (0);
	}

	zhp->zpool_error_count = j;

	/*
	 * Allocate an array of nvlists to hold the results.
	 */
	if ((zhp->zpool_error_log = zfs_alloc(zhp->zpool_hdl,
	    j * sizeof (nvlist_t *))) == NULL) {
		free((void *)(uintptr_t)zc.zc_config_dst);
		return (-1);
	}

	/*
	 * Fill in the results with names from the kernel.
	 */
	j = 0;
	for (i = 0; i < count; i++) {
		char buf[64];
		nvlist_t *nv;

		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
		    sizeof (zbookmark_t)) == 0)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME,
		    0) != 0)
			goto nomem;
		zhp->zpool_error_log[j] = nv;

		zc.zc_bookmark = zb[i];
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_BOOKMARK_NAME,
		    &zc) == 0) {
			if (nvlist_add_string(nv, ZPOOL_ERR_DATASET,
			    zc.zc_prop_name) != 0 ||
			    nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
			    zc.zc_prop_value) != 0 ||
			    nvlist_add_string(nv, ZPOOL_ERR_RANGE,
			    zc.zc_filename) != 0)
				goto nomem;
		} else {
			(void) snprintf(buf, sizeof (buf), "%llx",
			    (u_longlong_t)zb[i].zb_objset);
			if (nvlist_add_string(nv,
			    ZPOOL_ERR_DATASET, buf) != 0)
				goto nomem;
			(void) snprintf(buf, sizeof (buf), "%llx",
			    (u_longlong_t)zb[i].zb_object);
			if (nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
			    buf) != 0)
				goto nomem;
			(void) snprintf(buf, sizeof (buf), "lvl=%u blkid=%llu",
			    (int)zb[i].zb_level, (long long)zb[i].zb_blkid);
			if (nvlist_add_string(nv, ZPOOL_ERR_RANGE,
			    buf) != 0)
				goto nomem;
		}

		j++;
	}

	*list = zhp->zpool_error_log;
	*nelem = zhp->zpool_error_count;

	free((void *)(uintptr_t)zc.zc_config_dst);

	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_config_dst);
	for (i = 0; i < zhp->zpool_error_count; i++)
		nvlist_free(zhp->zpool_error_log[i]);
	free(zhp->zpool_error_log);
	zhp->zpool_error_log = NULL;
	return (no_memory(zhp->zpool_hdl));
}
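
/*
 * Illustrative usage (error handling omitted).  The returned array and its
 * entries are cached on the handle and freed by zpool_close():
 *
 *	nvlist_t **log;
 *	size_t nelem;
 *
 *	if (zpool_get_errlog(zhp, &log, &nelem) == 0)
 *		(void) printf("%u persistent errors\n", (uint_t)nelem);
 */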

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));

	return (0);
}