xref: /titanic_41/usr/src/lib/libzfs/common/libzfs_pool.c (revision 1daa57680c1c12da0548089a11b00a9d6d077bb5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <assert.h>
30 #include <ctype.h>
31 #include <errno.h>
32 #include <devid.h>
33 #include <fcntl.h>
34 #include <libintl.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <sys/zfs_ioctl.h>
40 #include <sys/zio.h>
41 
42 #include "zfs_namecheck.h"
43 #include "libzfs_impl.h"
44 
45 /*
46  * Validate the given pool name, optionally setting an extended error message on
47  * 'hdl'.
48  */
49 static boolean_t
50 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
51 {
52 	namecheck_err_t why;
53 	char what;
54 	int ret;
55 
56 	ret = pool_namecheck(pool, &why, &what);
57 
58 	/*
59 	 * The rules for reserved pool names were extended at a later point.
60 	 * But we need to support users with existing pools that may now be
61 	 * invalid.  So we only check for this expanded set of names during a
62 	 * create (or import), and only in userland.
63 	 */
64 	if (ret == 0 && !isopen &&
65 	    (strncmp(pool, "mirror", 6) == 0 ||
66 	    strncmp(pool, "raidz", 5) == 0 ||
67 	    strncmp(pool, "spare", 5) == 0)) {
68 		zfs_error_aux(hdl,
69 		    dgettext(TEXT_DOMAIN, "name is reserved"));
70 		return (B_FALSE);
71 	}
72 
73 
74 	if (ret != 0) {
75 		if (hdl != NULL) {
76 			switch (why) {
77 			case NAME_ERR_TOOLONG:
78 				zfs_error_aux(hdl,
79 				    dgettext(TEXT_DOMAIN, "name is too long"));
80 				break;
81 
82 			case NAME_ERR_INVALCHAR:
83 				zfs_error_aux(hdl,
84 				    dgettext(TEXT_DOMAIN, "invalid character "
85 				    "'%c' in pool name"), what);
86 				break;
87 
88 			case NAME_ERR_NOLETTER:
89 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
90 				    "name must begin with a letter"));
91 				break;
92 
93 			case NAME_ERR_RESERVED:
94 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
95 				    "name is reserved"));
96 				break;
97 
98 			case NAME_ERR_DISKLIKE:
99 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
100 				    "pool name is reserved"));
101 				break;
102 			}
103 		}
104 		return (B_FALSE);
105 	}
106 
107 	return (B_TRUE);
108 }
109 
110 /*
111  * Set the pool-wide health based on the vdev state of the root vdev.
112  */
113 int
114 set_pool_health(nvlist_t *config)
115 {
116 	nvlist_t *nvroot;
117 	vdev_stat_t *vs;
118 	uint_t vsc;
119 	char *health;
120 
121 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
122 	    &nvroot) == 0);
123 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
124 	    (uint64_t **)&vs, &vsc) == 0);
125 
126 	switch (vs->vs_state) {
127 
128 	case VDEV_STATE_CLOSED:
129 	case VDEV_STATE_CANT_OPEN:
130 	case VDEV_STATE_OFFLINE:
131 		health = dgettext(TEXT_DOMAIN, "FAULTED");
132 		break;
133 
134 	case VDEV_STATE_DEGRADED:
135 		health = dgettext(TEXT_DOMAIN, "DEGRADED");
136 		break;
137 
138 	case VDEV_STATE_HEALTHY:
139 		health = dgettext(TEXT_DOMAIN, "ONLINE");
140 		break;
141 
142 	default:
143 		abort();
144 	}
145 
146 	return (nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH, health));
147 }
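
/*
 * Example (illustrative only): once set_pool_health() has stored the
 * health string, a consumer can read it straight back out of the same
 * config nvlist:
 *
 *	char *health;
 *
 *	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_HEALTH,
 *	    &health) == 0);
 *	(void) printf("pool is %s\n", health);
 */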
148 
149 /*
150  * Open a handle to the given pool, even if the pool is currently in the FAULTED
151  * state.
152  */
153 zpool_handle_t *
154 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
155 {
156 	zpool_handle_t *zhp;
157 	boolean_t missing;
158 
159 	/*
160 	 * Make sure the pool name is valid.
161 	 */
162 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
163 		(void) zfs_error(hdl, EZFS_INVALIDNAME,
164 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
165 		    pool);
166 		return (NULL);
167 	}
168 
169 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
170 		return (NULL);
171 
172 	zhp->zpool_hdl = hdl;
173 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
174 
175 	if (zpool_refresh_stats(zhp, &missing) != 0) {
176 		zpool_close(zhp);
177 		return (NULL);
178 	}
179 
180 	if (missing) {
181 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
182 		    "no such pool"));
183 		(void) zfs_error(hdl, EZFS_NOENT,
184 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
185 		    pool);
186 		zpool_close(zhp);
187 		return (NULL);
188 	}
189 
190 	return (zhp);
191 }
192 
193 /*
194  * Like the above, but silent on error.  Used when iterating over pools (because
195  * the configuration cache may be out of date).
196  */
197 int
198 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
199 {
200 	zpool_handle_t *zhp;
201 	boolean_t missing;
202 
203 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
204 		return (-1);
205 
206 	zhp->zpool_hdl = hdl;
207 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
208 
209 	if (zpool_refresh_stats(zhp, &missing) != 0) {
210 		zpool_close(zhp);
211 		return (-1);
212 	}
213 
214 	if (missing) {
215 		zpool_close(zhp);
216 		*ret = NULL;
217 		return (0);
218 	}
219 
220 	*ret = zhp;
221 	return (0);
222 }
223 
224 /*
225  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
226  * state.
227  */
228 zpool_handle_t *
229 zpool_open(libzfs_handle_t *hdl, const char *pool)
230 {
231 	zpool_handle_t *zhp;
232 
233 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
234 		return (NULL);
235 
236 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
237 		(void) zfs_error(hdl, EZFS_POOLUNAVAIL,
238 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
239 		zpool_close(zhp);
240 		return (NULL);
241 	}
242 
243 	return (zhp);
244 }
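
/*
 * Example usage (illustrative sketch, not part of this library): a typical
 * consumer obtains a library handle, opens a pool by name, and closes both
 * when done.  The pool name "tank" is a placeholder.
 *
 *	libzfs_handle_t *hdl;
 *	zpool_handle_t *zhp;
 *
 *	if ((hdl = libzfs_init()) != NULL) {
 *		if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *			(void) printf("opened '%s'\n", zpool_get_name(zhp));
 *			zpool_close(zhp);
 *		}
 *		libzfs_fini(hdl);
 *	}
 */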
245 
246 /*
247  * Close the handle.  Simply frees the memory associated with the handle.
248  */
249 void
250 zpool_close(zpool_handle_t *zhp)
251 {
252 	if (zhp->zpool_config)
253 		nvlist_free(zhp->zpool_config);
254 	if (zhp->zpool_old_config)
255 		nvlist_free(zhp->zpool_old_config);
256 	if (zhp->zpool_error_log) {
257 		int i;
258 		for (i = 0; i < zhp->zpool_error_count; i++)
259 			nvlist_free(zhp->zpool_error_log[i]);
260 		free(zhp->zpool_error_log);
261 	}
262 	free(zhp);
263 }
264 
265 /*
266  * Return the name of the pool.
267  */
268 const char *
269 zpool_get_name(zpool_handle_t *zhp)
270 {
271 	return (zhp->zpool_name);
272 }
273 
274 /*
275  * Return the GUID of the pool.
276  */
277 uint64_t
278 zpool_get_guid(zpool_handle_t *zhp)
279 {
280 	uint64_t guid;
281 
282 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
283 	    &guid) == 0);
284 	return (guid);
285 }
286 
287 /*
288  * Return the version of the pool.
289  */
290 uint64_t
291 zpool_get_version(zpool_handle_t *zhp)
292 {
293 	uint64_t version;
294 
295 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
296 	    &version) == 0);
297 
298 	return (version);
299 }
300 
301 /*
302  * Return the amount of space currently consumed by the pool.
303  */
304 uint64_t
305 zpool_get_space_used(zpool_handle_t *zhp)
306 {
307 	nvlist_t *nvroot;
308 	vdev_stat_t *vs;
309 	uint_t vsc;
310 
311 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
312 	    &nvroot) == 0);
313 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
314 	    (uint64_t **)&vs, &vsc) == 0);
315 
316 	return (vs->vs_alloc);
317 }
318 
319 /*
320  * Return the total space in the pool.
321  */
322 uint64_t
323 zpool_get_space_total(zpool_handle_t *zhp)
324 {
325 	nvlist_t *nvroot;
326 	vdev_stat_t *vs;
327 	uint_t vsc;
328 
329 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
330 	    &nvroot) == 0);
331 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
332 	    (uint64_t **)&vs, &vsc) == 0);
333 
334 	return (vs->vs_space);
335 }
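
/*
 * Illustrative sketch: the two accessors above return byte counts, so
 * percent-of-capacity can be derived directly:
 *
 *	uint64_t used = zpool_get_space_used(zhp);
 *	uint64_t total = zpool_get_space_total(zhp);
 *	int pct = (total == 0) ? 0 : (int)((used * 100) / total);
 */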
336 
337 /*
338  * Return the alternate root for this pool, if any.
339  */
340 int
341 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
342 {
343 	zfs_cmd_t zc = { 0 };
344 
345 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
346 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
347 	    zc.zc_root[0] == '\0')
348 		return (-1);
349 
350 	(void) strlcpy(buf, zc.zc_root, buflen);
351 
352 	return (0);
353 }
354 
355 /*
356  * Return the state of the pool (ACTIVE or UNAVAILABLE)
357  */
358 int
359 zpool_get_state(zpool_handle_t *zhp)
360 {
361 	return (zhp->zpool_state);
362 }
363 
364 /*
365  * Create the named pool, using the provided vdev list.  It is assumed
366  * that the consumer has already validated the contents of the nvlist, so we
367  * don't have to worry about error semantics.
368  */
369 int
370 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
371     const char *altroot)
372 {
373 	zfs_cmd_t zc = { 0 };
374 	char *packed;
375 	size_t len;
376 	char msg[1024];
377 
378 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
379 	    "cannot create '%s'"), pool);
380 
381 	if (!zpool_name_valid(hdl, B_FALSE, pool))
382 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
383 
384 	if (altroot != NULL && altroot[0] != '/')
385 		return (zfs_error(hdl, EZFS_BADPATH,
386 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
387 
388 	if (nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) != 0)
389 		return (no_memory(hdl));
390 
391 	if ((packed = zfs_alloc(hdl, len)) == NULL)
392 		return (-1);
393 
394 	if (nvlist_pack(nvroot, &packed, &len,
395 	    NV_ENCODE_NATIVE, 0) != 0) {
396 		free(packed);
397 		return (no_memory(hdl));
398 	}
399 
400 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
401 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
402 	zc.zc_config_src_size = len;
403 
404 	if (altroot != NULL)
405 		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
406 
407 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
408 		free(packed);
409 
410 		switch (errno) {
411 		case EBUSY:
412 			/*
413 			 * This can happen if the user has specified the same
414 			 * device multiple times.  We can't reliably detect this
415 			 * until we try to add it and see we already have a
416 			 * label.
417 			 */
418 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
419 			    "one or more vdevs refer to the same device"));
420 			return (zfs_error(hdl, EZFS_BADDEV, msg));
421 
422 		case EOVERFLOW:
423 			/*
424 			 * This occurs when one of the devices is below
425 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
426 			 * device was the problem device since there's no
427 			 * reliable way to determine device size from userland.
428 			 */
429 			{
430 				char buf[64];
431 
432 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
433 
434 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
435 				    "one or more devices is less than the "
436 				    "minimum size (%s)"), buf);
437 			}
438 			return (zfs_error(hdl, EZFS_BADDEV, msg));
439 
440 		case ENOSPC:
441 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
442 			    "one or more devices is out of space"));
443 			return (zfs_error(hdl, EZFS_BADDEV, msg));
444 
445 		default:
446 			return (zpool_standard_error(hdl, errno, msg));
447 		}
448 	}
449 
450 	free(packed);
451 
452 	/*
453 	 * If this is an alternate root pool, then we automatically set the
454 	 * mountpoint of the root dataset to be '/'.
455 	 */
456 	if (altroot != NULL) {
457 		zfs_handle_t *zhp;
458 
459 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
460 		verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);
461 
462 		zfs_close(zhp);
463 	}
464 
465 	return (0);
466 }
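
/*
 * Illustrative sketch (hedged, minimal): building the 'nvroot' argument
 * for zpool_create() by hand for a single-disk pool.  The real zpool(1M)
 * command constructs this tree through its own vdev parsing and performs
 * considerably more validation; the device path below is a placeholder.
 *
 *	nvlist_t *disk, *root;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c1t0d0s0") == 0);
 *
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	(void) zpool_create(hdl, "tank", root, NULL);
 */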
467 
468 /*
469  * Destroy the given pool.  It is up to the caller to ensure that there are no
470  * datasets left in the pool.
471  */
472 int
473 zpool_destroy(zpool_handle_t *zhp)
474 {
475 	zfs_cmd_t zc = { 0 };
476 	zfs_handle_t *zfp = NULL;
477 	libzfs_handle_t *hdl = zhp->zpool_hdl;
478 	char msg[1024];
479 
480 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
481 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
482 	    ZFS_TYPE_FILESYSTEM)) == NULL)
483 		return (-1);
484 
485 	if (zpool_remove_zvol_links(zhp) != 0)
486 		return (-1);
487 
488 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
489 
490 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
491 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
492 		    "cannot destroy '%s'"), zhp->zpool_name);
493 
494 		if (errno == EROFS) {
495 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
496 			    "one or more devices is read only"));
497 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
498 		} else {
499 			(void) zpool_standard_error(hdl, errno, msg);
500 		}
501 
502 		if (zfp)
503 			zfs_close(zfp);
504 		return (-1);
505 	}
506 
507 	if (zfp) {
508 		remove_mountpoint(zfp);
509 		zfs_close(zfp);
510 	}
511 
512 	return (0);
513 }
514 
515 /*
516  * Add the given vdevs to the pool.  The caller must have already performed the
517  * necessary verification to ensure that the vdev specification is well-formed.
518  */
519 int
520 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
521 {
522 	char *packed;
523 	size_t len;
524 	zfs_cmd_t zc = { 0 };
525 	int ret;
526 	libzfs_handle_t *hdl = zhp->zpool_hdl;
527 	char msg[1024];
528 	nvlist_t **spares;
529 	uint_t nspares;
530 
531 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
532 	    "cannot add to '%s'"), zhp->zpool_name);
533 
534 	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
535 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
536 	    &spares, &nspares) == 0) {
537 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
538 		    "upgraded to add hot spares"));
539 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
540 	}
541 
542 	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);
543 
544 	if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
545 		return (-1);
546 
547 	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
548 
549 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
550 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
551 	zc.zc_config_src_size = len;
552 
553 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
554 		switch (errno) {
555 		case EBUSY:
556 			/*
557 			 * This can happen if the user has specified the same
558 			 * device multiple times.  We can't reliably detect this
559 			 * until we try to add it and see we already have a
560 			 * label.
561 			 */
562 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
563 			    "one or more vdevs refer to the same device"));
564 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
565 			break;
566 
567 		case EOVERFLOW:
568 			/*
569 			 * This occurs when one of the devices is below
570 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
571 			 * device was the problem device since there's no
572 			 * reliable way to determine device size from userland.
573 			 */
574 			{
575 				char buf[64];
576 
577 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
578 
579 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
580 				    "device is less than the minimum "
581 				    "size (%s)"), buf);
582 			}
583 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
584 			break;
585 
586 		case ENOTSUP:
587 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
588 			    "pool must be upgraded to add raidz2 vdevs"));
589 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
590 			break;
591 
592 		default:
593 			(void) zpool_standard_error(hdl, errno, msg);
594 		}
595 
596 		ret = -1;
597 	} else {
598 		ret = 0;
599 	}
600 
601 	free(packed);
602 
603 	return (ret);
604 }
605 
606 /*
607  * Exports the pool from the system.  The caller must ensure that there are no
608  * mounted datasets in the pool.
609  */
610 int
611 zpool_export(zpool_handle_t *zhp)
612 {
613 	zfs_cmd_t zc = { 0 };
614 
615 	if (zpool_remove_zvol_links(zhp) != 0)
616 		return (-1);
617 
618 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
619 
620 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
621 		return (zpool_standard_error(zhp->zpool_hdl, errno,
622 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
623 		    zhp->zpool_name));
624 
625 	return (0);
626 }
627 
628 /*
629  * Import the given pool using the known configuration.  The configuration
630  * should have come from zpool_find_import().  The 'newname' and 'altroot'
631  * parameters control whether the pool is imported with a different name or with
632  * an alternate root, respectively.
633  */
634 int
635 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
636     const char *altroot)
637 {
638 	zfs_cmd_t zc = { 0 };
639 	char *packed;
640 	size_t len;
641 	char *thename;
642 	char *origname;
643 	int ret;
644 
645 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
646 	    &origname) == 0);
647 
648 	if (newname != NULL) {
649 		if (!zpool_name_valid(hdl, B_FALSE, newname))
650 			return (zfs_error(hdl, EZFS_INVALIDNAME,
651 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
652 			    newname));
653 		thename = (char *)newname;
654 	} else {
655 		thename = origname;
656 	}
657 
658 	if (altroot != NULL && altroot[0] != '/')
659 		return (zfs_error(hdl, EZFS_BADPATH,
660 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
661 		    altroot));
662 
663 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
664 
665 	if (altroot != NULL)
666 		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
667 	else
668 		zc.zc_root[0] = '\0';
669 
670 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
671 	    &zc.zc_guid) == 0);
672 
673 	verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);
674 
675 	if ((packed = zfs_alloc(hdl, len)) == NULL)
676 		return (-1);
677 
678 	verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
679 
680 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
681 	zc.zc_config_src_size = len;
682 
683 	ret = 0;
684 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
685 		char desc[1024];
686 		if (newname == NULL)
687 			(void) snprintf(desc, sizeof (desc),
688 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
689 			    thename);
690 		else
691 			(void) snprintf(desc, sizeof (desc),
692 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
693 			    origname, thename);
694 
695 		switch (errno) {
696 		case ENOTSUP:
697 			/*
698 			 * Unsupported version.
699 			 */
700 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
701 			break;
702 
703 		case EINVAL:
704 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
705 			break;
706 
707 		default:
708 			(void) zpool_standard_error(hdl, errno, desc);
709 		}
710 
711 		ret = -1;
712 	} else {
713 		zpool_handle_t *zhp;
714 		/*
715 		 * This should never fail, but play it safe anyway.
716 		 */
717 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
718 			ret = -1;
719 		} else if (zhp != NULL) {
720 			ret = zpool_create_zvol_links(zhp);
721 			zpool_close(zhp);
722 		}
723 	}
724 
725 	free(packed);
726 	return (ret);
727 }
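
/*
 * Illustrative sketch: importing every pool that zpool_find_import()
 * discovers, assuming that interface returns an nvlist mapping pool names
 * to their configurations (as the comment above describes):
 *
 *	nvlist_t *pools, *config;
 *	nvpair_t *elem = NULL;
 *
 *	if ((pools = zpool_find_import(hdl, 0, NULL)) != NULL) {
 *		while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
 *			verify(nvpair_value_nvlist(elem, &config) == 0);
 *			(void) zpool_import(hdl, config, NULL, NULL);
 *		}
 *		nvlist_free(pools);
 *	}
 */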
728 
729 /*
730  * Scrub the pool.
731  */
732 int
733 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
734 {
735 	zfs_cmd_t zc = { 0 };
736 	char msg[1024];
737 	libzfs_handle_t *hdl = zhp->zpool_hdl;
738 
739 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
740 	zc.zc_cookie = type;
741 
742 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
743 		return (0);
744 
745 	(void) snprintf(msg, sizeof (msg),
746 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
747 
748 	if (errno == EBUSY)
749 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
750 	else
751 		return (zpool_standard_error(hdl, errno, msg));
752 }
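
/*
 * Example (illustrative): starting a full scrub, and later cancelling it;
 * the pool_scrub_type_t values come from sys/fs/zfs.h:
 *
 *	(void) zpool_scrub(zhp, POOL_SCRUB_EVERYTHING);
 *	...
 *	(void) zpool_scrub(zhp, POOL_SCRUB_NONE);
 */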
753 
754 /*
755  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
756  * spare; but FALSE if its an INUSE spare.
757  */
758 static nvlist_t *
759 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
760     boolean_t *avail_spare)
761 {
762 	uint_t c, children;
763 	nvlist_t **child;
764 	uint64_t theguid, present;
765 	char *path;
766 	uint64_t wholedisk = 0;
767 	nvlist_t *ret;
768 
769 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
770 
771 	if (search == NULL &&
772 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
773 		/*
774 		 * If the device has never been present since import, the only
775 		 * reliable way to match the vdev is by GUID.
776 		 */
777 		if (theguid == guid)
778 			return (nv);
779 	} else if (search != NULL &&
780 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
781 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
782 		    &wholedisk);
783 		if (wholedisk) {
784 			/*
785 			 * For whole disks, the internal path has 's0', but the
786 			 * path passed in by the user doesn't.
787 			 */
788 			if (strlen(search) == strlen(path) - 2 &&
789 			    strncmp(search, path, strlen(search)) == 0)
790 				return (nv);
791 		} else if (strcmp(search, path) == 0) {
792 			return (nv);
793 		}
794 	}
795 
796 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
797 	    &child, &children) != 0)
798 		return (NULL);
799 
800 	for (c = 0; c < children; c++)
801 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
802 		    avail_spare)) != NULL)
803 			return (ret);
804 
805 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
806 	    &child, &children) == 0) {
807 		for (c = 0; c < children; c++) {
808 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
809 			    avail_spare)) != NULL) {
810 				*avail_spare = B_TRUE;
811 				return (ret);
812 			}
813 		}
814 	}
815 
816 	return (NULL);
817 }
818 
819 nvlist_t *
820 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
821 {
822 	char buf[MAXPATHLEN];
823 	const char *search;
824 	char *end;
825 	nvlist_t *nvroot;
826 	uint64_t guid;
827 
828 	guid = strtoull(path, &end, 10);
829 	if (guid != 0 && *end == '\0') {
830 		search = NULL;
831 	} else if (path[0] != '/') {
832 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
833 		search = buf;
834 	} else {
835 		search = path;
836 	}
837 
838 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
839 	    &nvroot) == 0);
840 
841 	*avail_spare = B_FALSE;
842 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
843 }
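
/*
 * Example (illustrative): a device can be looked up by short name (which
 * is expanded under /dev/dsk), by full path, or by GUID string:
 *
 *	boolean_t spare;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "c1t0d0", &spare)) != NULL && spare)
 *		(void) printf("device is an AVAIL hot spare\n");
 */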
844 
845 /*
846  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
847  */
848 static boolean_t
849 is_spare(zpool_handle_t *zhp, uint64_t guid)
850 {
851 	uint64_t spare_guid;
852 	nvlist_t *nvroot;
853 	nvlist_t **spares;
854 	uint_t nspares;
855 	int i;
856 
857 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
858 	    &nvroot) == 0);
859 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
860 	    &spares, &nspares) == 0) {
861 		for (i = 0; i < nspares; i++) {
862 			verify(nvlist_lookup_uint64(spares[i],
863 			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
864 			if (guid == spare_guid)
865 				return (B_TRUE);
866 		}
867 	}
868 
869 	return (B_FALSE);
870 }
871 
872 /*
873  * Bring the specified vdev online
874  */
875 int
876 zpool_vdev_online(zpool_handle_t *zhp, const char *path)
877 {
878 	zfs_cmd_t zc = { 0 };
879 	char msg[1024];
880 	nvlist_t *tgt;
881 	boolean_t avail_spare;
882 	libzfs_handle_t *hdl = zhp->zpool_hdl;
883 
884 	(void) snprintf(msg, sizeof (msg),
885 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
886 
887 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
888 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
889 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
890 
891 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
892 
893 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
894 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
895 
896 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
897 		return (0);
898 
899 	return (zpool_standard_error(hdl, errno, msg));
900 }
901 
902 /*
903  * Take the specified vdev offline
904  */
905 int
906 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
907 {
908 	zfs_cmd_t zc = { 0 };
909 	char msg[1024];
910 	nvlist_t *tgt;
911 	boolean_t avail_spare;
912 	libzfs_handle_t *hdl = zhp->zpool_hdl;
913 
914 	(void) snprintf(msg, sizeof (msg),
915 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
916 
917 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
918 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
919 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
920 
921 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
922 
923 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
924 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
925 
926 	zc.zc_cookie = istmp;
927 
928 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
929 		return (0);
930 
931 	switch (errno) {
932 	case EBUSY:
933 
934 		/*
935 		 * There are no other replicas of this device.
936 		 */
937 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
938 
939 	default:
940 		return (zpool_standard_error(hdl, errno, msg));
941 	}
942 }
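
/*
 * Example (illustrative): taking a device offline for maintenance.  A
 * non-zero 'istmp' requests a temporary offline, so the device is not
 * kept offline persistently:
 *
 *	if (zpool_vdev_offline(zhp, "c1t0d0", 1) == 0) {
 *		... service the disk ...
 *		(void) zpool_vdev_online(zhp, "c1t0d0");
 *	}
 */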
943 
944 /*
945  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
946  * a hot spare.
947  */
948 static boolean_t
949 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
950 {
951 	nvlist_t **child;
952 	uint_t c, children;
953 	char *type;
954 
955 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
956 	    &children) == 0) {
957 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
958 		    &type) == 0);
959 
960 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
961 		    children == 2 && child[which] == tgt)
962 			return (B_TRUE);
963 
964 		for (c = 0; c < children; c++)
965 			if (is_replacing_spare(child[c], tgt, which))
966 				return (B_TRUE);
967 	}
968 
969 	return (B_FALSE);
970 }
971 
972 /*
973  * Attach new_disk (fully described by nvroot) to old_disk.
974  * If 'replacing' is specified, the new disk will replace the old one.
975  */
976 int
977 zpool_vdev_attach(zpool_handle_t *zhp,
978     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
979 {
980 	zfs_cmd_t zc = { 0 };
981 	char msg[1024];
982 	char *packed;
983 	int ret;
984 	size_t len;
985 	nvlist_t *tgt;
986 	boolean_t avail_spare;
987 	uint64_t val;
988 	char *path;
989 	nvlist_t **child;
990 	uint_t children;
991 	nvlist_t *config_root;
992 	libzfs_handle_t *hdl = zhp->zpool_hdl;
993 
994 	if (replacing)
995 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
996 		    "cannot replace %s with %s"), old_disk, new_disk);
997 	else
998 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
999 		    "cannot attach %s to %s"), new_disk, old_disk);
1000 
1001 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1002 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == NULL)
1003 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1004 
1005 	if (avail_spare)
1006 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1007 
1008 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1009 	zc.zc_cookie = replacing;
1010 
1011 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1012 	    &child, &children) != 0 || children != 1) {
1013 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1014 		    "new device must be a single disk"));
1015 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1016 	}
1017 
1018 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1019 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1020 
1021 	/*
1022 	 * If the target is a hot spare that has been swapped in, we can only
1023 	 * replace it with another hot spare.
1024 	 */
1025 	if (replacing &&
1026 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1027 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1028 	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
1029 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1030 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1031 		    "can only be replaced by another hot spare"));
1032 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1033 	}
1034 
1035 	/*
1036 	 * If we are attempting to replace a spare, it cannot be applied to an
1037 	 * already spared device.
1038 	 */
1039 	if (replacing &&
1040 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1041 	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1042 	    is_replacing_spare(config_root, tgt, 0)) {
1043 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1044 		    "device has already been replaced with a spare"));
1045 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1046 	}
1047 
1048 	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);
1049 
1050 	if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
1051 		return (-1);
1052 
1053 	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
1054 
1055 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
1056 	zc.zc_config_src_size = len;
1057 
1058 	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
1059 
1060 	free(packed);
1061 
1062 	if (ret == 0)
1063 		return (0);
1064 
1065 	switch (errno) {
1066 	case ENOTSUP:
1067 		/*
1068 		 * Can't attach to or replace this type of vdev.
1069 		 */
1070 		if (replacing)
1071 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1072 			    "cannot replace a replacing device"));
1073 		else
1074 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1075 			    "can only attach to mirrors and top-level "
1076 			    "disks"));
1077 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1078 		break;
1079 
1080 	case EINVAL:
1081 		/*
1082 		 * The new device must be a single disk.
1083 		 */
1084 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1085 		    "new device must be a single disk"));
1086 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1087 		break;
1088 
1089 	case EBUSY:
1090 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1091 		    new_disk);
1092 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1093 		break;
1094 
1095 	case EOVERFLOW:
1096 		/*
1097 		 * The new device is too small.
1098 		 */
1099 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1100 		    "device is too small"));
1101 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1102 		break;
1103 
1104 	case EDOM:
1105 		/*
1106 		 * The new device has a different alignment requirement.
1107 		 */
1108 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1109 		    "devices have different sector alignment"));
1110 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1111 		break;
1112 
1113 	case ENAMETOOLONG:
1114 		/*
1115 		 * The resulting top-level vdev spec won't fit in the label.
1116 		 */
1117 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1118 		break;
1119 
1120 	default:
1121 		(void) zpool_standard_error(hdl, errno, msg);
1122 	}
1123 
1124 	return (-1);
1125 }
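
/*
 * Illustrative sketch: replacing a failed disk with a new one.  'nvroot'
 * is a root vdev nvlist containing the single replacement disk, built the
 * same way as in the zpool_create() sketch above; device names are
 * placeholders.
 *
 *	(void) zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 1);
 */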
1126 
1127 /*
1128  * Detach the specified device.
1129  */
1130 int
1131 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1132 {
1133 	zfs_cmd_t zc = { 0 };
1134 	char msg[1024];
1135 	nvlist_t *tgt;
1136 	boolean_t avail_spare;
1137 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1138 
1139 	(void) snprintf(msg, sizeof (msg),
1140 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1141 
1142 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1143 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1144 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1145 
1146 	if (avail_spare)
1147 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1148 
1149 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1150 
1151 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1152 		return (0);
1153 
1154 	switch (errno) {
1155 
1156 	case ENOTSUP:
1157 		/*
1158 		 * Can't detach from this type of vdev.
1159 		 */
1160 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1161 		    "applicable to mirror and replacing vdevs"));
1162 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1163 		break;
1164 
1165 	case EBUSY:
1166 		/*
1167 		 * There are no other replicas of this device.
1168 		 */
1169 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1170 		break;
1171 
1172 	default:
1173 		(void) zpool_standard_error(hdl, errno, msg);
1174 	}
1175 
1176 	return (-1);
1177 }
1178 
1179 /*
1180  * Remove the given device.  Currently, this is supported only for hot spares.
1181  */
1182 int
1183 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1184 {
1185 	zfs_cmd_t zc = { 0 };
1186 	char msg[1024];
1187 	nvlist_t *tgt;
1188 	boolean_t avail_spare;
1189 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1190 
1191 	(void) snprintf(msg, sizeof (msg),
1192 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1193 
1194 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1195 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1196 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1197 
1198 	if (!avail_spare) {
1199 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1200 		    "only hot spares can be removed"));
1201 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1202 	}
1203 
1204 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1205 
1206 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1207 		return (0);
1208 
1209 	return (zpool_standard_error(hdl, errno, msg));
1210 }
1211 
1212 /*
1213  * Clear the errors for the pool, or the particular device if specified.
1214  */
1215 int
1216 zpool_clear(zpool_handle_t *zhp, const char *path)
1217 {
1218 	zfs_cmd_t zc = { 0 };
1219 	char msg[1024];
1220 	nvlist_t *tgt;
1221 	boolean_t avail_spare;
1222 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1223 
1224 	if (path)
1225 		(void) snprintf(msg, sizeof (msg),
1226 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1227 		    path);
1228 	else
1229 		(void) snprintf(msg, sizeof (msg),
1230 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1231 		    zhp->zpool_name);
1232 
1233 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1234 	if (path) {
1235 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1236 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1237 
1238 		if (avail_spare)
1239 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1240 
1241 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1242 		    &zc.zc_guid) == 0);
1243 	}
1244 
1245 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1246 		return (0);
1247 
1248 	return (zpool_standard_error(hdl, errno, msg));
1249 }
1250 
1251 static int
1252 do_zvol(zfs_handle_t *zhp, void *data)
1253 {
1254 	int linktype = (int)(uintptr_t)data;
1255 	int ret;
1256 
1257 	/*
1258 	 * We check for volblocksize instead of ZFS_TYPE_VOLUME so that we
1259 	 * correctly handle snapshots of volumes.
1260 	 */
1261 	if (zhp->zfs_volblocksize != 0) {
1262 		if (linktype)
1263 			ret = zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1264 		else
1265 			ret = zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name);
1266 	}
1267 
1268 	ret = zfs_iter_children(zhp, do_zvol, data);
1269 
1270 	zfs_close(zhp);
1271 	return (ret);
1272 }
1273 
1274 /*
1275  * Iterate over all zvols in the pool and make any necessary minor nodes.
1276  */
1277 int
1278 zpool_create_zvol_links(zpool_handle_t *zhp)
1279 {
1280 	zfs_handle_t *zfp;
1281 	int ret;
1282 
1283 	/*
1284 	 * If the pool is unavailable, just return success.
1285 	 */
1286 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1287 	    zhp->zpool_name)) == NULL)
1288 		return (0);
1289 
1290 	ret = zfs_iter_children(zfp, do_zvol, (void *)B_TRUE);
1291 
1292 	zfs_close(zfp);
1293 	return (ret);
1294 }
1295 
1296 /*
1297  * Iterate over all zvols in the pool and remove any minor nodes.
1298  */
1299 int
1300 zpool_remove_zvol_links(zpool_handle_t *zhp)
1301 {
1302 	zfs_handle_t *zfp;
1303 	int ret;
1304 
1305 	/*
1306 	 * If the pool is unavailable, just return success.
1307 	 */
1308 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1309 	    zhp->zpool_name)) == NULL)
1310 		return (0);
1311 
1312 	ret = zfs_iter_children(zfp, do_zvol, (void *)B_FALSE);
1313 
1314 	zfs_close(zfp);
1315 	return (ret);
1316 }
1317 
1318 /*
1319  * Convert from a devid string to a path.
1320  */
1321 static char *
1322 devid_to_path(char *devid_str)
1323 {
1324 	ddi_devid_t devid;
1325 	char *minor;
1326 	char *path;
1327 	devid_nmlist_t *list = NULL;
1328 	int ret;
1329 
1330 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1331 		return (NULL);
1332 
1333 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1334 
1335 	devid_str_free(minor);
1336 	devid_free(devid);
1337 
1338 	if (ret != 0)
1339 		return (NULL);
1340 
1341 	if ((path = strdup(list[0].devname)) == NULL)
1342 		return (NULL);
1343 
1344 	devid_free_nmlist(list);
1345 
1346 	return (path);
1347 }
1348 
1349 /*
1350  * Convert from a path to a devid string.
1351  */
1352 static char *
1353 path_to_devid(const char *path)
1354 {
1355 	int fd;
1356 	ddi_devid_t devid;
1357 	char *minor, *ret;
1358 
1359 	if ((fd = open(path, O_RDONLY)) < 0)
1360 		return (NULL);
1361 
1362 	minor = NULL;
1363 	ret = NULL;
1364 	if (devid_get(fd, &devid) == 0) {
1365 		if (devid_get_minor_name(fd, &minor) == 0)
1366 			ret = devid_str_encode(devid, minor);
1367 		if (minor != NULL)
1368 			devid_str_free(minor);
1369 		devid_free(devid);
1370 	}
1371 	(void) close(fd);
1372 
1373 	return (ret);
1374 }
1375 
1376 /*
1377  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1378  * ignore any failure here, since a common case is for an unprivileged user to
1379  * type 'zpool status', and we'll display the correct information anyway.
1380  */
1381 static void
1382 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1383 {
1384 	zfs_cmd_t zc = { 0 };
1385 
1386 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1387 	(void) strlcpy(zc.zc_prop_value, path, sizeof (zc.zc_prop_value));
1388 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1389 	    &zc.zc_guid) == 0);
1390 
1391 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1392 }
1393 
1394 /*
1395  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1396  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1397  * We also check if this is a whole disk, in which case we strip off the
1398  * trailing 's0' slice name.
1399  *
1400  * This routine is also responsible for identifying when disks have been
1401  * reconfigured in a new location.  The kernel will have opened the device by
1402  * devid, but the path will still refer to the old location.  To catch this, we
1403  * first do a path -> devid translation (which is fast for the common case).  If
1404  * the devid matches, we're done.  If not, we do a reverse devid -> path
1405  * translation and issue the appropriate ioctl() to update the path of the vdev.
1406  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1407  * of these checks.
1408  */
1409 char *
1410 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
1411 {
1412 	char *path, *devid;
1413 	uint64_t value;
1414 	char buf[64];
1415 
1416 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1417 	    &value) == 0) {
1418 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1419 		    &value) == 0);
1420 		(void) snprintf(buf, sizeof (buf), "%llu", value);
1421 		path = buf;
1422 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1423 
1424 		if (zhp != NULL &&
1425 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
1426 			/*
1427 			 * Determine if the current path is correct.
1428 			 */
1429 			char *newdevid = path_to_devid(path);
1430 
1431 			if (newdevid == NULL ||
1432 			    strcmp(devid, newdevid) != 0) {
1433 				char *newpath;
1434 
1435 				if ((newpath = devid_to_path(devid)) != NULL) {
1436 					/*
1437 					 * Update the path appropriately.
1438 					 */
1439 					set_path(zhp, nv, newpath);
1440 					if (nvlist_add_string(nv,
1441 					    ZPOOL_CONFIG_PATH, newpath) == 0)
1442 						verify(nvlist_lookup_string(nv,
1443 						    ZPOOL_CONFIG_PATH,
1444 						    &path) == 0);
1445 					free(newpath);
1446 				}
1447 			}
1448 
1449 			if (newdevid)
1450 				devid_str_free(newdevid);
1451 		}
1452 
1453 		if (strncmp(path, "/dev/dsk/", 9) == 0)
1454 			path += 9;
1455 
1456 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1457 		    &value) == 0 && value) {
1458 			char *tmp = zfs_strdup(hdl, path);
1459 			if (tmp == NULL)
1460 				return (NULL);
1461 			tmp[strlen(path) - 2] = '\0';
1462 			return (tmp);
1463 		}
1464 	} else {
1465 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
1466 
1467 		/*
1468 		 * If it's a raidz device, we need to stick in the parity level.
1469 		 */
1470 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
1471 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
1472 			    &value) == 0);
1473 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
1474 			    value);
1475 			path = buf;
1476 		}
1477 	}
1478 
1479 	return (zfs_strdup(hdl, path));
1480 }
1481 
1482 static int
1483 zbookmark_compare(const void *a, const void *b)
1484 {
1485 	return (memcmp(a, b, sizeof (zbookmark_t)));
1486 }
1487 
1488 /*
1489  * Retrieve the persistent error log, uniquify the members, and return to the
1490  * caller.
1491  */
1492 int
1493 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
1494 {
1495 	zfs_cmd_t zc = { 0 };
1496 	uint64_t count;
1497 	zbookmark_t *zb;
1498 	int i, j;
1499 
1500 	if (zhp->zpool_error_log != NULL) {
1501 		*list = zhp->zpool_error_log;
1502 		*nelem = zhp->zpool_error_count;
1503 		return (0);
1504 	}
1505 
1506 	/*
1507 	 * Retrieve the raw error list from the kernel.  If the number of errors
1508 	 * has increased, allocate more space and continue until we get the
1509 	 * entire list.
1510 	 */
1511 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1512 	    &count) == 0);
1513 	if ((zc.zc_config_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1514 	    count * sizeof (zbookmark_t))) == NULL)
1515 		return (-1);
1516 	zc.zc_config_dst_size = count;
1517 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1518 	for (;;) {
1519 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1520 		    &zc) != 0) {
1521 			free((void *)(uintptr_t)zc.zc_config_dst);
1522 			if (errno == ENOMEM) {
1523 				if ((zc.zc_config_dst = (uintptr_t)
1524 				    zfs_alloc(zhp->zpool_hdl,
1525 				    zc.zc_config_dst_size)) == NULL)
1526 					return (-1);
1527 			} else {
1528 				return (-1);
1529 			}
1530 		} else {
1531 			break;
1532 		}
1533 	}
1534 
1535 	/*
1536 	 * Sort the resulting bookmarks.  This is a little confusing due to the
1537 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
1538 	 * to first, and 'zc_config_dst_size' indicates the number of bookmarks
1539 	 * _not_ copied as part of the process.  So we point the start of our
1540 	 * array appropriately and decrement the total number of elements.
1541 	 */
1542 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_config_dst) +
1543 	    zc.zc_config_dst_size;
1544 	count -= zc.zc_config_dst_size;
1545 
1546 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1547 
1548 	/*
1549 	 * Count the number of unique elements
1550 	 */
1551 	j = 0;
1552 	for (i = 0; i < count; i++) {
1553 		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
1554 		    sizeof (zbookmark_t)) == 0)
1555 			continue;
1556 		j++;
1557 	}
1558 
1559 	/*
1560 	 * If the user has only requested the number of items, return it now
1561 	 * without bothering with the extra work.
1562 	 */
1563 	if (list == NULL) {
1564 		*nelem = j;
1565 		free((void *)(uintptr_t)zc.zc_config_dst);
1566 		return (0);
1567 	}
1568 
1569 	zhp->zpool_error_count = j;
1570 
1571 	/*
1572 	 * Allocate an array of nvlists to hold the results
1573 	 */
1574 	if ((zhp->zpool_error_log = zfs_alloc(zhp->zpool_hdl,
1575 	    j * sizeof (nvlist_t *))) == NULL) {
1576 		free((void *)(uintptr_t)zc.zc_config_dst);
1577 		return (-1);
1578 	}
1579 
1580 	/*
1581 	 * Fill in the results with names from the kernel.
1582 	 */
1583 	j = 0;
1584 	for (i = 0; i < count; i++) {
1585 		char buf[64];
1586 		nvlist_t *nv;
1587 
1588 		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
1589 		    sizeof (zbookmark_t)) == 0)
1590 			continue;
1591 
1592 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME,
1593 		    0) != 0)
1594 			goto nomem;
1595 		zhp->zpool_error_log[j] = nv;
1596 
1597 		zc.zc_bookmark = zb[i];
1598 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_BOOKMARK_NAME,
1599 		    &zc) == 0) {
1600 			if (nvlist_add_string(nv, ZPOOL_ERR_DATASET,
1601 			    zc.zc_prop_name) != 0 ||
1602 			    nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
1603 			    zc.zc_prop_value) != 0 ||
1604 			    nvlist_add_string(nv, ZPOOL_ERR_RANGE,
1605 			    zc.zc_filename) != 0)
1606 				goto nomem;
1607 		} else {
1608 			(void) snprintf(buf, sizeof (buf), "%llx",
1609 			    zb[i].zb_objset);
1610 			if (nvlist_add_string(nv,
1611 			    ZPOOL_ERR_DATASET, buf) != 0)
1612 				goto nomem;
1613 			(void) snprintf(buf, sizeof (buf), "%llx",
1614 			    zb[i].zb_object);
1615 			if (nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
1616 			    buf) != 0)
1617 				goto nomem;
1618 			(void) snprintf(buf, sizeof (buf), "lvl=%u blkid=%llu",
1619 			    (int)zb[i].zb_level, (long long)zb[i].zb_blkid);
1620 			if (nvlist_add_string(nv, ZPOOL_ERR_RANGE,
1621 			    buf) != 0)
1622 				goto nomem;
1623 		}
1624 
1625 		j++;
1626 	}
1627 
1628 	*list = zhp->zpool_error_log;
1629 	*nelem = zhp->zpool_error_count;
1630 
1631 	free((void *)(uintptr_t)zc.zc_config_dst);
1632 
1633 	return (0);
1634 
1635 nomem:
1636 	free((void *)(uintptr_t)zc.zc_config_dst);
1637 	for (i = 0; i < zhp->zpool_error_count; i++) {
1638 		if (zhp->zpool_error_log[i])
1639 			nvlist_free(zhp->zpool_error_log[i]);
1640 	}
1641 	free(zhp->zpool_error_log);
1642 	zhp->zpool_error_log = NULL;
1643 	return (no_memory(zhp->zpool_hdl));
1644 }
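
/*
 * Illustrative sketch: walking the uniquified error log.  Each entry is an
 * nvlist carrying ZPOOL_ERR_DATASET, ZPOOL_ERR_OBJECT and ZPOOL_ERR_RANGE
 * strings; the array is cached in the handle, so the caller does not free
 * it:
 *
 *	nvlist_t **log;
 *	size_t nelem, i;
 *	char *ds, *obj;
 *
 *	if (zpool_get_errlog(zhp, &log, &nelem) == 0) {
 *		for (i = 0; i < nelem; i++) {
 *			verify(nvlist_lookup_string(log[i],
 *			    ZPOOL_ERR_DATASET, &ds) == 0);
 *			verify(nvlist_lookup_string(log[i],
 *			    ZPOOL_ERR_OBJECT, &obj) == 0);
 *			(void) printf("%s:%s\n", ds, obj);
 *		}
 *	}
 */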
1645 
1646 /*
1647  * Upgrade a ZFS pool to the latest on-disk version.
1648  */
1649 int
1650 zpool_upgrade(zpool_handle_t *zhp)
1651 {
1652 	zfs_cmd_t zc = { 0 };
1653 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1654 
1655 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1656 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1657 		return (zpool_standard_error(hdl, errno,
1658 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1659 		    zhp->zpool_name));
1660 
1661 	return (0);
1662 }
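
/*
 * Example (illustrative, assuming ZFS_VERSION is the library's current
 * on-disk version constant from sys/fs/zfs.h): upgrading only when the
 * pool is actually behind:
 *
 *	if (zpool_get_version(zhp) < ZFS_VERSION)
 *		(void) zpool_upgrade(zhp);
 */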
1663