xref: /titanic_50/usr/src/lib/libzfs/common/libzfs_pool.c (revision 0e42dee69ed771bf604dd1789fca9d77b5bbe302)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <assert.h>
30 #include <ctype.h>
31 #include <errno.h>
32 #include <devid.h>
33 #include <fcntl.h>
34 #include <libintl.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <sys/zfs_ioctl.h>
40 #include <sys/zio.h>
41 
42 #include "zfs_namecheck.h"
43 #include "libzfs_impl.h"
44 
45 /*
46  * Validate the given pool name.  If the name is invalid, an extended
47  * error message is recorded on the libzfs handle.
48  */
49 static boolean_t
50 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
51 {
52 	namecheck_err_t why;
53 	char what;
54 	int ret;
55 
56 	ret = pool_namecheck(pool, &why, &what);
57 
58 	/*
59 	 * The rules for reserved pool names were extended at a later point.
60 	 * But we need to support users with existing pools that may now be
61 	 * invalid.  So we only check for this expanded set of names during a
62 	 * create (or import), and only in userland.
63 	 */
64 	if (ret == 0 && !isopen &&
65 	    (strncmp(pool, "mirror", 6) == 0 ||
66 	    strncmp(pool, "raidz", 5) == 0 ||
67 	    strncmp(pool, "spare", 5) == 0)) {
68 		zfs_error_aux(hdl,
69 		    dgettext(TEXT_DOMAIN, "name is reserved"));
70 		return (B_FALSE);
71 	}
72 
73 
74 	if (ret != 0) {
75 		if (hdl != NULL) {
76 			switch (why) {
77 			case NAME_ERR_TOOLONG:
78 				zfs_error_aux(hdl,
79 				    dgettext(TEXT_DOMAIN, "name is too long"));
80 				break;
81 
82 			case NAME_ERR_INVALCHAR:
83 				zfs_error_aux(hdl,
84 				    dgettext(TEXT_DOMAIN, "invalid character "
85 				    "'%c' in pool name"), what);
86 				break;
87 
88 			case NAME_ERR_NOLETTER:
89 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
90 				    "name must begin with a letter"));
91 				break;
92 
93 			case NAME_ERR_RESERVED:
94 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
95 				    "name is reserved"));
96 				break;
97 
98 			case NAME_ERR_DISKLIKE:
99 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
100 				    "pool name is reserved"));
101 				break;
102 			}
103 		}
104 		return (B_FALSE);
105 	}
106 
107 	return (B_TRUE);
108 }
109 
110 /*
111  * Set the pool-wide health based on the vdev state of the root vdev.
112  */
113 int
114 set_pool_health(nvlist_t *config)
115 {
116 	nvlist_t *nvroot;
117 	vdev_stat_t *vs;
118 	uint_t vsc;
119 	char *health;
120 
121 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
122 	    &nvroot) == 0);
123 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
124 	    (uint64_t **)&vs, &vsc) == 0);
125 
126 	switch (vs->vs_state) {
127 
128 	case VDEV_STATE_CLOSED:
129 	case VDEV_STATE_CANT_OPEN:
130 	case VDEV_STATE_OFFLINE:
131 		health = dgettext(TEXT_DOMAIN, "FAULTED");
132 		break;
133 
134 	case VDEV_STATE_DEGRADED:
135 		health = dgettext(TEXT_DOMAIN, "DEGRADED");
136 		break;
137 
138 	case VDEV_STATE_HEALTHY:
139 		health = dgettext(TEXT_DOMAIN, "ONLINE");
140 		break;
141 
142 	default:
143 		abort();
144 	}
145 
146 	return (nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH, health));
147 }
148 
149 /*
150  * Open a handle to the given pool, even if the pool is currently in the FAULTED
151  * state.
152  */
153 zpool_handle_t *
154 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
155 {
156 	zpool_handle_t *zhp;
157 	boolean_t missing;
158 
159 	/*
160 	 * Make sure the pool name is valid.
161 	 */
162 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
163 		(void) zfs_error(hdl, EZFS_INVALIDNAME,
164 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
165 		    pool);
166 		return (NULL);
167 	}
168 
169 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
170 		return (NULL);
171 
172 	zhp->zpool_hdl = hdl;
173 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
174 
175 	if (zpool_refresh_stats(zhp, &missing) != 0) {
176 		zpool_close(zhp);
177 		return (NULL);
178 	}
179 
180 	if (missing) {
181 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
182 		    "no such pool"));
183 		(void) zfs_error(hdl, EZFS_NOENT,
184 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
185 		    pool);
186 		zpool_close(zhp);
187 		return (NULL);
188 	}
189 
190 	return (zhp);
191 }
192 
193 /*
194  * Like the above, but silent on error.  Used when iterating over pools (because
195  * the configuration cache may be out of date).
196  */
197 int
198 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
199 {
200 	zpool_handle_t *zhp;
201 	boolean_t missing;
202 
203 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
204 		return (-1);
205 
206 	zhp->zpool_hdl = hdl;
207 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
208 
209 	if (zpool_refresh_stats(zhp, &missing) != 0) {
210 		zpool_close(zhp);
211 		return (-1);
212 	}
213 
214 	if (missing) {
215 		zpool_close(zhp);
216 		*ret = NULL;
217 		return (0);
218 	}
219 
220 	*ret = zhp;
221 	return (0);
222 }
223 
224 /*
225  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
226  * state.
227  */
228 zpool_handle_t *
229 zpool_open(libzfs_handle_t *hdl, const char *pool)
230 {
231 	zpool_handle_t *zhp;
232 
233 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
234 		return (NULL);
235 
236 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
237 		(void) zfs_error(hdl, EZFS_POOLUNAVAIL,
238 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
239 		zpool_close(zhp);
240 		return (NULL);
241 	}
242 
243 	return (zhp);
244 }
245 
246 /*
247  * Close the handle.  Simply frees the memory associated with the handle.
248  */
249 void
250 zpool_close(zpool_handle_t *zhp)
251 {
252 	if (zhp->zpool_config)
253 		nvlist_free(zhp->zpool_config);
254 	if (zhp->zpool_old_config)
255 		nvlist_free(zhp->zpool_old_config);
256 	if (zhp->zpool_error_log) {
257 		int i;
258 		for (i = 0; i < zhp->zpool_error_count; i++)
259 			nvlist_free(zhp->zpool_error_log[i]);
260 		free(zhp->zpool_error_log);
261 	}
262 	free(zhp);
263 }
264 
265 /*
266  * Return the name of the pool.
267  */
268 const char *
269 zpool_get_name(zpool_handle_t *zhp)
270 {
271 	return (zhp->zpool_name);
272 }
273 
274 /*
275  * Return the GUID of the pool.
276  */
277 uint64_t
278 zpool_get_guid(zpool_handle_t *zhp)
279 {
280 	uint64_t guid;
281 
282 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
283 	    &guid) == 0);
284 	return (guid);
285 }
286 
287 /*
288  * Return the version of the pool.
289  */
290 uint64_t
291 zpool_get_version(zpool_handle_t *zhp)
292 {
293 	uint64_t version;
294 
295 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
296 	    &version) == 0);
297 
298 	return (version);
299 }
300 
301 /*
302  * Return the amount of space currently consumed by the pool.
303  */
304 uint64_t
305 zpool_get_space_used(zpool_handle_t *zhp)
306 {
307 	nvlist_t *nvroot;
308 	vdev_stat_t *vs;
309 	uint_t vsc;
310 
311 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
312 	    &nvroot) == 0);
313 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
314 	    (uint64_t **)&vs, &vsc) == 0);
315 
316 	return (vs->vs_alloc);
317 }
318 
319 /*
320  * Return the total space in the pool.
321  */
322 uint64_t
323 zpool_get_space_total(zpool_handle_t *zhp)
324 {
325 	nvlist_t *nvroot;
326 	vdev_stat_t *vs;
327 	uint_t vsc;
328 
329 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
330 	    &nvroot) == 0);
331 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
332 	    (uint64_t **)&vs, &vsc) == 0);
333 
334 	return (vs->vs_space);
335 }
336 
337 /*
338  * Return the alternate root for this pool, if any.
339  */
340 int
341 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
342 {
343 	zfs_cmd_t zc = { 0 };
344 
345 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
346 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
347 	    zc.zc_root[0] == '\0')
348 		return (-1);
349 
350 	(void) strlcpy(buf, zc.zc_root, buflen);
351 
352 	return (0);
353 }
354 
355 /*
356  * Return the state of the pool (ACTIVE or UNAVAILABLE)
357  */
358 int
359 zpool_get_state(zpool_handle_t *zhp)
360 {
361 	return (zhp->zpool_state);
362 }
363 
364 /*
365  * Create the named pool, using the provided vdev list.  It is assumed
366  * that the consumer has already validated the contents of the nvlist, so we
367  * don't have to worry about error semantics.
368  */
369 int
370 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
371     const char *altroot)
372 {
373 	zfs_cmd_t zc = { 0 };
374 	char *packed;
375 	size_t len;
376 	char msg[1024];
377 
378 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
379 	    "cannot create '%s'"), pool);
380 
381 	if (!zpool_name_valid(hdl, B_FALSE, pool))
382 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
383 
384 	if (altroot != NULL && altroot[0] != '/')
385 		return (zfs_error(hdl, EZFS_BADPATH,
386 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
387 
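	/*
	 * The root vdev specification is handed to the kernel as a packed
	 * nvlist: compute its packed size, allocate a buffer, and pack it.
	 */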
388 	if (nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) != 0)
389 		return (no_memory(hdl));
390 
391 	if ((packed = zfs_alloc(hdl, len)) == NULL)
392 		return (-1);
393 
394 	if (nvlist_pack(nvroot, &packed, &len,
395 	    NV_ENCODE_NATIVE, 0) != 0) {
396 		free(packed);
397 		return (no_memory(hdl));
398 	}
399 
400 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
401 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
402 	zc.zc_config_src_size = len;
403 
404 	if (altroot != NULL)
405 		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
406 
407 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
408 		free(packed);
409 
410 		switch (errno) {
411 		case EBUSY:
412 			/*
413 			 * This can happen if the user has specified the same
414 			 * device multiple times.  We can't reliably detect this
415 			 * until we try to add it and see we already have a
416 			 * label.
417 			 */
418 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
419 			    "one or more vdevs refer to the same device"));
420 			return (zfs_error(hdl, EZFS_BADDEV, msg));
421 
422 		case EOVERFLOW:
423 			/*
424 			 * This occurs when one of the devices is below
425 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
426 			 * device was the problem device since there's no
427 			 * reliable way to determine device size from userland.
428 			 */
429 			{
430 				char buf[64];
431 
432 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
433 
434 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
435 				    "one or more devices is less than the "
436 				    "minimum size (%s)"), buf);
437 			}
438 			return (zfs_error(hdl, EZFS_BADDEV, msg));
439 
440 		case ENOSPC:
441 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
442 			    "one or more devices is out of space"));
443 			return (zfs_error(hdl, EZFS_BADDEV, msg));
444 
445 		default:
446 			return (zpool_standard_error(hdl, errno, msg));
447 		}
448 	}
449 
450 	free(packed);
451 
452 	/*
453 	 * If this is an alternate root pool, then we automatically set the
454 	 * mountpoint of the root dataset to be '/'.
455 	 */
456 	if (altroot != NULL) {
457 		zfs_handle_t *zhp;
458 
459 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
460 		verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);
461 
462 		zfs_close(zhp);
463 	}
464 
465 	return (0);
466 }
467 
468 /*
469  * Destroy the given pool.  It is up to the caller to ensure that there are no
470  * datasets left in the pool.
471  */
472 int
473 zpool_destroy(zpool_handle_t *zhp)
474 {
475 	zfs_cmd_t zc = { 0 };
476 	zfs_handle_t *zfp = NULL;
477 	libzfs_handle_t *hdl = zhp->zpool_hdl;
478 	char msg[1024];
479 
480 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
481 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
482 	    ZFS_TYPE_FILESYSTEM)) == NULL)
483 		return (-1);
484 
485 	if (zpool_remove_zvol_links(zhp) != 0)
486 		return (-1);
487 
488 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
489 
490 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
491 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
492 		    "cannot destroy '%s'"), zhp->zpool_name);
493 
494 		if (errno == EROFS) {
495 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
496 			    "one or more devices is read only"));
497 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
498 		} else {
499 			(void) zpool_standard_error(hdl, errno, msg);
500 		}
501 
502 		if (zfp)
503 			zfs_close(zfp);
504 		return (-1);
505 	}
506 
507 	if (zfp) {
508 		remove_mountpoint(zfp);
509 		zfs_close(zfp);
510 	}
511 
512 	return (0);
513 }
514 
515 /*
516  * Add the given vdevs to the pool.  The caller must have already performed the
517  * necessary verification to ensure that the vdev specification is well-formed.
518  */
519 int
520 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
521 {
522 	char *packed;
523 	size_t len;
524 	zfs_cmd_t zc = { 0 };
525 	int ret;
526 	libzfs_handle_t *hdl = zhp->zpool_hdl;
527 	char msg[1024];
528 	nvlist_t **spares;
529 	uint_t nspares;
530 
531 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
532 	    "cannot add to '%s'"), zhp->zpool_name);
533 
534 	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
535 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
536 	    &spares, &nspares) == 0) {
537 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
538 		    "upgraded to add hot spares"));
539 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
540 	}
541 
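	/*
	 * Pack the new vdev specification into a contiguous buffer to pass
	 * to the kernel with the ZFS_IOC_VDEV_ADD request.
	 */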
542 	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);
543 
544 	if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
545 		return (-1);
546 
547 	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
548 
549 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
550 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
551 	zc.zc_config_src_size = len;
552 
553 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
554 		switch (errno) {
555 		case EBUSY:
556 			/*
557 			 * This can happen if the user has specified the same
558 			 * device multiple times.  We can't reliably detect this
559 			 * until we try to add it and see we already have a
560 			 * label.
561 			 */
562 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
563 			    "one or more vdevs refer to the same device"));
564 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
565 			break;
566 
567 		case EOVERFLOW:
568 			/*
569 			 * This occurs when one of the devices is below
570 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
571 			 * device was the problem device since there's no
572 			 * reliable way to determine device size from userland.
573 			 */
574 			{
575 				char buf[64];
576 
577 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
578 
579 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
580 				    "device is less than the minimum "
581 				    "size (%s)"), buf);
582 			}
583 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
584 			break;
585 
586 		case ENOTSUP:
587 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
588 			    "pool must be upgraded to add raidz2 vdevs"));
589 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
590 			break;
591 
592 		default:
593 			(void) zpool_standard_error(hdl, errno, msg);
594 		}
595 
596 		ret = -1;
597 	} else {
598 		ret = 0;
599 	}
600 
601 	free(packed);
602 
603 	return (ret);
604 }
605 
606 /*
607  * Exports the pool from the system.  The caller must ensure that there are no
608  * mounted datasets in the pool.
609  */
610 int
611 zpool_export(zpool_handle_t *zhp)
612 {
613 	zfs_cmd_t zc = { 0 };
614 
615 	if (zpool_remove_zvol_links(zhp) != 0)
616 		return (-1);
617 
618 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
619 
620 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
621 		return (zpool_standard_error(zhp->zpool_hdl, errno,
622 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
623 		    zhp->zpool_name));
624 
625 	return (0);
626 }
627 
628 /*
629  * Import the given pool using the known configuration.  The configuration
630  * should have come from zpool_find_import().  The 'newname' and 'altroot'
631  * parameters control whether the pool is imported with a different name or with
632  * an alternate root, respectively.
633  */
634 int
635 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
636     const char *altroot)
637 {
638 	zfs_cmd_t zc = { 0 };
639 	char *packed;
640 	size_t len;
641 	char *thename;
642 	char *origname;
643 	int ret;
644 
645 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
646 	    &origname) == 0);
647 
648 	if (newname != NULL) {
649 		if (!zpool_name_valid(hdl, B_FALSE, newname))
650 			return (zfs_error(hdl, EZFS_INVALIDNAME,
651 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
652 			    newname));
653 		thename = (char *)newname;
654 	} else {
655 		thename = origname;
656 	}
657 
658 	if (altroot != NULL && altroot[0] != '/')
659 		return (zfs_error(hdl, EZFS_BADPATH,
660 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
661 		    altroot));
662 
663 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
664 
665 	if (altroot != NULL)
666 		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
667 	else
668 		zc.zc_root[0] = '\0';
669 
670 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
671 	    &zc.zc_guid) == 0);
672 
673 	verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);
674 
675 	if ((packed = zfs_alloc(hdl, len)) == NULL)
676 		return (-1);
677 
678 	verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
679 
680 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
681 	zc.zc_config_src_size = len;
682 
683 	ret = 0;
684 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
685 		char desc[1024];
686 		if (newname == NULL)
687 			(void) snprintf(desc, sizeof (desc),
688 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
689 			    thename);
690 		else
691 			(void) snprintf(desc, sizeof (desc),
692 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
693 			    origname, thename);
694 
695 		switch (errno) {
696 		case ENOTSUP:
697 			/*
698 			 * Unsupported version.
699 			 */
700 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
701 			break;
702 
703 		case EINVAL:
704 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
705 			break;
706 
707 		default:
708 			(void) zpool_standard_error(hdl, errno, desc);
709 		}
710 
711 		ret = -1;
712 	} else {
713 		zpool_handle_t *zhp;
714 		/*
715 		 * This should never fail, but play it safe anyway.
716 		 */
717 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
718 			ret = -1;
719 		} else if (zhp != NULL) {
720 			ret = zpool_create_zvol_links(zhp);
721 			zpool_close(zhp);
722 		}
723 	}
724 
725 	free(packed);
726 	return (ret);
727 }
728 
729 /*
730  * Scrub the pool.
731  */
732 int
733 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
734 {
735 	zfs_cmd_t zc = { 0 };
736 	char msg[1024];
737 	libzfs_handle_t *hdl = zhp->zpool_hdl;
738 
739 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
740 	zc.zc_cookie = type;
741 
742 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
743 		return (0);
744 
745 	(void) snprintf(msg, sizeof (msg),
746 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
747 
748 	if (errno == EBUSY)
749 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
750 	else
751 		return (zpool_standard_error(hdl, errno, msg));
752 }
753 
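/*
 * Recursively search the vdev tree rooted at 'nv' for a vdev matching either
 * the path 'search' or, if 'search' is NULL, the given GUID.  Both regular
 * children and hot spares are examined; '*isspare' is set to B_TRUE when the
 * match is found among the spares.
 */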
754 static nvlist_t *
755 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
756     boolean_t *isspare)
757 {
758 	uint_t c, children;
759 	nvlist_t **child;
760 	uint64_t theguid, present;
761 	char *path;
762 	uint64_t wholedisk = 0;
763 	nvlist_t *ret;
764 
765 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
766 
767 	if (search == NULL &&
768 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
769 		/*
770 		 * If the device has never been present since import, the only
771 		 * reliable way to match the vdev is by GUID.
772 		 */
773 		if (theguid == guid)
774 			return (nv);
775 	} else if (search != NULL &&
776 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
777 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
778 		    &wholedisk);
779 		if (wholedisk) {
780 			/*
781 			 * For whole disks, the internal path has 's0', but the
782 			 * path passed in by the user doesn't.
783 			 */
784 			if (strlen(search) == strlen(path) - 2 &&
785 			    strncmp(search, path, strlen(search)) == 0)
786 				return (nv);
787 		} else if (strcmp(search, path) == 0) {
788 			return (nv);
789 		}
790 	}
791 
792 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
793 	    &child, &children) != 0)
794 		return (NULL);
795 
796 	for (c = 0; c < children; c++)
797 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
798 		    isspare)) != NULL)
799 			return (ret);
800 
801 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
802 	    &child, &children) == 0) {
803 		for (c = 0; c < children; c++) {
804 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
805 			    isspare)) != NULL) {
806 				*isspare = B_TRUE;
807 				return (ret);
808 			}
809 		}
810 	}
811 
812 	return (NULL);
813 }
814 
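/*
 * Locate a vdev within the pool's configuration.  The 'path' argument may be
 * a vdev GUID (a decimal string), a short device name (assumed to live under
 * /dev/dsk/), or an absolute path.  '*isspare' is set to B_TRUE if the vdev
 * is found among the pool's hot spares.
 */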
815 nvlist_t *
816 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *isspare)
817 {
818 	char buf[MAXPATHLEN];
819 	const char *search;
820 	char *end;
821 	nvlist_t *nvroot;
822 	uint64_t guid;
823 
824 	guid = strtoull(path, &end, 10);
825 	if (guid != 0 && *end == '\0') {
826 		search = NULL;
827 	} else if (path[0] != '/') {
828 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
829 		search = buf;
830 	} else {
831 		search = path;
832 	}
833 
834 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
835 	    &nvroot) == 0);
836 
837 	*isspare = B_FALSE;
838 	return (vdev_to_nvlist_iter(nvroot, search, guid, isspare));
839 }
840 
841 /*
842  * Bring the specified vdev online
843  */
844 int
845 zpool_vdev_online(zpool_handle_t *zhp, const char *path)
846 {
847 	zfs_cmd_t zc = { 0 };
848 	char msg[1024];
849 	nvlist_t *tgt;
850 	boolean_t isspare;
851 	libzfs_handle_t *hdl = zhp->zpool_hdl;
852 
853 	(void) snprintf(msg, sizeof (msg),
854 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
855 
856 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
857 	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
858 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
859 
860 	if (isspare)
861 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
862 
863 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
864 
865 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
866 		return (0);
867 
868 	return (zpool_standard_error(hdl, errno, msg));
869 }
870 
871 /*
872  * Take the specified vdev offline
873  */
874 int
875 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
876 {
877 	zfs_cmd_t zc = { 0 };
878 	char msg[1024];
879 	nvlist_t *tgt;
880 	boolean_t isspare;
881 	libzfs_handle_t *hdl = zhp->zpool_hdl;
882 
883 	(void) snprintf(msg, sizeof (msg),
884 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
885 
886 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
887 	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
888 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
889 
890 	if (isspare)
891 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
892 
893 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
894 
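	/* The 'temporary' flag is passed to the kernel in zc_cookie. */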
895 	zc.zc_cookie = istmp;
896 
897 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
898 		return (0);
899 
900 	switch (errno) {
901 	case EBUSY:
902 
903 		/*
904 		 * There are no other replicas of this device.
905 		 */
906 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
907 
908 	default:
909 		return (zpool_standard_error(hdl, errno, msg));
910 	}
911 }
912 
913 /*
914  * Returns B_TRUE if the vdev 'tgt' appears as child 'which' of a spare vdev
915  * anywhere within the 'search' tree, i.e. a hot spare has been swapped in.
916  */
917 static boolean_t
918 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
919 {
920 	nvlist_t **child;
921 	uint_t c, children;
922 	char *type;
923 
924 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
925 	    &children) == 0) {
926 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
927 		    &type) == 0);
928 
929 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
930 		    children == 2 && child[which] == tgt)
931 			return (B_TRUE);
932 
933 		for (c = 0; c < children; c++)
934 			if (is_replacing_spare(child[c], tgt, which))
935 				return (B_TRUE);
936 	}
937 
938 	return (B_FALSE);
939 }
940 
941 /*
942  * Attach new_disk (fully described by nvroot) to old_disk.
943  * If 'replacing' is specified, the new disk will replace the old one.
944  */
945 int
946 zpool_vdev_attach(zpool_handle_t *zhp,
947     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
948 {
949 	zfs_cmd_t zc = { 0 };
950 	char msg[1024];
951 	char *packed;
952 	int ret;
953 	size_t len;
954 	nvlist_t *tgt;
955 	boolean_t isspare;
956 	uint64_t val;
957 	char *path;
958 	nvlist_t **child;
959 	uint_t children;
960 	nvlist_t *config_root;
961 	libzfs_handle_t *hdl = zhp->zpool_hdl;
962 
963 	if (replacing)
964 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
965 		    "cannot replace %s with %s"), old_disk, new_disk);
966 	else
967 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
968 		    "cannot attach %s to %s"), new_disk, old_disk);
969 
970 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
971 	if ((tgt = zpool_find_vdev(zhp, old_disk, &isspare)) == NULL)
972 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
973 
974 	if (isspare)
975 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
976 
977 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
978 	zc.zc_cookie = replacing;
979 
980 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
981 	    &child, &children) != 0 || children != 1) {
982 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
983 		    "new device must be a single disk"));
984 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
985 	}
986 
987 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
988 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
989 
990 	/*
991 	 * If the target is a hot spare that has been swapped in, we can only
992 	 * replace it with another hot spare.
993 	 */
994 	if (replacing &&
995 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
996 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
997 	    (zpool_find_vdev(zhp, path, &isspare) == NULL || !isspare) &&
998 	    is_replacing_spare(config_root, tgt, 1)) {
999 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1000 		    "can only be replaced by another hot spare"));
1001 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1002 	}
1003 
1004 	/*
1005 	 * If we are attempting to replace a device with a spare, the spare
1006 	 * cannot be applied to a device that is already spared.
1007 	 */
1008 	if (replacing &&
1009 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1010 	    zpool_find_vdev(zhp, path, &isspare) != NULL && isspare &&
1011 	    is_replacing_spare(config_root, tgt, 0)) {
1012 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1013 		    "device has already been replaced with a spare"));
1014 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1015 	}
1016 
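	/*
	 * Pack the replacement vdev specification so it can be handed to the
	 * kernel along with the attach request.
	 */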
1017 	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);
1018 
1019 	if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
1020 		return (-1);
1021 
1022 	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
1023 
1024 	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
1025 	zc.zc_config_src_size = len;
1026 
1027 	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
1028 
1029 	free(packed);
1030 
1031 	if (ret == 0)
1032 		return (0);
1033 
1034 	switch (errno) {
1035 	case ENOTSUP:
1036 		/*
1037 		 * Can't attach to or replace this type of vdev.
1038 		 */
1039 		if (replacing)
1040 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1041 			    "cannot replace a replacing device"));
1042 		else
1043 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1044 			    "can only attach to mirrors and top-level "
1045 			    "disks"));
1046 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1047 		break;
1048 
1049 	case EINVAL:
1050 		/*
1051 		 * The new device must be a single disk.
1052 		 */
1053 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1054 		    "new device must be a single disk"));
1055 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1056 		break;
1057 
1058 	case EBUSY:
1059 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1060 		    new_disk);
1061 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1062 		break;
1063 
1064 	case EOVERFLOW:
1065 		/*
1066 		 * The new device is too small.
1067 		 */
1068 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1069 		    "device is too small"));
1070 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1071 		break;
1072 
1073 	case EDOM:
1074 		/*
1075 		 * The new device has a different alignment requirement.
1076 		 */
1077 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1078 		    "devices have different sector alignment"));
1079 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1080 		break;
1081 
1082 	case ENAMETOOLONG:
1083 		/*
1084 		 * The resulting top-level vdev spec won't fit in the label.
1085 		 */
1086 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1087 		break;
1088 
1089 	default:
1090 		(void) zpool_standard_error(hdl, errno, msg);
1091 	}
1092 
1093 	return (-1);
1094 }
1095 
1096 /*
1097  * Detach the specified device.
1098  */
1099 int
1100 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1101 {
1102 	zfs_cmd_t zc = { 0 };
1103 	char msg[1024];
1104 	nvlist_t *tgt;
1105 	boolean_t isspare;
1106 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1107 
1108 	(void) snprintf(msg, sizeof (msg),
1109 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1110 
1111 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1112 	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
1113 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1114 
1115 	if (isspare)
1116 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1117 
1118 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1119 
1120 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1121 		return (0);
1122 
1123 	switch (errno) {
1124 
1125 	case ENOTSUP:
1126 		/*
1127 		 * Can't detach from this type of vdev.
1128 		 */
1129 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1130 		    "applicable to mirror and replacing vdevs"));
1131 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1132 		break;
1133 
1134 	case EBUSY:
1135 		/*
1136 		 * There are no other replicas of this device.
1137 		 */
1138 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1139 		break;
1140 
1141 	default:
1142 		(void) zpool_standard_error(hdl, errno, msg);
1143 	}
1144 
1145 	return (-1);
1146 }
1147 
1148 /*
1149  * Remove the given device.  Currently, this is supported only for hot spares.
1150  */
1151 int
1152 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1153 {
1154 	zfs_cmd_t zc = { 0 };
1155 	char msg[1024];
1156 	nvlist_t *tgt;
1157 	boolean_t isspare;
1158 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1159 
1160 	(void) snprintf(msg, sizeof (msg),
1161 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1162 
1163 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1164 	if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
1165 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1166 
1167 	if (!isspare) {
1168 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1169 		    "only hot spares can be removed"));
1170 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1171 	}
1172 
1173 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1174 
1175 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1176 		return (0);
1177 
1178 	return (zpool_standard_error(hdl, errno, msg));
1179 }
1180 
1181 /*
1182  * Clear the errors for the pool, or the particular device if specified.
1183  */
1184 int
1185 zpool_clear(zpool_handle_t *zhp, const char *path)
1186 {
1187 	zfs_cmd_t zc = { 0 };
1188 	char msg[1024];
1189 	nvlist_t *tgt;
1190 	boolean_t isspare;
1191 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1192 
1193 	if (path)
1194 		(void) snprintf(msg, sizeof (msg),
1195 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1196 		    path);
1197 	else
1198 		(void) snprintf(msg, sizeof (msg),
1199 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1200 		    zhp->zpool_name);
1201 
1202 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1203 	if (path) {
1204 		if ((tgt = zpool_find_vdev(zhp, path, &isspare)) == NULL)
1205 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1206 
1207 		if (isspare)
1208 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1209 
1210 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1211 		    &zc.zc_guid) == 0);
1212 	}
1213 
1214 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1215 		return (0);
1216 
1217 	return (zpool_standard_error(hdl, errno, msg));
1218 }
1219 
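/*
 * Callback for zfs_iter_children():  create or remove the minor node links
 * for a volume and recurse into its children.  'data' encodes the link type
 * (B_TRUE to create links, B_FALSE to remove them).
 */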
1220 static int
1221 do_zvol(zfs_handle_t *zhp, void *data)
1222 {
1223 	int linktype = (int)(uintptr_t)data;
1224 	int ret;
1225 
1226 	/*
1227 	 * We check for volblocksize instead of ZFS_TYPE_VOLUME so that we
1228 	 * correctly handle snapshots of volumes.
1229 	 */
1230 	if (zhp->zfs_volblocksize != 0) {
1231 		if (linktype)
1232 			ret = zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1233 		else
1234 			ret = zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name);
1235 	}
1236 
1237 	ret = zfs_iter_children(zhp, do_zvol, data);
1238 
1239 	zfs_close(zhp);
1240 	return (ret);
1241 }
1242 
1243 /*
1244  * Iterate over all zvols in the pool and make any necessary minor nodes.
1245  */
1246 int
1247 zpool_create_zvol_links(zpool_handle_t *zhp)
1248 {
1249 	zfs_handle_t *zfp;
1250 	int ret;
1251 
1252 	/*
1253 	 * If the pool is unavailable, just return success.
1254 	 */
1255 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1256 	    zhp->zpool_name)) == NULL)
1257 		return (0);
1258 
1259 	ret = zfs_iter_children(zfp, do_zvol, (void *)B_TRUE);
1260 
1261 	zfs_close(zfp);
1262 	return (ret);
1263 }
1264 
1265 /*
1266  * Iterate over all zvols in the pool and remove any minor nodes.
1267  */
1268 int
1269 zpool_remove_zvol_links(zpool_handle_t *zhp)
1270 {
1271 	zfs_handle_t *zfp;
1272 	int ret;
1273 
1274 	/*
1275 	 * If the pool is unavailable, just return success.
1276 	 */
1277 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1278 	    zhp->zpool_name)) == NULL)
1279 		return (0);
1280 
1281 	ret = zfs_iter_children(zfp, do_zvol, (void *)B_FALSE);
1282 
1283 	zfs_close(zfp);
1284 	return (ret);
1285 }
1286 
1287 /*
1288  * Convert from a devid string to a path.
1289  */
1290 static char *
1291 devid_to_path(char *devid_str)
1292 {
1293 	ddi_devid_t devid;
1294 	char *minor;
1295 	char *path;
1296 	devid_nmlist_t *list = NULL;
1297 	int ret;
1298 
1299 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1300 		return (NULL);
1301 
1302 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1303 
1304 	devid_str_free(minor);
1305 	devid_free(devid);
1306 
1307 	if (ret != 0)
1308 		return (NULL);
1309 
1310 	if ((path = strdup(list[0].devname)) == NULL)
1311 		return (NULL);
1312 
1313 	devid_free_nmlist(list);
1314 
1315 	return (path);
1316 }
1317 
1318 /*
1319  * Convert from a path to a devid string.
1320  */
1321 static char *
1322 path_to_devid(const char *path)
1323 {
1324 	int fd;
1325 	ddi_devid_t devid;
1326 	char *minor, *ret;
1327 
1328 	if ((fd = open(path, O_RDONLY)) < 0)
1329 		return (NULL);
1330 
1331 	minor = NULL;
1332 	ret = NULL;
1333 	if (devid_get(fd, &devid) == 0) {
1334 		if (devid_get_minor_name(fd, &minor) == 0)
1335 			ret = devid_str_encode(devid, minor);
1336 		if (minor != NULL)
1337 			devid_str_free(minor);
1338 		devid_free(devid);
1339 	}
1340 	(void) close(fd);
1341 
1342 	return (ret);
1343 }
1344 
1345 /*
1346  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1347  * ignore any failure here, since a common case is for an unprivileged user to
1348  * type 'zpool status', and we'll display the correct information anyway.
1349  */
1350 static void
1351 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1352 {
1353 	zfs_cmd_t zc = { 0 };
1354 
1355 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1356 	(void) strlcpy(zc.zc_prop_value, path, sizeof (zc.zc_prop_value));
1357 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1358 	    &zc.zc_guid) == 0);
1359 
1360 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1361 }
1362 
1363 /*
1364  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1365  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1366  * We also check if this is a whole disk, in which case we strip off the
1367  * trailing 's0' slice name.
1368  *
1369  * This routine is also responsible for identifying when disks have been
1370  * reconfigured in a new location.  The kernel will have opened the device by
1371  * devid, but the path will still refer to the old location.  To catch this, we
1372  * first do a path -> devid translation (which is fast for the common case).  If
1373  * the devid matches, we're done.  If not, we do a reverse devid -> path
1374  * translation and issue the appropriate ioctl() to update the path of the vdev.
1375  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1376  * of these checks.
1377  */
1378 char *
1379 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
1380 {
1381 	char *path, *devid;
1382 	uint64_t value;
1383 	char buf[64];
1384 
1385 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1386 	    &value) == 0) {
1387 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1388 		    &value) == 0);
1389 		(void) snprintf(buf, sizeof (buf), "%llu", value);
1390 		path = buf;
1391 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1392 
1393 		if (zhp != NULL &&
1394 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
1395 			/*
1396 			 * Determine if the current path is correct.
1397 			 */
1398 			char *newdevid = path_to_devid(path);
1399 
1400 			if (newdevid == NULL ||
1401 			    strcmp(devid, newdevid) != 0) {
1402 				char *newpath;
1403 
1404 				if ((newpath = devid_to_path(devid)) != NULL) {
1405 					/*
1406 					 * Update the path appropriately.
1407 					 */
1408 					set_path(zhp, nv, newpath);
1409 					if (nvlist_add_string(nv,
1410 					    ZPOOL_CONFIG_PATH, newpath) == 0)
1411 						verify(nvlist_lookup_string(nv,
1412 						    ZPOOL_CONFIG_PATH,
1413 						    &path) == 0);
1414 					free(newpath);
1415 				}
1416 			}
1417 
1418 			if (newdevid)
1419 				devid_str_free(newdevid);
1420 		}
1421 
1422 		if (strncmp(path, "/dev/dsk/", 9) == 0)
1423 			path += 9;
1424 
1425 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1426 		    &value) == 0 && value) {
1427 			char *tmp = zfs_strdup(hdl, path);
1428 			if (tmp == NULL)
1429 				return (NULL);
1430 			tmp[strlen(path) - 2] = '\0';
1431 			return (tmp);
1432 		}
1433 	} else {
1434 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
1435 
1436 		/*
1437 		 * If it's a raidz device, we need to stick in the parity level.
1438 		 */
1439 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
1440 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
1441 			    &value) == 0);
1442 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
1443 			    value);
1444 			path = buf;
1445 		}
1446 	}
1447 
1448 	return (zfs_strdup(hdl, path));
1449 }
1450 
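/*
 * qsort(3C) comparator that orders zbookmark_t entries by their raw memory
 * contents, so duplicate bookmarks become adjacent.
 */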
1451 static int
1452 zbookmark_compare(const void *a, const void *b)
1453 {
1454 	return (memcmp(a, b, sizeof (zbookmark_t)));
1455 }
1456 
1457 /*
1458  * Retrieve the persistent error log, uniquify the members, and return to the
1459  * caller.
1460  */
1461 int
1462 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
1463 {
1464 	zfs_cmd_t zc = { 0 };
1465 	uint64_t count;
1466 	zbookmark_t *zb;
1467 	int i, j;
1468 
1469 	if (zhp->zpool_error_log != NULL) {
1470 		*list = zhp->zpool_error_log;
1471 		*nelem = zhp->zpool_error_count;
1472 		return (0);
1473 	}
1474 
1475 	/*
1476 	 * Retrieve the raw error list from the kernel.  If the number of errors
1477 	 * has increased, allocate more space and continue until we get the
1478 	 * entire list.
1479 	 */
1480 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1481 	    &count) == 0);
1482 	if ((zc.zc_config_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1483 	    count * sizeof (zbookmark_t))) == NULL)
1484 		return (-1);
1485 	zc.zc_config_dst_size = count;
1486 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1487 	for (;;) {
1488 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1489 		    &zc) != 0) {
1490 			free((void *)(uintptr_t)zc.zc_config_dst);
1491 			if (errno == ENOMEM) {
1492 				if ((zc.zc_config_dst = (uintptr_t)
1493 				    zfs_alloc(zhp->zpool_hdl,
1494 				    zc.zc_config_dst_size)) == NULL)
1495 					return (-1);
1496 			} else {
1497 				return (-1);
1498 			}
1499 		} else {
1500 			break;
1501 		}
1502 	}
1503 
1504 	/*
1505 	 * Sort the resulting bookmarks.  This is a little confusing due to the
1506 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
1507  * to first, and 'zc_config_dst_size' indicates the number of bookmarks
1508  * _not_ copied as part of the process.  So we point the start of our
1509  * array appropriately and decrement the total number of elements.
1510 	 */
1511 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_config_dst) +
1512 	    zc.zc_config_dst_size;
1513 	count -= zc.zc_config_dst_size;
1514 
1515 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1516 
1517 	/*
1518 	 * Count the number of unique elements
1519 	 */
1520 	j = 0;
1521 	for (i = 0; i < count; i++) {
1522 		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
1523 		    sizeof (zbookmark_t)) == 0)
1524 			continue;
1525 		j++;
1526 	}
1527 
1528 	/*
1529 	 * If the user has only requested the number of items, return it now
1530 	 * without bothering with the extra work.
1531 	 */
1532 	if (list == NULL) {
1533 		*nelem = j;
1534 		free((void *)(uintptr_t)zc.zc_config_dst);
1535 		return (0);
1536 	}
1537 
1538 	zhp->zpool_error_count = j;
1539 
1540 	/*
1541 	 * Allocate an array of nvlists to hold the results
1542 	 */
1543 	if ((zhp->zpool_error_log = zfs_alloc(zhp->zpool_hdl,
1544 	    j * sizeof (nvlist_t *))) == NULL) {
1545 		free((void *)(uintptr_t)zc.zc_config_dst);
1546 		return (-1);
1547 	}
1548 
1549 	/*
1550 	 * Fill in the results with names from the kernel.
1551 	 */
1552 	j = 0;
1553 	for (i = 0; i < count; i++) {
1554 		char buf[64];
1555 		nvlist_t *nv;
1556 
1557 		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
1558 		    sizeof (zbookmark_t)) == 0)
1559 			continue;
1560 
1561 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME,
1562 		    0) != 0)
1563 			goto nomem;
1564 		zhp->zpool_error_log[j] = nv;
1565 
1566 		zc.zc_bookmark = zb[i];
1567 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_BOOKMARK_NAME,
1568 		    &zc) == 0) {
1569 			if (nvlist_add_string(nv, ZPOOL_ERR_DATASET,
1570 			    zc.zc_prop_name) != 0 ||
1571 			    nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
1572 			    zc.zc_prop_value) != 0 ||
1573 			    nvlist_add_string(nv, ZPOOL_ERR_RANGE,
1574 			    zc.zc_filename) != 0)
1575 				goto nomem;
1576 		} else {
1577 			(void) snprintf(buf, sizeof (buf), "%llx",
1578 			    zb[i].zb_objset);
1579 			if (nvlist_add_string(nv,
1580 			    ZPOOL_ERR_DATASET, buf) != 0)
1581 				goto nomem;
1582 			(void) snprintf(buf, sizeof (buf), "%llx",
1583 			    zb[i].zb_object);
1584 			if (nvlist_add_string(nv, ZPOOL_ERR_OBJECT,
1585 			    buf) != 0)
1586 				goto nomem;
1587 			(void) snprintf(buf, sizeof (buf), "lvl=%u blkid=%llu",
1588 			    (int)zb[i].zb_level, (long long)zb[i].zb_blkid);
1589 			if (nvlist_add_string(nv, ZPOOL_ERR_RANGE,
1590 			    buf) != 0)
1591 				goto nomem;
1592 		}
1593 
1594 		j++;
1595 	}
1596 
1597 	*list = zhp->zpool_error_log;
1598 	*nelem = zhp->zpool_error_count;
1599 
1600 	free((void *)(uintptr_t)zc.zc_config_dst);
1601 
1602 	return (0);
1603 
1604 nomem:
1605 	free((void *)(uintptr_t)zc.zc_config_dst);
1606 	for (i = 0; i < zhp->zpool_error_count; i++) {
1607 		if (zhp->zpool_error_log[i])
1608 			nvlist_free(zhp->zpool_error_log[i]);
1609 	}
1610 	free(zhp->zpool_error_log);
1611 	zhp->zpool_error_log = NULL;
1612 	return (no_memory(zhp->zpool_hdl));
1613 }
1614 
1615 /*
1616  * Upgrade a ZFS pool to the latest on-disk version.
1617  */
1618 int
1619 zpool_upgrade(zpool_handle_t *zhp)
1620 {
1621 	zfs_cmd_t zc = { 0 };
1622 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1623 
1624 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1625 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1626 		return (zpool_standard_error(hdl, errno,
1627 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1628 		    zhp->zpool_name));
1629 
1630 	return (0);
1631 }
1632