xref: /titanic_44/usr/src/lib/libzfs/common/libzfs_pool.c (revision 458cf4d67cff5ff99a68a866b7657f1b1d6fe61c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <sys/zio.h>
45 #include <strings.h>
46 
47 #include "zfs_namecheck.h"
48 #include "zfs_prop.h"
49 #include "libzfs_impl.h"
50 
51 /*
52  * Validate the given pool name, optionally putting an extended error message in
53  * 'buf'.
54  */
55 static boolean_t
56 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
57 {
58 	namecheck_err_t why;
59 	char what;
60 	int ret;
61 
62 	ret = pool_namecheck(pool, &why, &what);
63 
64 	/*
65 	 * The rules for reserved pool names were extended at a later point.
66 	 * But we need to support users with existing pools that may now be
67 	 * invalid.  So we only check for this expanded set of names during a
68 	 * create (or import), and only in userland.
69 	 */
70 	if (ret == 0 && !isopen &&
71 	    (strncmp(pool, "mirror", 6) == 0 ||
72 	    strncmp(pool, "raidz", 5) == 0 ||
73 	    strncmp(pool, "spare", 5) == 0 ||
74 	    strcmp(pool, "log") == 0)) {
75 		zfs_error_aux(hdl,
76 		    dgettext(TEXT_DOMAIN, "name is reserved"));
77 		return (B_FALSE);
78 	}
79 
80 
81 	if (ret != 0) {
82 		if (hdl != NULL) {
83 			switch (why) {
84 			case NAME_ERR_TOOLONG:
85 				zfs_error_aux(hdl,
86 				    dgettext(TEXT_DOMAIN, "name is too long"));
87 				break;
88 
89 			case NAME_ERR_INVALCHAR:
90 				zfs_error_aux(hdl,
91 				    dgettext(TEXT_DOMAIN, "invalid character "
92 				    "'%c' in pool name"), what);
93 				break;
94 
95 			case NAME_ERR_NOLETTER:
96 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
97 				    "name must begin with a letter"));
98 				break;
99 
100 			case NAME_ERR_RESERVED:
101 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
102 				    "name is reserved"));
103 				break;
104 
105 			case NAME_ERR_DISKLIKE:
106 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
107 				    "pool name is reserved"));
108 				break;
109 
110 			case NAME_ERR_LEADING_SLASH:
111 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
112 				    "leading slash in name"));
113 				break;
114 
115 			case NAME_ERR_EMPTY_COMPONENT:
116 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
117 				    "empty component in name"));
118 				break;
119 
120 			case NAME_ERR_TRAILING_SLASH:
121 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
122 				    "trailing slash in name"));
123 				break;
124 
125 			case NAME_ERR_MULTIPLE_AT:
126 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
127 				    "multiple '@' delimiters in name"));
128 				break;
129 
130 			}
131 		}
132 		return (B_FALSE);
133 	}
134 
135 	return (B_TRUE);
136 }
137 
/*
 * Fetch the pool's full property nvlist from the kernel and cache it in
 * zhp->zpool_props.  Returns 0 on success, -1 on failure.
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	/*
	 * Grow the destination buffer and retry for as long as the kernel
	 * reports ENOMEM; any other errno is fatal.
	 */
	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	/* Unpack the packed nvlist the kernel wrote into the dst buffer. */
	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
170 
171 /*
172  * Open a handle to the given pool, even if the pool is currently in the FAULTED
173  * state.
174  */
175 zpool_handle_t *
176 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
177 {
178 	zpool_handle_t *zhp;
179 	boolean_t missing;
180 
181 	/*
182 	 * Make sure the pool name is valid.
183 	 */
184 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
185 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
186 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
187 		    pool);
188 		return (NULL);
189 	}
190 
191 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
192 		return (NULL);
193 
194 	zhp->zpool_hdl = hdl;
195 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
196 
197 	if (zpool_refresh_stats(zhp, &missing) != 0) {
198 		zpool_close(zhp);
199 		return (NULL);
200 	}
201 
202 	if (missing) {
203 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
204 		    "no such pool"));
205 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
206 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
207 		    pool);
208 		zpool_close(zhp);
209 		return (NULL);
210 	}
211 
212 	return (zhp);
213 }
214 
215 /*
216  * Like the above, but silent on error.  Used when iterating over pools (because
217  * the configuration cache may be out of date).
218  */
219 int
220 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
221 {
222 	zpool_handle_t *zhp;
223 	boolean_t missing;
224 
225 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
226 		return (-1);
227 
228 	zhp->zpool_hdl = hdl;
229 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
230 
231 	if (zpool_refresh_stats(zhp, &missing) != 0) {
232 		zpool_close(zhp);
233 		return (-1);
234 	}
235 
236 	if (missing) {
237 		zpool_close(zhp);
238 		*ret = NULL;
239 		return (0);
240 	}
241 
242 	*ret = zhp;
243 	return (0);
244 }
245 
246 /*
247  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
248  * state.
249  */
250 zpool_handle_t *
251 zpool_open(libzfs_handle_t *hdl, const char *pool)
252 {
253 	zpool_handle_t *zhp;
254 
255 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
256 		return (NULL);
257 
258 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
259 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
260 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
261 		zpool_close(zhp);
262 		return (NULL);
263 	}
264 
265 	return (zhp);
266 }
267 
268 /*
269  * Close the handle.  Simply frees the memory associated with the handle.
270  */
271 void
272 zpool_close(zpool_handle_t *zhp)
273 {
274 	if (zhp->zpool_config)
275 		nvlist_free(zhp->zpool_config);
276 	if (zhp->zpool_old_config)
277 		nvlist_free(zhp->zpool_old_config);
278 	if (zhp->zpool_props)
279 		nvlist_free(zhp->zpool_props);
280 	free(zhp);
281 }
282 
283 /*
284  * Return the name of the pool.
285  */
286 const char *
287 zpool_get_name(zpool_handle_t *zhp)
288 {
289 	return (zhp->zpool_name);
290 }
291 
292 /*
293  * Return the GUID of the pool.
294  */
295 uint64_t
296 zpool_get_guid(zpool_handle_t *zhp)
297 {
298 	uint64_t guid;
299 
300 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
301 	    &guid) == 0);
302 	return (guid);
303 }
304 
305 /*
306  * Return the version of the pool.
307  */
308 uint64_t
309 zpool_get_version(zpool_handle_t *zhp)
310 {
311 	uint64_t version;
312 
313 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
314 	    &version) == 0);
315 
316 	return (version);
317 }
318 
319 /*
320  * Return the amount of space currently consumed by the pool.
321  */
322 uint64_t
323 zpool_get_space_used(zpool_handle_t *zhp)
324 {
325 	nvlist_t *nvroot;
326 	vdev_stat_t *vs;
327 	uint_t vsc;
328 
329 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
330 	    &nvroot) == 0);
331 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
332 	    (uint64_t **)&vs, &vsc) == 0);
333 
334 	return (vs->vs_alloc);
335 }
336 
337 /*
338  * Return the total space in the pool.
339  */
340 uint64_t
341 zpool_get_space_total(zpool_handle_t *zhp)
342 {
343 	nvlist_t *nvroot;
344 	vdev_stat_t *vs;
345 	uint_t vsc;
346 
347 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
348 	    &nvroot) == 0);
349 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
350 	    (uint64_t **)&vs, &vsc) == 0);
351 
352 	return (vs->vs_space);
353 }
354 
355 /*
356  * Return the alternate root for this pool, if any.
357  */
358 int
359 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
360 {
361 	zfs_cmd_t zc = { 0 };
362 
363 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
364 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
365 	    zc.zc_value[0] == '\0')
366 		return (-1);
367 
368 	(void) strlcpy(buf, zc.zc_value, buflen);
369 
370 	return (0);
371 }
372 
373 /*
374  * Return the state of the pool (ACTIVE or UNAVAILABLE)
375  */
376 int
377 zpool_get_state(zpool_handle_t *zhp)
378 {
379 	return (zhp->zpool_state);
380 }
381 
382 /*
383  * Create the named pool, using the provided vdev list.  It is assumed
384  * that the consumer has already validated the contents of the nvlist, so we
385  * don't have to worry about error semantics.
386  */
387 int
388 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
389     const char *altroot)
390 {
391 	zfs_cmd_t zc = { 0 };
392 	char msg[1024];
393 
394 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
395 	    "cannot create '%s'"), pool);
396 
397 	if (!zpool_name_valid(hdl, B_FALSE, pool))
398 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
399 
400 	if (altroot != NULL && altroot[0] != '/')
401 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
402 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
403 
404 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
405 		return (-1);
406 
407 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
408 
409 	if (altroot != NULL)
410 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
411 
412 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc) != 0) {
413 		zcmd_free_nvlists(&zc);
414 
415 		switch (errno) {
416 		case EBUSY:
417 			/*
418 			 * This can happen if the user has specified the same
419 			 * device multiple times.  We can't reliably detect this
420 			 * until we try to add it and see we already have a
421 			 * label.
422 			 */
423 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
424 			    "one or more vdevs refer to the same device"));
425 			return (zfs_error(hdl, EZFS_BADDEV, msg));
426 
427 		case EOVERFLOW:
428 			/*
429 			 * This occurs when one of the devices is below
430 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
431 			 * device was the problem device since there's no
432 			 * reliable way to determine device size from userland.
433 			 */
434 			{
435 				char buf[64];
436 
437 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
438 
439 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
440 				    "one or more devices is less than the "
441 				    "minimum size (%s)"), buf);
442 			}
443 			return (zfs_error(hdl, EZFS_BADDEV, msg));
444 
445 		case ENOSPC:
446 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
447 			    "one or more devices is out of space"));
448 			return (zfs_error(hdl, EZFS_BADDEV, msg));
449 
450 		default:
451 			return (zpool_standard_error(hdl, errno, msg));
452 		}
453 	}
454 	zcmd_free_nvlists(&zc);
455 
456 	/*
457 	 * If this is an alternate root pool, then we automatically set the
458 	 * mountpoint of the root dataset to be '/'.
459 	 */
460 	if (altroot != NULL) {
461 		zfs_handle_t *zhp;
462 
463 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
464 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
465 		    "/") == 0);
466 
467 		zfs_close(zhp);
468 	}
469 
470 	return (0);
471 }
472 
473 /*
474  * Destroy the given pool.  It is up to the caller to ensure that there are no
475  * datasets left in the pool.
476  */
477 int
478 zpool_destroy(zpool_handle_t *zhp)
479 {
480 	zfs_cmd_t zc = { 0 };
481 	zfs_handle_t *zfp = NULL;
482 	libzfs_handle_t *hdl = zhp->zpool_hdl;
483 	char msg[1024];
484 
485 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
486 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
487 	    ZFS_TYPE_FILESYSTEM)) == NULL)
488 		return (-1);
489 
490 	if (zpool_remove_zvol_links(zhp) != 0)
491 		return (-1);
492 
493 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
494 
495 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
496 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
497 		    "cannot destroy '%s'"), zhp->zpool_name);
498 
499 		if (errno == EROFS) {
500 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 			    "one or more devices is read only"));
502 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
503 		} else {
504 			(void) zpool_standard_error(hdl, errno, msg);
505 		}
506 
507 		if (zfp)
508 			zfs_close(zfp);
509 		return (-1);
510 	}
511 
512 	if (zfp) {
513 		remove_mountpoint(zfp);
514 		zfs_close(zfp);
515 	}
516 
517 	return (0);
518 }
519 
520 /*
521  * Add the given vdevs to the pool.  The caller must have already performed the
522  * necessary verification to ensure that the vdev specification is well-formed.
523  */
524 int
525 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
526 {
527 	zfs_cmd_t zc = { 0 };
528 	int ret;
529 	libzfs_handle_t *hdl = zhp->zpool_hdl;
530 	char msg[1024];
531 	nvlist_t **spares;
532 	uint_t nspares;
533 
534 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
535 	    "cannot add to '%s'"), zhp->zpool_name);
536 
537 	if (zpool_get_version(zhp) < SPA_VERSION_SPARES &&
538 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
539 	    &spares, &nspares) == 0) {
540 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
541 		    "upgraded to add hot spares"));
542 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
543 	}
544 
545 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
546 		return (-1);
547 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
548 
549 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
550 		switch (errno) {
551 		case EBUSY:
552 			/*
553 			 * This can happen if the user has specified the same
554 			 * device multiple times.  We can't reliably detect this
555 			 * until we try to add it and see we already have a
556 			 * label.
557 			 */
558 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559 			    "one or more vdevs refer to the same device"));
560 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
561 			break;
562 
563 		case EOVERFLOW:
564 			/*
565 			 * This occurrs when one of the devices is below
566 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
567 			 * device was the problem device since there's no
568 			 * reliable way to determine device size from userland.
569 			 */
570 			{
571 				char buf[64];
572 
573 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
574 
575 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
576 				    "device is less than the minimum "
577 				    "size (%s)"), buf);
578 			}
579 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
580 			break;
581 
582 		case ENOTSUP:
583 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
584 			    "pool must be upgraded to add these vdevs"));
585 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
586 			break;
587 
588 		case EDOM:
589 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
590 			    "root pool can not have multiple vdevs"
591 			    " or separate logs"));
592 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
593 			break;
594 
595 		default:
596 			(void) zpool_standard_error(hdl, errno, msg);
597 		}
598 
599 		ret = -1;
600 	} else {
601 		ret = 0;
602 	}
603 
604 	zcmd_free_nvlists(&zc);
605 
606 	return (ret);
607 }
608 
609 /*
610  * Exports the pool from the system.  The caller must ensure that there are no
611  * mounted datasets in the pool.
612  */
613 int
614 zpool_export(zpool_handle_t *zhp)
615 {
616 	zfs_cmd_t zc = { 0 };
617 
618 	if (zpool_remove_zvol_links(zhp) != 0)
619 		return (-1);
620 
621 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
622 
623 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0)
624 		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
625 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
626 		    zhp->zpool_name));
627 	return (0);
628 }
629 
630 /*
631  * Import the given pool using the known configuration.  The configuration
632  * should have come from zpool_find_import().  The 'newname' and 'altroot'
633  * parameters control whether the pool is imported with a different name or with
634  * an alternate root, respectively.
635  */
636 int
637 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
638     const char *altroot)
639 {
640 	zfs_cmd_t zc = { 0 };
641 	char *thename;
642 	char *origname;
643 	int ret;
644 
645 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
646 	    &origname) == 0);
647 
648 	if (newname != NULL) {
649 		if (!zpool_name_valid(hdl, B_FALSE, newname))
650 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
651 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
652 			    newname));
653 		thename = (char *)newname;
654 	} else {
655 		thename = origname;
656 	}
657 
658 	if (altroot != NULL && altroot[0] != '/')
659 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
660 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
661 		    altroot));
662 
663 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
664 
665 	if (altroot != NULL)
666 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
667 	else
668 		zc.zc_value[0] = '\0';
669 
670 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
671 	    &zc.zc_guid) == 0);
672 
673 	if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
674 		return (-1);
675 
676 	ret = 0;
677 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
678 		char desc[1024];
679 		if (newname == NULL)
680 			(void) snprintf(desc, sizeof (desc),
681 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
682 			    thename);
683 		else
684 			(void) snprintf(desc, sizeof (desc),
685 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
686 			    origname, thename);
687 
688 		switch (errno) {
689 		case ENOTSUP:
690 			/*
691 			 * Unsupported version.
692 			 */
693 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
694 			break;
695 
696 		case EINVAL:
697 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
698 			break;
699 
700 		default:
701 			(void) zpool_standard_error(hdl, errno, desc);
702 		}
703 
704 		ret = -1;
705 	} else {
706 		zpool_handle_t *zhp;
707 
708 		/*
709 		 * This should never fail, but play it safe anyway.
710 		 */
711 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
712 			ret = -1;
713 		} else if (zhp != NULL) {
714 			ret = zpool_create_zvol_links(zhp);
715 			zpool_close(zhp);
716 		}
717 
718 	}
719 
720 
721 	zcmd_free_nvlists(&zc);
722 	return (ret);
723 }
724 
725 /*
726  * Scrub the pool.
727  */
728 int
729 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
730 {
731 	zfs_cmd_t zc = { 0 };
732 	char msg[1024];
733 	libzfs_handle_t *hdl = zhp->zpool_hdl;
734 
735 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
736 	zc.zc_cookie = type;
737 
738 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
739 		return (0);
740 
741 	(void) snprintf(msg, sizeof (msg),
742 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
743 
744 	if (errno == EBUSY)
745 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
746 	else
747 		return (zpool_standard_error(hdl, errno, msg));
748 }
749 
750 /*
751  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
752  * spare; but FALSE if its an INUSE spare.
753  */
754 static nvlist_t *
755 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
756     boolean_t *avail_spare)
757 {
758 	uint_t c, children;
759 	nvlist_t **child;
760 	uint64_t theguid, present;
761 	char *path;
762 	uint64_t wholedisk = 0;
763 	nvlist_t *ret;
764 
765 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
766 
767 	if (search == NULL &&
768 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
769 		/*
770 		 * If the device has never been present since import, the only
771 		 * reliable way to match the vdev is by GUID.
772 		 */
773 		if (theguid == guid)
774 			return (nv);
775 	} else if (search != NULL &&
776 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
777 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
778 		    &wholedisk);
779 		if (wholedisk) {
780 			/*
781 			 * For whole disks, the internal path has 's0', but the
782 			 * path passed in by the user doesn't.
783 			 */
784 			if (strlen(search) == strlen(path) - 2 &&
785 			    strncmp(search, path, strlen(search)) == 0)
786 				return (nv);
787 		} else if (strcmp(search, path) == 0) {
788 			return (nv);
789 		}
790 	}
791 
792 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
793 	    &child, &children) != 0)
794 		return (NULL);
795 
796 	for (c = 0; c < children; c++)
797 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
798 		    avail_spare)) != NULL)
799 			return (ret);
800 
801 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
802 	    &child, &children) == 0) {
803 		for (c = 0; c < children; c++) {
804 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
805 			    avail_spare)) != NULL) {
806 				*avail_spare = B_TRUE;
807 				return (ret);
808 			}
809 		}
810 	}
811 
812 	return (NULL);
813 }
814 
/*
 * Locate a vdev in this pool's configuration.  'path' may be a numeric
 * vdev guid, a device name relative to /dev/dsk, or an absolute device
 * path.  Returns the vdev's config nvlist, or NULL if no match is found.
 * '*avail_spare' is set to B_TRUE when the match is an available hot spare.
 */
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	/* A string that parses entirely as a nonzero number is a guid. */
	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		/* Relative device names are resolved under /dev/dsk. */
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
}
840 
841 /*
842  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
843  */
844 static boolean_t
845 is_spare(zpool_handle_t *zhp, uint64_t guid)
846 {
847 	uint64_t spare_guid;
848 	nvlist_t *nvroot;
849 	nvlist_t **spares;
850 	uint_t nspares;
851 	int i;
852 
853 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
854 	    &nvroot) == 0);
855 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
856 	    &spares, &nspares) == 0) {
857 		for (i = 0; i < nspares; i++) {
858 			verify(nvlist_lookup_uint64(spares[i],
859 			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
860 			if (guid == spare_guid)
861 				return (B_TRUE);
862 		}
863 	}
864 
865 	return (B_FALSE);
866 }
867 
868 /*
869  * Bring the specified vdev online.   The 'flags' parameter is a set of the
870  * ZFS_ONLINE_* flags.
871  */
872 int
873 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
874     vdev_state_t *newstate)
875 {
876 	zfs_cmd_t zc = { 0 };
877 	char msg[1024];
878 	nvlist_t *tgt;
879 	boolean_t avail_spare;
880 	libzfs_handle_t *hdl = zhp->zpool_hdl;
881 
882 	(void) snprintf(msg, sizeof (msg),
883 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
884 
885 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
886 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
887 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
888 
889 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
890 
891 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
892 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
893 
894 	zc.zc_cookie = VDEV_STATE_ONLINE;
895 	zc.zc_obj = flags;
896 
897 
898 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
899 		return (zpool_standard_error(hdl, errno, msg));
900 
901 	*newstate = zc.zc_cookie;
902 	return (0);
903 }
904 
905 /*
906  * Take the specified vdev offline
907  */
908 int
909 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
910 {
911 	zfs_cmd_t zc = { 0 };
912 	char msg[1024];
913 	nvlist_t *tgt;
914 	boolean_t avail_spare;
915 	libzfs_handle_t *hdl = zhp->zpool_hdl;
916 
917 	(void) snprintf(msg, sizeof (msg),
918 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
919 
920 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
921 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
922 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
923 
924 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
925 
926 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
927 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
928 
929 	zc.zc_cookie = VDEV_STATE_OFFLINE;
930 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
931 
932 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
933 		return (0);
934 
935 	switch (errno) {
936 	case EBUSY:
937 
938 		/*
939 		 * There are no other replicas of this device.
940 		 */
941 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
942 
943 	default:
944 		return (zpool_standard_error(hdl, errno, msg));
945 	}
946 }
947 
948 /*
949  * Mark the given vdev faulted.
950  */
951 int
952 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
953 {
954 	zfs_cmd_t zc = { 0 };
955 	char msg[1024];
956 	libzfs_handle_t *hdl = zhp->zpool_hdl;
957 
958 	(void) snprintf(msg, sizeof (msg),
959 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
960 
961 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
962 	zc.zc_guid = guid;
963 	zc.zc_cookie = VDEV_STATE_FAULTED;
964 
965 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
966 		return (0);
967 
968 	switch (errno) {
969 	case EBUSY:
970 
971 		/*
972 		 * There are no other replicas of this device.
973 		 */
974 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
975 
976 	default:
977 		return (zpool_standard_error(hdl, errno, msg));
978 	}
979 
980 }
981 
982 /*
983  * Mark the given vdev degraded.
984  */
985 int
986 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
987 {
988 	zfs_cmd_t zc = { 0 };
989 	char msg[1024];
990 	libzfs_handle_t *hdl = zhp->zpool_hdl;
991 
992 	(void) snprintf(msg, sizeof (msg),
993 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
994 
995 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
996 	zc.zc_guid = guid;
997 	zc.zc_cookie = VDEV_STATE_DEGRADED;
998 
999 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1000 		return (0);
1001 
1002 	return (zpool_standard_error(hdl, errno, msg));
1003 }
1004 
1005 /*
1006  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1007  * a hot spare.
1008  */
1009 static boolean_t
1010 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1011 {
1012 	nvlist_t **child;
1013 	uint_t c, children;
1014 	char *type;
1015 
1016 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1017 	    &children) == 0) {
1018 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1019 		    &type) == 0);
1020 
1021 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1022 		    children == 2 && child[which] == tgt)
1023 			return (B_TRUE);
1024 
1025 		for (c = 0; c < children; c++)
1026 			if (is_replacing_spare(child[c], tgt, which))
1027 				return (B_TRUE);
1028 	}
1029 
1030 	return (B_FALSE);
1031 }
1032 
1033 /*
1034  * Attach new_disk (fully described by nvroot) to old_disk.
1035  * If 'replacing' is specified, the new disk will replace the old one.
1036  */
1037 int
1038 zpool_vdev_attach(zpool_handle_t *zhp,
1039     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1040 {
1041 	zfs_cmd_t zc = { 0 };
1042 	char msg[1024];
1043 	int ret;
1044 	nvlist_t *tgt;
1045 	boolean_t avail_spare;
1046 	uint64_t val, is_log;
1047 	char *path;
1048 	nvlist_t **child;
1049 	uint_t children;
1050 	nvlist_t *config_root;
1051 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1052 
1053 	if (replacing)
1054 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1055 		    "cannot replace %s with %s"), old_disk, new_disk);
1056 	else
1057 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1058 		    "cannot attach %s to %s"), new_disk, old_disk);
1059 
1060 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1061 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
1062 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1063 
1064 	if (avail_spare)
1065 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1066 
1067 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1068 	zc.zc_cookie = replacing;
1069 
1070 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1071 	    &child, &children) != 0 || children != 1) {
1072 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1073 		    "new device must be a single disk"));
1074 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1075 	}
1076 
1077 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1078 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1079 
1080 	/*
1081 	 * If the target is a hot spare that has been swapped in, we can only
1082 	 * replace it with another hot spare.
1083 	 */
1084 	if (replacing &&
1085 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1086 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1087 	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
1088 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1089 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1090 		    "can only be replaced by another hot spare"));
1091 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1092 	}
1093 
1094 	/*
1095 	 * If we are attempting to replace a spare, it canot be applied to an
1096 	 * already spared device.
1097 	 */
1098 	if (replacing &&
1099 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1100 	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1101 	    is_replacing_spare(config_root, tgt, 0)) {
1102 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1103 		    "device has already been replaced with a spare"));
1104 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1105 	}
1106 
1107 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
1108 		return (-1);
1109 
1110 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1111 
1112 	zcmd_free_nvlists(&zc);
1113 
1114 	if (ret == 0)
1115 		return (0);
1116 
1117 	switch (errno) {
1118 	case ENOTSUP:
1119 		/*
1120 		 * Can't attach to or replace this type of vdev.
1121 		 */
1122 		if (replacing) {
1123 			is_log = B_FALSE;
1124 			(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_LOG,
1125 			    &is_log);
1126 			if (is_log)
1127 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1128 				    "cannot replace a log with a spare"));
1129 			else
1130 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1131 				    "cannot replace a replacing device"));
1132 		} else {
1133 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1134 			    "can only attach to mirrors and top-level "
1135 			    "disks"));
1136 		}
1137 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1138 		break;
1139 
1140 	case EINVAL:
1141 		/*
1142 		 * The new device must be a single disk.
1143 		 */
1144 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1145 		    "new device must be a single disk"));
1146 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1147 		break;
1148 
1149 	case EBUSY:
1150 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1151 		    new_disk);
1152 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1153 		break;
1154 
1155 	case EOVERFLOW:
1156 		/*
1157 		 * The new device is too small.
1158 		 */
1159 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1160 		    "device is too small"));
1161 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1162 		break;
1163 
1164 	case EDOM:
1165 		/*
1166 		 * The new device has a different alignment requirement.
1167 		 */
1168 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1169 		    "devices have different sector alignment"));
1170 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1171 		break;
1172 
1173 	case ENAMETOOLONG:
1174 		/*
1175 		 * The resulting top-level vdev spec won't fit in the label.
1176 		 */
1177 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1178 		break;
1179 
1180 	default:
1181 		(void) zpool_standard_error(hdl, errno, msg);
1182 	}
1183 
1184 	return (-1);
1185 }
1186 
1187 /*
1188  * Detach the specified device.
1189  */
1190 int
1191 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1192 {
1193 	zfs_cmd_t zc = { 0 };
1194 	char msg[1024];
1195 	nvlist_t *tgt;
1196 	boolean_t avail_spare;
1197 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1198 
1199 	(void) snprintf(msg, sizeof (msg),
1200 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1201 
1202 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1203 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1204 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1205 
1206 	if (avail_spare)
1207 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1208 
1209 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1210 
1211 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1212 		return (0);
1213 
1214 	switch (errno) {
1215 
1216 	case ENOTSUP:
1217 		/*
1218 		 * Can't detach from this type of vdev.
1219 		 */
1220 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1221 		    "applicable to mirror and replacing vdevs"));
1222 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1223 		break;
1224 
1225 	case EBUSY:
1226 		/*
1227 		 * There are no other replicas of this device.
1228 		 */
1229 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1230 		break;
1231 
1232 	default:
1233 		(void) zpool_standard_error(hdl, errno, msg);
1234 	}
1235 
1236 	return (-1);
1237 }
1238 
1239 /*
1240  * Remove the given device.  Currently, this is supported only for hot spares.
1241  */
1242 int
1243 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1244 {
1245 	zfs_cmd_t zc = { 0 };
1246 	char msg[1024];
1247 	nvlist_t *tgt;
1248 	boolean_t avail_spare;
1249 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1250 
1251 	(void) snprintf(msg, sizeof (msg),
1252 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1253 
1254 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1255 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1256 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1257 
1258 	if (!avail_spare) {
1259 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1260 		    "only inactive hot spares can be removed"));
1261 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1262 	}
1263 
1264 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1265 
1266 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1267 		return (0);
1268 
1269 	return (zpool_standard_error(hdl, errno, msg));
1270 }
1271 
1272 /*
1273  * Clear the errors for the pool, or the particular device if specified.
1274  */
1275 int
1276 zpool_clear(zpool_handle_t *zhp, const char *path)
1277 {
1278 	zfs_cmd_t zc = { 0 };
1279 	char msg[1024];
1280 	nvlist_t *tgt;
1281 	boolean_t avail_spare;
1282 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1283 
1284 	if (path)
1285 		(void) snprintf(msg, sizeof (msg),
1286 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1287 		    path);
1288 	else
1289 		(void) snprintf(msg, sizeof (msg),
1290 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1291 		    zhp->zpool_name);
1292 
1293 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1294 	if (path) {
1295 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1296 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1297 
1298 		if (avail_spare)
1299 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1300 
1301 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1302 		    &zc.zc_guid) == 0);
1303 	}
1304 
1305 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
1306 		return (0);
1307 
1308 	return (zpool_standard_error(hdl, errno, msg));
1309 }
1310 
1311 /*
1312  * Similar to zpool_clear(), but takes a GUID (used by fmd).
1313  */
1314 int
1315 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
1316 {
1317 	zfs_cmd_t zc = { 0 };
1318 	char msg[1024];
1319 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1320 
1321 	(void) snprintf(msg, sizeof (msg),
1322 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
1323 	    guid);
1324 
1325 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1326 	zc.zc_guid = guid;
1327 
1328 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1329 		return (0);
1330 
1331 	return (zpool_standard_error(hdl, errno, msg));
1332 }
1333 
1334 /*
1335  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1336  * hierarchy.
1337  */
int
zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
    void *data)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	/* Explicit stack of pool-relative path names; 'curr' is the top. */
	char (*paths)[MAXPATHLEN];
	size_t size = 4;	/* stack capacity; doubled as needed */
	int curr, fd, base, ret = 0;
	DIR *dirp;
	struct dirent *dp;
	struct stat st;

	/* No /dev/zvol/dsk at all means no zvols anywhere: success. */
	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
		return (errno == ENOENT ? 0 : -1);

	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
		int err = errno;
		(void) close(base);
		return (err == ENOENT ? 0 : -1);
	}

	/*
	 * Oddly this wasn't a directory -- ignore that failure since we
	 * know there are no links lower in the (non-existent) hierarchy.
	 */
	if (!S_ISDIR(st.st_mode)) {
		(void) close(base);
		return (0);
	}

	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
		(void) close(base);
		return (-1);
	}

	/* Seed the stack with the pool name itself. */
	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
	curr = 0;

	/* Depth-first walk: pop a path, push children or invoke callback. */
	while (curr >= 0) {
		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
			goto err;

		if (S_ISDIR(st.st_mode)) {
			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
				goto err;

			/* fdopendir() takes ownership of fd on success. */
			if ((dirp = fdopendir(fd)) == NULL) {
				(void) close(fd);
				goto err;
			}

			while ((dp = readdir(dirp)) != NULL) {
				/* Skip "." and ".." (and any dotfiles). */
				if (dp->d_name[0] == '.')
					continue;

				/* Grow the stack when the next push would overflow. */
				if (curr + 1 == size) {
					paths = zfs_realloc(hdl, paths,
					    size * sizeof (paths[0]),
					    size * 2 * sizeof (paths[0]));
					if (paths == NULL) {
						(void) closedir(dirp);
						(void) close(fd);
						goto err;
					}

					size *= 2;
				}

				/* Push "<current>/<entry>" onto the stack. */
				(void) strlcpy(paths[curr + 1], paths[curr],
				    sizeof (paths[curr + 1]));
				(void) strlcat(paths[curr], "/",
				    sizeof (paths[curr]));
				(void) strlcat(paths[curr], dp->d_name,
				    sizeof (paths[curr]));
				curr++;
			}

			(void) closedir(dirp);

		} else {
			/* Leaf node: a zvol link.  Hand it to the callback. */
			if ((ret = cb(paths[curr], data)) != 0)
				break;
		}

		curr--;
	}

	free(paths);
	(void) close(base);

	return (ret);

err:
	free(paths);
	(void) close(base);
	return (-1);
}
1435 
1436 typedef struct zvol_cb {
1437 	zpool_handle_t *zcb_pool;
1438 	boolean_t zcb_create;
1439 } zvol_cb_t;
1440 
1441 /*ARGSUSED*/
1442 static int
1443 do_zvol_create(zfs_handle_t *zhp, void *data)
1444 {
1445 	int ret = 0;
1446 
1447 	if (ZFS_IS_VOLUME(zhp)) {
1448 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1449 		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
1450 	}
1451 
1452 	if (ret == 0)
1453 		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
1454 
1455 	zfs_close(zhp);
1456 
1457 	return (ret);
1458 }
1459 
1460 /*
1461  * Iterate over all zvols in the pool and make any necessary minor nodes.
1462  */
1463 int
1464 zpool_create_zvol_links(zpool_handle_t *zhp)
1465 {
1466 	zfs_handle_t *zfp;
1467 	int ret;
1468 
1469 	/*
1470 	 * If the pool is unavailable, just return success.
1471 	 */
1472 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1473 	    zhp->zpool_name)) == NULL)
1474 		return (0);
1475 
1476 	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
1477 
1478 	zfs_close(zfp);
1479 	return (ret);
1480 }
1481 
1482 static int
1483 do_zvol_remove(const char *dataset, void *data)
1484 {
1485 	zpool_handle_t *zhp = data;
1486 
1487 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
1488 }
1489 
1490 /*
1491  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
1492  * by examining the /dev links so that a corrupted pool doesn't impede this
1493  * operation.
1494  */
1495 int
1496 zpool_remove_zvol_links(zpool_handle_t *zhp)
1497 {
1498 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
1499 }
1500 
1501 /*
1502  * Convert from a devid string to a path.
1503  */
1504 static char *
1505 devid_to_path(char *devid_str)
1506 {
1507 	ddi_devid_t devid;
1508 	char *minor;
1509 	char *path;
1510 	devid_nmlist_t *list = NULL;
1511 	int ret;
1512 
1513 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1514 		return (NULL);
1515 
1516 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1517 
1518 	devid_str_free(minor);
1519 	devid_free(devid);
1520 
1521 	if (ret != 0)
1522 		return (NULL);
1523 
1524 	if ((path = strdup(list[0].devname)) == NULL)
1525 		return (NULL);
1526 
1527 	devid_free_nmlist(list);
1528 
1529 	return (path);
1530 }
1531 
1532 /*
1533  * Convert from a path to a devid string.
1534  */
1535 static char *
1536 path_to_devid(const char *path)
1537 {
1538 	int fd;
1539 	ddi_devid_t devid;
1540 	char *minor, *ret;
1541 
1542 	if ((fd = open(path, O_RDONLY)) < 0)
1543 		return (NULL);
1544 
1545 	minor = NULL;
1546 	ret = NULL;
1547 	if (devid_get(fd, &devid) == 0) {
1548 		if (devid_get_minor_name(fd, &minor) == 0)
1549 			ret = devid_str_encode(devid, minor);
1550 		if (minor != NULL)
1551 			devid_str_free(minor);
1552 		devid_free(devid);
1553 	}
1554 	(void) close(fd);
1555 
1556 	return (ret);
1557 }
1558 
1559 /*
1560  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1561  * ignore any failure here, since a common case is for an unprivileged user to
1562  * type 'zpool status', and we'll display the correct information anyway.
1563  */
1564 static void
1565 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1566 {
1567 	zfs_cmd_t zc = { 0 };
1568 
1569 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1570 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
1571 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1572 	    &zc.zc_guid) == 0);
1573 
1574 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1575 }
1576 
1577 /*
1578  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1579  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1580  * We also check if this is a whole disk, in which case we strip off the
1581  * trailing 's0' slice name.
1582  *
1583  * This routine is also responsible for identifying when disks have been
1584  * reconfigured in a new location.  The kernel will have opened the device by
1585  * devid, but the path will still refer to the old location.  To catch this, we
1586  * first do a path -> devid translation (which is fast for the common case).  If
1587  * the devid matches, we're done.  If not, we do a reverse devid -> path
1588  * translation and issue the appropriate ioctl() to update the path of the vdev.
1589  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1590  * of these checks.
1591  */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];		/* scratch for GUID / raidz name */
	vdev_stat_t *vs;
	uint_t vsc;

	/* Missing device: display its GUID instead of a path. */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				/* Disk moved: find its new home by devid. */
				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					/*
					 * Re-lookup so 'path' points at the
					 * copy owned by the nvlist, not our
					 * soon-to-be-freed 'newpath'.
					 */
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		/* Strip the leading "/dev/dsk/" for display. */
		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		/*
		 * Whole disks: drop the last two characters (the trailing
		 * "s0" slice) from the duplicated name before returning.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(hdl, path);
			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		/* No path at all: fall back to the vdev type name. */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	/* Always return heap-allocated storage; 'buf' is only scratch. */
	return (zfs_strdup(hdl, path));
}
1676 
/*
 * qsort() comparator: orders zbookmark_t values by raw byte comparison.
 * NOTE(review): memcmp() also compares any padding bytes within
 * zbookmark_t; this assumes the structures arrive fully initialized from
 * the kernel -- confirm if zbookmark_t ever gains padding.
 */
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}
1682 
1683 /*
1684  * Retrieve the persistent error log, uniquify the members, and return to the
1685  * caller.
1686  */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			/* buffer too small (ENOMEM) or hard failure: re-try or bail */
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				/* kernel told us the size it now needs */
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		/* skip duplicates: the array is sorted, so equal pairs adjoin */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		/*
		 * NOTE(review): "ejk" is the nvpair name each record is added
		 * under; consumers presumably iterate rather than look it up
		 * by name -- confirm before changing it.
		 */
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
1779 
1780 /*
1781  * Upgrade a ZFS pool to the latest on-disk version.
1782  */
1783 int
1784 zpool_upgrade(zpool_handle_t *zhp)
1785 {
1786 	zfs_cmd_t zc = { 0 };
1787 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1788 
1789 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1790 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1791 		return (zpool_standard_error_fmt(hdl, errno,
1792 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1793 		    zhp->zpool_name));
1794 	return (0);
1795 }
1796 
1797 void
1798 zpool_set_history_str(const char *subcommand, int argc, char **argv,
1799     char *history_str)
1800 {
1801 	int i;
1802 
1803 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
1804 	for (i = 1; i < argc; i++) {
1805 		if (strlen(history_str) + 1 + strlen(argv[i]) >
1806 		    HIS_MAX_RECORD_LEN)
1807 			break;
1808 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
1809 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
1810 	}
1811 }
1812 
1813 /*
1814  * Stage command history for logging.
1815  */
1816 int
1817 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
1818 {
1819 	if (history_str == NULL)
1820 		return (EINVAL);
1821 
1822 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
1823 		return (EINVAL);
1824 
1825 	if (hdl->libzfs_log_str != NULL)
1826 		free(hdl->libzfs_log_str);
1827 
1828 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
1829 		return (no_memory(hdl));
1830 
1831 	return (0);
1832 }
1833 
1834 /*
1835  * Perform ioctl to get some command history of a pool.
1836  *
1837  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
1838  * logical offset of the history buffer to start reading from.
1839  *
1840  * Upon return, 'off' is the next logical offset to read from and
1841  * 'len' is the actual amount of bytes read into 'buf'.
1842  */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	/* in: caller's buffer, its capacity, and the logical read offset */
	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		/* map the interesting errnos to specific libzfs errors */
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	/* out: bytes actually copied, and the next offset to read from */
	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
1882 
1883 /*
1884  * Process the buffer of nvlists, unpacking and storing each nvlist record
1885  * into 'records'.  'leftover' is set to the number of bytes that weren't
1886  * processed as there wasn't a complete record.
1887  */
1888 static int
1889 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
1890     nvlist_t ***records, uint_t *numrecords)
1891 {
1892 	uint64_t reclen;
1893 	nvlist_t *nv;
1894 	int i;
1895 
1896 	while (bytes_read > sizeof (reclen)) {
1897 
1898 		/* get length of packed record (stored as little endian) */
1899 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
1900 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
1901 
1902 		if (bytes_read < sizeof (reclen) + reclen)
1903 			break;
1904 
1905 		/* unpack record */
1906 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
1907 			return (ENOMEM);
1908 		bytes_read -= sizeof (reclen) + reclen;
1909 		buf += sizeof (reclen) + reclen;
1910 
1911 		/* add record to nvlist array */
1912 		(*numrecords)++;
1913 		if (ISP2(*numrecords + 1)) {
1914 			*records = realloc(*records,
1915 			    *numrecords * 2 * sizeof (nvlist_t *));
1916 		}
1917 		(*records)[*numrecords - 1] = nv;
1918 	}
1919 
1920 	*leftover = bytes_read;
1921 	return (0);
1922 }
1923 
1924 #define	HIS_BUF_LEN	(128*1024)
1925 
1926 /*
1927  * Retrieve the command history of a pool.
1928  */
1929 int
1930 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
1931 {
1932 	char buf[HIS_BUF_LEN];
1933 	uint64_t off = 0;
1934 	nvlist_t **records = NULL;
1935 	uint_t numrecords = 0;
1936 	int err, i;
1937 
1938 	do {
1939 		uint64_t bytes_read = sizeof (buf);
1940 		uint64_t leftover;
1941 
1942 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
1943 			break;
1944 
1945 		/* if nothing else was read in, we're at EOF, just return */
1946 		if (!bytes_read)
1947 			break;
1948 
1949 		if ((err = zpool_history_unpack(buf, bytes_read,
1950 		    &leftover, &records, &numrecords)) != 0)
1951 			break;
1952 		off -= leftover;
1953 
1954 		/* CONSTCOND */
1955 	} while (1);
1956 
1957 	if (!err) {
1958 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
1959 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
1960 		    records, numrecords) == 0);
1961 	}
1962 	for (i = 0; i < numrecords; i++)
1963 		nvlist_free(records[i]);
1964 	free(records);
1965 
1966 	return (err);
1967 }
1968 
1969 void
1970 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
1971     char *pathname, size_t len)
1972 {
1973 	zfs_cmd_t zc = { 0 };
1974 	boolean_t mounted = B_FALSE;
1975 	char *mntpnt = NULL;
1976 	char dsname[MAXNAMELEN];
1977 
1978 	if (dsobj == 0) {
1979 		/* special case for the MOS */
1980 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
1981 		return;
1982 	}
1983 
1984 	/* get the dataset's name */
1985 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1986 	zc.zc_obj = dsobj;
1987 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
1988 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
1989 		/* just write out a path of two object numbers */
1990 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
1991 		    dsobj, obj);
1992 		return;
1993 	}
1994 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
1995 
1996 	/* find out if the dataset is mounted */
1997 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
1998 
1999 	/* get the corrupted object's path */
2000 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2001 	zc.zc_obj = obj;
2002 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2003 	    &zc) == 0) {
2004 		if (mounted) {
2005 			(void) snprintf(pathname, len, "%s%s", mntpnt,
2006 			    zc.zc_value);
2007 		} else {
2008 			(void) snprintf(pathname, len, "%s:%s",
2009 			    dsname, zc.zc_value);
2010 		}
2011 	} else {
2012 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
2013 	}
2014 	free(mntpnt);
2015 }
2016 
2017 #define	RDISK_ROOT	"/dev/rdsk"
2018 #define	BACKUP_SLICE	"s2"
2019 /*
2020  * Don't start the slice at the default block of 34; many storage
2021  * devices will use a stripe width of 128k, so start there instead.
2022  */
2023 #define	NEW_START_BLOCK	256
2024 
2025 /*
2026  * determine where a partition starts on a disk in the current
2027  * configuration
2028  */
2029 static diskaddr_t
2030 find_start_block(nvlist_t *config)
2031 {
2032 	nvlist_t **child;
2033 	uint_t c, children;
2034 	char *path;
2035 	diskaddr_t sb = MAXOFFSET_T;
2036 	int fd;
2037 	char diskname[MAXPATHLEN];
2038 	uint64_t wholedisk;
2039 
2040 	if (nvlist_lookup_nvlist_array(config,
2041 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2042 		if (nvlist_lookup_uint64(config,
2043 		    ZPOOL_CONFIG_WHOLE_DISK,
2044 		    &wholedisk) != 0 || !wholedisk) {
2045 			return (MAXOFFSET_T);
2046 		}
2047 		if (nvlist_lookup_string(config,
2048 		    ZPOOL_CONFIG_PATH, &path) != 0) {
2049 			return (MAXOFFSET_T);
2050 		}
2051 
2052 		(void) snprintf(diskname, sizeof (diskname), "%s%s",
2053 		    RDISK_ROOT, strrchr(path, '/'));
2054 		if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2055 			struct dk_gpt *vtoc;
2056 			if (efi_alloc_and_read(fd, &vtoc) >= 0) {
2057 				sb = vtoc->efi_parts[0].p_start;
2058 				efi_free(vtoc);
2059 			}
2060 			(void) close(fd);
2061 		}
2062 		return (sb);
2063 	}
2064 
2065 	for (c = 0; c < children; c++) {
2066 		sb = find_start_block(child[c]);
2067 		if (sb != MAXOFFSET_T) {
2068 			return (sb);
2069 		}
2070 	}
2071 	return (MAXOFFSET_T);
2072 }
2073 
2074 /*
2075  * Label an individual disk.  The name provided is the short name,
2076  * stripped of any leading /dev path.
2077  */
2078 int
2079 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2080 {
2081 	char path[MAXPATHLEN];
2082 	struct dk_gpt *vtoc;
2083 	int fd;
2084 	size_t resv = EFI_MIN_RESV_SIZE;
2085 	uint64_t slice_size;
2086 	diskaddr_t start_block;
2087 	char errbuf[1024];
2088 
2089 	if (zhp) {
2090 		nvlist_t *nvroot;
2091 
2092 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2093 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2094 
2095 		if (zhp->zpool_start_block == 0)
2096 			start_block = find_start_block(nvroot);
2097 		else
2098 			start_block = zhp->zpool_start_block;
2099 		zhp->zpool_start_block = start_block;
2100 	} else {
2101 		/* new pool */
2102 		start_block = NEW_START_BLOCK;
2103 	}
2104 
2105 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2106 	    BACKUP_SLICE);
2107 
2108 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2109 		/*
2110 		 * This shouldn't happen.  We've long since verified that this
2111 		 * is a valid device.
2112 		 */
2113 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2114 		    "label '%s': unable to open device"), name);
2115 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2116 	}
2117 
2118 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2119 		/*
2120 		 * The only way this can fail is if we run out of memory, or we
2121 		 * were unable to read the disk's capacity
2122 		 */
2123 		if (errno == ENOMEM)
2124 			(void) no_memory(hdl);
2125 
2126 		(void) close(fd);
2127 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2128 		    "label '%s': unable to read disk capacity"), name);
2129 
2130 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2131 	}
2132 
2133 	slice_size = vtoc->efi_last_u_lba + 1;
2134 	slice_size -= EFI_MIN_RESV_SIZE;
2135 	if (start_block == MAXOFFSET_T)
2136 		start_block = NEW_START_BLOCK;
2137 	slice_size -= start_block;
2138 
2139 	vtoc->efi_parts[0].p_start = start_block;
2140 	vtoc->efi_parts[0].p_size = slice_size;
2141 
2142 	/*
2143 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2144 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2145 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2146 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2147 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2148 	 * can get, in the absence of V_OTHER.
2149 	 */
2150 	vtoc->efi_parts[0].p_tag = V_USR;
2151 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2152 
2153 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2154 	vtoc->efi_parts[8].p_size = resv;
2155 	vtoc->efi_parts[8].p_tag = V_RESERVED;
2156 
2157 	if (efi_write(fd, vtoc) != 0) {
2158 		/*
2159 		 * Some block drivers (like pcata) may not support EFI
2160 		 * GPT labels.  Print out a helpful error message dir-
2161 		 * ecting the user to manually label the disk and give
2162 		 * a specific slice.
2163 		 */
2164 		(void) close(fd);
2165 		efi_free(vtoc);
2166 
2167 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2168 		    "cannot label '%s': try using fdisk(1M) and then "
2169 		    "provide a specific slice"), name);
2170 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2171 	}
2172 
2173 	(void) close(fd);
2174 	efi_free(vtoc);
2175 	return (0);
2176 }
2177 
2178 int
2179 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
2180 {
2181 	zfs_cmd_t zc = { 0 };
2182 	int ret = -1;
2183 	char errbuf[1024];
2184 	nvlist_t *nvl = NULL;
2185 	nvlist_t *realprops;
2186 
2187 	(void) snprintf(errbuf, sizeof (errbuf),
2188 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
2189 	    zhp->zpool_name);
2190 
2191 	if (zpool_get_version(zhp) < SPA_VERSION_BOOTFS) {
2192 		zfs_error_aux(zhp->zpool_hdl,
2193 		    dgettext(TEXT_DOMAIN, "pool must be "
2194 		    "upgraded to support pool properties"));
2195 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
2196 	}
2197 
2198 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2199 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
2200 
2201 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
2202 	    nvlist_add_string(nvl, propname, propval) != 0) {
2203 		return (no_memory(zhp->zpool_hdl));
2204 	}
2205 
2206 	if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
2207 	    zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
2208 		nvlist_free(nvl);
2209 		return (-1);
2210 	}
2211 
2212 	nvlist_free(nvl);
2213 	nvl = realprops;
2214 
2215 	/*
2216 	 * Execute the corresponding ioctl() to set this property.
2217 	 */
2218 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2219 
2220 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
2221 		return (-1);
2222 
2223 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
2224 	zcmd_free_nvlists(&zc);
2225 
2226 	if (ret)
2227 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
2228 
2229 	return (ret);
2230 }
2231 
2232 uint64_t
2233 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop)
2234 {
2235 	uint64_t value;
2236 	nvlist_t *nvp;
2237 
2238 	if (zpool_get_version(zhp) < SPA_VERSION_BOOTFS)
2239 		return (0);
2240 
2241 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2242 		return (zpool_prop_default_numeric(prop));
2243 
2244 	switch (prop) {
2245 	case ZPOOL_PROP_AUTOREPLACE:
2246 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2247 		    zpool_prop_to_name(prop), &nvp) != 0) {
2248 			value = zpool_prop_default_numeric(prop);
2249 		} else {
2250 			VERIFY(nvlist_lookup_uint64(nvp, ZFS_PROP_VALUE,
2251 			    &value) == 0);
2252 		}
2253 		return (value);
2254 		break;
2255 
2256 	default:
2257 		assert(0);
2258 	}
2259 
2260 	return (0);
2261 }
2262 
2263 int
2264 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
2265     size_t proplen, zfs_source_t *srctype)
2266 {
2267 	uint64_t value;
2268 	char msg[1024], *strvalue;
2269 	nvlist_t *nvp;
2270 	zfs_source_t src = ZFS_SRC_NONE;
2271 
2272 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2273 	    "cannot get property '%s'"), zpool_prop_to_name(prop));
2274 
2275 	if (zpool_get_version(zhp) < SPA_VERSION_BOOTFS) {
2276 		zfs_error_aux(zhp->zpool_hdl,
2277 		    dgettext(TEXT_DOMAIN, "pool must be "
2278 		    "upgraded to support pool properties"));
2279 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
2280 	}
2281 
2282 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
2283 	    prop != ZPOOL_PROP_NAME)
2284 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));
2285 
2286 	switch (prop) {
2287 	case ZPOOL_PROP_NAME:
2288 		(void) strlcpy(propbuf, zhp->zpool_name, proplen);
2289 		break;
2290 
2291 	case ZPOOL_PROP_BOOTFS:
2292 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2293 		    zpool_prop_to_name(prop), &nvp) != 0) {
2294 			strvalue = (char *)zfs_prop_default_string(prop);
2295 			if (strvalue == NULL)
2296 				strvalue = "-";
2297 			src = ZFS_SRC_DEFAULT;
2298 		} else {
2299 			VERIFY(nvlist_lookup_uint64(nvp,
2300 			    ZFS_PROP_SOURCE, &value) == 0);
2301 			src = value;
2302 			VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
2303 			    &strvalue) == 0);
2304 			if (strlen(strvalue) >= proplen)
2305 				return (-1);
2306 		}
2307 		(void) strlcpy(propbuf, strvalue, proplen);
2308 		break;
2309 
2310 	case ZPOOL_PROP_DELEGATION:
2311 	case ZPOOL_PROP_AUTOREPLACE:
2312 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2313 		    zpool_prop_to_name(prop), &nvp) != 0) {
2314 			value = zpool_prop_default_numeric(prop);
2315 			src = ZFS_SRC_DEFAULT;
2316 		} else {
2317 			VERIFY(nvlist_lookup_uint64(nvp,
2318 			    ZFS_PROP_SOURCE, &value) == 0);
2319 			src = value;
2320 			VERIFY(nvlist_lookup_uint64(nvp, ZFS_PROP_VALUE,
2321 			    &value) == 0);
2322 		}
2323 		(void) strlcpy(propbuf, value ? "on" : "off", proplen);
2324 		break;
2325 
2326 	default:
2327 		return (-1);
2328 	}
2329 	if (srctype)
2330 		*srctype = src;
2331 	return (0);
2332 }
2333 
/*
 * Build a pool-property list in *listp from the user-supplied 'fields'
 * string by delegating to the common implementation with ZFS_TYPE_POOL.
 * Return value is passed through from zfs_get_proplist_common().
 */
int
zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
{
	return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
}
2339 
2340 
2341 int
2342 zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
2343 {
2344 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2345 	zpool_proplist_t *entry;
2346 	char buf[ZFS_MAXPROPLEN];
2347 
2348 	if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
2349 		return (-1);
2350 
2351 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
2352 
2353 		if (entry->pl_fixed)
2354 			continue;
2355 
2356 		if (entry->pl_prop != ZFS_PROP_INVAL &&
2357 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
2358 		    NULL) == 0) {
2359 			if (strlen(buf) > entry->pl_width)
2360 				entry->pl_width = strlen(buf);
2361 		}
2362 	}
2363 
2364 	return (0);
2365 }
2366