xref: /illumos-gate/usr/src/lib/libzfs/common/libzfs_pool.c (revision a08cd59e3d5cffe0ba541e434c0880cc98195970)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <sys/zio.h>
45
46 
47 #include "zfs_namecheck.h"
48 #include "zfs_prop.h"
49 #include "libzfs_impl.h"
50 
51 /*
52  * Validate the given pool name, optionally recording an extended error
53  * message via 'hdl'.
54  */
55 static boolean_t
56 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
57 {
58 	namecheck_err_t why;
59 	char what;
60 	int ret;
61 
62 	ret = pool_namecheck(pool, &why, &what);
63 
64 	/*
65 	 * The rules for reserved pool names were extended at a later point.
66 	 * But we need to support users with existing pools that may now be
67 	 * invalid.  So we only check for this expanded set of names during a
68 	 * create (or import), and only in userland.
69 	 */
70 	if (ret == 0 && !isopen &&
71 	    (strncmp(pool, "mirror", 6) == 0 ||
72 	    strncmp(pool, "raidz", 5) == 0 ||
73 	    strncmp(pool, "spare", 5) == 0)) {
74 		zfs_error_aux(hdl,
75 		    dgettext(TEXT_DOMAIN, "name is reserved"));
76 		return (B_FALSE);
77 	}
78 
79 
80 	if (ret != 0) {
81 		if (hdl != NULL) {
82 			switch (why) {
83 			case NAME_ERR_TOOLONG:
84 				zfs_error_aux(hdl,
85 				    dgettext(TEXT_DOMAIN, "name is too long"));
86 				break;
87 
88 			case NAME_ERR_INVALCHAR:
89 				zfs_error_aux(hdl,
90 				    dgettext(TEXT_DOMAIN, "invalid character "
91 				    "'%c' in pool name"), what);
92 				break;
93 
94 			case NAME_ERR_NOLETTER:
95 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
96 				    "name must begin with a letter"));
97 				break;
98 
99 			case NAME_ERR_RESERVED:
100 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
101 				    "name is reserved"));
102 				break;
103 
104 			case NAME_ERR_DISKLIKE:
105 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
106 				    "pool name is reserved"));
107 				break;
108 
109 			case NAME_ERR_LEADING_SLASH:
110 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
111 				    "leading slash in name"));
112 				break;
113 
114 			case NAME_ERR_EMPTY_COMPONENT:
115 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
116 				    "empty component in name"));
117 				break;
118 
119 			case NAME_ERR_TRAILING_SLASH:
120 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
121 				    "trailing slash in name"));
122 				break;
123 
124 			case NAME_ERR_MULTIPLE_AT:
125 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
126 				    "multiple '@' delimiters in name"));
127 				break;
128 
129 			}
130 		}
131 		return (B_FALSE);
132 	}
133 
134 	return (B_TRUE);
135 }
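/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * rejecting a reserved name before a create.  With 'isopen' B_FALSE the
 * extended reserved-name check above applies, so any name beginning with
 * "mirror", "raidz", or "spare" is refused and the reason is recorded in
 * the handle via zfs_error_aux().
 */
#if 0
	if (!zpool_name_valid(hdl, B_FALSE, "mirror")) {
		/* fails with "name is reserved" */
		return (zfs_error(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot create 'mirror'")));
	}
#endif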
136 
137 static int
138 zpool_get_all_props(zpool_handle_t *zhp)
139 {
140 	zfs_cmd_t zc = { 0 };
141 	libzfs_handle_t *hdl = zhp->zpool_hdl;
142 
143 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
144 
145 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
146 		return (-1);
147 
148 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
149 		if (errno == ENOMEM) {
150 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
151 				zcmd_free_nvlists(&zc);
152 				return (-1);
153 			}
154 		} else {
155 			zcmd_free_nvlists(&zc);
156 			return (-1);
157 		}
158 	}
159 
160 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
161 		zcmd_free_nvlists(&zc);
162 		return (-1);
163 	}
164 
165 	zcmd_free_nvlists(&zc);
166 
167 	return (0);
168 }
169 
170 /*
171  * Open a handle to the given pool, even if the pool is currently in the FAULTED
172  * state.
173  */
174 zpool_handle_t *
175 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
176 {
177 	zpool_handle_t *zhp;
178 	boolean_t missing;
179 
180 	/*
181 	 * Make sure the pool name is valid.
182 	 */
183 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
184 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
185 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
186 		    pool);
187 		return (NULL);
188 	}
189 
190 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
191 		return (NULL);
192 
193 	zhp->zpool_hdl = hdl;
194 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
195 
196 	if (zpool_refresh_stats(zhp, &missing) != 0) {
197 		zpool_close(zhp);
198 		return (NULL);
199 	}
200 
201 	if (missing) {
202 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
203 		    "no such pool"));
204 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
205 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
206 		    pool);
207 		zpool_close(zhp);
208 		return (NULL);
209 	}
210 
211 	return (zhp);
212 }
213 
214 /*
215  * Like the above, but silent on error.  Used when iterating over pools (because
216  * the configuration cache may be out of date).
217  */
218 int
219 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
220 {
221 	zpool_handle_t *zhp;
222 	boolean_t missing;
223 
224 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
225 		return (-1);
226 
227 	zhp->zpool_hdl = hdl;
228 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
229 
230 	if (zpool_refresh_stats(zhp, &missing) != 0) {
231 		zpool_close(zhp);
232 		return (-1);
233 	}
234 
235 	if (missing) {
236 		zpool_close(zhp);
237 		*ret = NULL;
238 		return (0);
239 	}
240 
241 	*ret = zhp;
242 	return (0);
243 }
244 
245 /*
246  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
247  * state.
248  */
249 zpool_handle_t *
250 zpool_open(libzfs_handle_t *hdl, const char *pool)
251 {
252 	zpool_handle_t *zhp;
253 
254 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
255 		return (NULL);
256 
257 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
258 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
259 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
260 		zpool_close(zhp);
261 		return (NULL);
262 	}
263 
264 	return (zhp);
265 }
266 
267 /*
268  * Close the handle.  Simply frees the memory associated with the handle.
269  */
270 void
271 zpool_close(zpool_handle_t *zhp)
272 {
273 	if (zhp->zpool_config)
274 		nvlist_free(zhp->zpool_config);
275 	if (zhp->zpool_old_config)
276 		nvlist_free(zhp->zpool_old_config);
277 	if (zhp->zpool_props)
278 		nvlist_free(zhp->zpool_props);
279 	free(zhp);
280 }
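/*
 * Illustrative sketch (editorial addition): the usual handle lifecycle.
 * zpool_open() refuses pools in the FAULTED state, so diagnostic consumers
 * such as 'zpool status' use zpool_open_canfail() instead.  "tank" is a
 * hypothetical pool name.
 */
#if 0
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return (-1);	/* error already recorded in 'hdl' */

	/* ... operate on the pool ... */

	zpool_close(zhp);
#endif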
281 
282 /*
283  * Return the name of the pool.
284  */
285 const char *
286 zpool_get_name(zpool_handle_t *zhp)
287 {
288 	return (zhp->zpool_name);
289 }
290 
291 /*
292  * Return the GUID of the pool.
293  */
294 uint64_t
295 zpool_get_guid(zpool_handle_t *zhp)
296 {
297 	uint64_t guid;
298 
299 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
300 	    &guid) == 0);
301 	return (guid);
302 }
303 
304 /*
305  * Return the version of the pool.
306  */
307 uint64_t
308 zpool_get_version(zpool_handle_t *zhp)
309 {
310 	uint64_t version;
311 
312 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
313 	    &version) == 0);
314 
315 	return (version);
316 }
317 
318 /*
319  * Return the amount of space currently consumed by the pool.
320  */
321 uint64_t
322 zpool_get_space_used(zpool_handle_t *zhp)
323 {
324 	nvlist_t *nvroot;
325 	vdev_stat_t *vs;
326 	uint_t vsc;
327 
328 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
329 	    &nvroot) == 0);
330 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
331 	    (uint64_t **)&vs, &vsc) == 0);
332 
333 	return (vs->vs_alloc);
334 }
335 
336 /*
337  * Return the total space in the pool.
338  */
339 uint64_t
340 zpool_get_space_total(zpool_handle_t *zhp)
341 {
342 	nvlist_t *nvroot;
343 	vdev_stat_t *vs;
344 	uint_t vsc;
345 
346 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
347 	    &nvroot) == 0);
348 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
349 	    (uint64_t **)&vs, &vsc) == 0);
350 
351 	return (vs->vs_space);
352 }
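/*
 * Illustrative sketch (editorial addition): reporting pool capacity using
 * the two accessors above.  Both values come from the root vdev's
 * vdev_stat_t, so they reflect the config most recently read by
 * zpool_refresh_stats().
 */
#if 0
	uint64_t used = zpool_get_space_used(zhp);
	uint64_t total = zpool_get_space_total(zhp);

	(void) printf("%s: %llu of %llu bytes allocated\n",
	    zpool_get_name(zhp), (u_longlong_t)used, (u_longlong_t)total);
#endif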
353 
354 /*
355  * Return the alternate root for this pool, if any.
356  */
357 int
358 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
359 {
360 	zfs_cmd_t zc = { 0 };
361 
362 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
363 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
364 	    zc.zc_value[0] == '\0')
365 		return (-1);
366 
367 	(void) strlcpy(buf, zc.zc_value, buflen);
368 
369 	return (0);
370 }
371 
372 /*
373  * Return the state of the pool (ACTIVE or UNAVAILABLE).
374  */
375 int
376 zpool_get_state(zpool_handle_t *zhp)
377 {
378 	return (zhp->zpool_state);
379 }
380 
381 /*
382  * Create the named pool, using the provided vdev list.  It is assumed
383  * that the consumer has already validated the contents of the nvlist, so we
384  * don't have to worry about error semantics.
385  */
386 int
387 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
388     const char *altroot)
389 {
390 	zfs_cmd_t zc = { 0 };
391 	char msg[1024];
392 
393 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
394 	    "cannot create '%s'"), pool);
395 
396 	if (!zpool_name_valid(hdl, B_FALSE, pool))
397 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
398 
399 	if (altroot != NULL && altroot[0] != '/')
400 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
401 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
402 
403 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
404 		return (-1);
405 
406 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
407 
408 	if (altroot != NULL)
409 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
410 
411 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
412 		zcmd_free_nvlists(&zc);
413 
414 		switch (errno) {
415 		case EBUSY:
416 			/*
417 			 * This can happen if the user has specified the same
418 			 * device multiple times.  We can't reliably detect this
419 			 * until we try to add it and see that we already have a
420 			 * label.
421 			 */
422 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
423 			    "one or more vdevs refer to the same device"));
424 			return (zfs_error(hdl, EZFS_BADDEV, msg));
425 
426 		case EOVERFLOW:
427 			/*
428 			 * This occurs when one of the devices is below
429 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
430 			 * device was the problem device since there's no
431 			 * reliable way to determine device size from userland.
432 			 */
433 			{
434 				char buf[64];
435 
436 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
437 
438 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
439 				    "one or more devices is less than the "
440 				    "minimum size (%s)"), buf);
441 			}
442 			return (zfs_error(hdl, EZFS_BADDEV, msg));
443 
444 		case ENOSPC:
445 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
446 			    "one or more devices is out of space"));
447 			return (zfs_error(hdl, EZFS_BADDEV, msg));
448 
449 		default:
450 			return (zpool_standard_error(hdl, errno, msg));
451 		}
452 	}
453 
454 	zcmd_free_nvlists(&zc);
455 
456 	/*
457 	 * If this is an alternate root pool, then we automatically set the
458 	 * mountpoint of the root dataset to be '/'.
459 	 */
460 	if (altroot != NULL) {
461 		zfs_handle_t *zhp;
462 
463 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
464 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
465 		    "/") == 0);
466 
467 		zfs_close(zhp);
468 	}
469 
470 	return (0);
471 }
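/*
 * Illustrative sketch (editorial addition): a minimal single-disk nvroot
 * for zpool_create().  In practice zpool(1M) builds this tree (including
 * ZPOOL_CONFIG_WHOLE_DISK handling) in its vdev parsing code; the pool and
 * device names here are hypothetical.
 */
#if 0
	nvlist_t *nvroot, *disk;

	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c1t0d0s0") == 0);

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	if (zpool_create(hdl, "tank", nvroot, NULL) != 0)
		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));

	nvlist_free(disk);
	nvlist_free(nvroot);
#endif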
472 
473 /*
474  * Destroy the given pool.  It is up to the caller to ensure that there are no
475  * datasets left in the pool.
476  */
477 int
478 zpool_destroy(zpool_handle_t *zhp)
479 {
480 	zfs_cmd_t zc = { 0 };
481 	zfs_handle_t *zfp = NULL;
482 	libzfs_handle_t *hdl = zhp->zpool_hdl;
483 	char msg[1024];
484 
485 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
486 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
487 	    ZFS_TYPE_FILESYSTEM)) == NULL)
488 		return (-1);
489 
490 	if (zpool_remove_zvol_links(zhp) != 0)
491 		return (-1);
492 
493 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
494 
495 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
496 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
497 		    "cannot destroy '%s'"), zhp->zpool_name);
498 
499 		if (errno == EROFS) {
500 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 			    "one or more devices is read only"));
502 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
503 		} else {
504 			(void) zpool_standard_error(hdl, errno, msg);
505 		}
506 
507 		if (zfp)
508 			zfs_close(zfp);
509 		return (-1);
510 	}
511 
512 	if (zfp) {
513 		remove_mountpoint(zfp);
514 		zfs_close(zfp);
515 	}
516 
517 	return (0);
518 }
519 
520 /*
521  * Add the given vdevs to the pool.  The caller must have already performed the
522  * necessary verification to ensure that the vdev specification is well-formed.
523  */
524 int
525 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
526 {
527 	zfs_cmd_t zc = { 0 };
528 	int ret;
529 	libzfs_handle_t *hdl = zhp->zpool_hdl;
530 	char msg[1024];
531 	nvlist_t **spares;
532 	uint_t nspares;
533 
534 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
535 	    "cannot add to '%s'"), zhp->zpool_name);
536 
537 	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
538 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
539 	    &spares, &nspares) == 0) {
540 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
541 		    "upgraded to add hot spares"));
542 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
543 	}
544 
545 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
546 		return (-1);
547 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
548 
549 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
550 		switch (errno) {
551 		case EBUSY:
552 			/*
553 			 * This can happen if the user has specified the same
554 			 * device multiple times.  We can't reliably detect this
555 			 * until we try to add it and see that we already have a
556 			 * label.
557 			 */
558 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559 			    "one or more vdevs refer to the same device"));
560 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
561 			break;
562 
563 		case EOVERFLOW:
564 			/*
565 			 * This occurs when one of the devices is below
566 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
567 			 * device was the problem device since there's no
568 			 * reliable way to determine device size from userland.
569 			 */
570 			{
571 				char buf[64];
572 
573 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
574 
575 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
576 				    "device is less than the minimum "
577 				    "size (%s)"), buf);
578 			}
579 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
580 			break;
581 
582 		case ENOTSUP:
583 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
584 			    "pool must be upgraded to add raidz2 vdevs"));
585 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
586 			break;
587 
588 		case EDOM:
589 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
590 			    "root pool cannot have multiple vdevs"));
591 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
592 			break;
593 
594 		default:
595 			(void) zpool_standard_error(hdl, errno, msg);
596 		}
597 
598 		ret = -1;
599 	} else {
600 		ret = 0;
601 	}
602 
603 	zcmd_free_nvlists(&zc);
604 
605 	return (ret);
606 }
607 
608 /*
609  * Exports the pool from the system.  The caller must ensure that there are no
610  * mounted datasets in the pool.
611  */
612 int
613 zpool_export(zpool_handle_t *zhp)
614 {
615 	zfs_cmd_t zc = { 0 };
616 
617 	if (zpool_remove_zvol_links(zhp) != 0)
618 		return (-1);
619 
620 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
621 
622 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
623 		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
624 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
625 		    zhp->zpool_name));
626 	return (0);
627 }
628 
629 /*
630  * Import the given pool using the known configuration.  The configuration
631  * should have come from zpool_find_import().  The 'newname' and 'altroot'
632  * parameters control whether the pool is imported with a different name or with
633  * an alternate root, respectively.
634  */
635 int
636 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
637     const char *altroot)
638 {
639 	zfs_cmd_t zc = { 0 };
640 	char *thename;
641 	char *origname;
642 	int ret;
643 
644 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
645 	    &origname) == 0);
646 
647 	if (newname != NULL) {
648 		if (!zpool_name_valid(hdl, B_FALSE, newname))
649 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
650 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
651 			    newname));
652 		thename = (char *)newname;
653 	} else {
654 		thename = origname;
655 	}
656 
657 	if (altroot != NULL && altroot[0] != '/')
658 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
659 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
660 		    altroot));
661 
662 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
663 
664 	if (altroot != NULL)
665 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
666 	else
667 		zc.zc_value[0] = '\0';
668 
669 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
670 	    &zc.zc_guid) == 0);
671 
672 	if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
673 		return (-1);
674 
675 	ret = 0;
676 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
677 		char desc[1024];
678 		if (newname == NULL)
679 			(void) snprintf(desc, sizeof (desc),
680 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
681 			    thename);
682 		else
683 			(void) snprintf(desc, sizeof (desc),
684 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
685 			    origname, thename);
686 
687 		switch (errno) {
688 		case ENOTSUP:
689 			/*
690 			 * Unsupported version.
691 			 */
692 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
693 			break;
694 
695 		case EINVAL:
696 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
697 			break;
698 
699 		default:
700 			(void) zpool_standard_error(hdl, errno, desc);
701 		}
702 
703 		ret = -1;
704 	} else {
705 		zpool_handle_t *zhp;
706 		/*
707 		 * This should never fail, but play it safe anyway.
708 		 */
709 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
710 			ret = -1;
711 		} else if (zhp != NULL) {
712 			ret = zpool_create_zvol_links(zhp);
713 			zpool_close(zhp);
714 		}
715 	}
716 
717 	zcmd_free_nvlists(&zc);
718 	return (ret);
719 }
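/*
 * Illustrative sketch (editorial addition): importing every exported pool
 * that device discovery finds.  Assumes the zpool_find_import() entry point
 * from libzfs_import.c, which returns an nvlist mapping pool names to
 * config nvlists of the form zpool_import() expects.
 */
#if 0
	nvlist_t *pools, *config;
	nvpair_t *elem = NULL;

	if ((pools = zpool_find_import(hdl, 0, NULL)) == NULL)
		return (-1);

	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &config) == 0);
		(void) zpool_import(hdl, config, NULL, NULL);
	}

	nvlist_free(pools);
#endif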
720 
721 /*
722  * Scrub the pool.
723  */
724 int
725 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
726 {
727 	zfs_cmd_t zc = { 0 };
728 	char msg[1024];
729 	libzfs_handle_t *hdl = zhp->zpool_hdl;
730 
731 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
732 	zc.zc_cookie = type;
733 
734 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
735 		return (0);
736 
737 	(void) snprintf(msg, sizeof (msg),
738 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
739 
740 	if (errno == EBUSY)
741 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
742 	else
743 		return (zpool_standard_error(hdl, errno, msg));
744 }
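/*
 * Illustrative sketch (editorial addition): starting and stopping a full
 * scrub.  A resilver already in progress surfaces as EBUSY, which maps to
 * EZFS_RESILVERING above.
 */
#if 0
	if (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING) != 0)
		return (-1);

	/* ... later, cancel it ... */
	(void) zpool_scrub(zhp, POOL_SCRUB_NONE);
#endif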
745 
746 /*
747  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
748  * spare, or FALSE if it's an INUSE spare.
749  */
750 static nvlist_t *
751 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
752     boolean_t *avail_spare)
753 {
754 	uint_t c, children;
755 	nvlist_t **child;
756 	uint64_t theguid, present;
757 	char *path;
758 	uint64_t wholedisk = 0;
759 	nvlist_t *ret;
760 
761 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
762 
763 	if (search == NULL &&
764 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
765 		/*
766 		 * If the device has never been present since import, the only
767 		 * reliable way to match the vdev is by GUID.
768 		 */
769 		if (theguid == guid)
770 			return (nv);
771 	} else if (search != NULL &&
772 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
773 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
774 		    &wholedisk);
775 		if (wholedisk) {
776 			/*
777 			 * For whole disks, the internal path has 's0', but the
778 			 * path passed in by the user doesn't.
779 			 */
780 			if (strlen(search) == strlen(path) - 2 &&
781 			    strncmp(search, path, strlen(search)) == 0)
782 				return (nv);
783 		} else if (strcmp(search, path) == 0) {
784 			return (nv);
785 		}
786 	}
787 
788 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
789 	    &child, &children) != 0)
790 		return (NULL);
791 
792 	for (c = 0; c < children; c++)
793 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
794 		    avail_spare)) != NULL)
795 			return (ret);
796 
797 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
798 	    &child, &children) == 0) {
799 		for (c = 0; c < children; c++) {
800 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
801 			    avail_spare)) != NULL) {
802 				*avail_spare = B_TRUE;
803 				return (ret);
804 			}
805 		}
806 	}
807 
808 	return (NULL);
809 }
810 
811 nvlist_t *
812 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
813 {
814 	char buf[MAXPATHLEN];
815 	const char *search;
816 	char *end;
817 	nvlist_t *nvroot;
818 	uint64_t guid;
819 
820 	guid = strtoull(path, &end, 10);
821 	if (guid != 0 && *end == '\0') {
822 		search = NULL;
823 	} else if (path[0] != '/') {
824 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
825 		search = buf;
826 	} else {
827 		search = path;
828 	}
829 
830 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
831 	    &nvroot) == 0);
832 
833 	*avail_spare = B_FALSE;
834 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
835 }
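/*
 * Illustrative sketch (editorial addition): looking up a vdev.  The search
 * key may be a short device name (internally prefixed with /dev/dsk/), a
 * full path, or a decimal guid; the device name here is hypothetical.
 */
#if 0
	boolean_t avail_spare;
	nvlist_t *tgt;

	if ((tgt = zpool_find_vdev(zhp, "c1t0d0", &avail_spare)) == NULL)
		return (-1);		/* no such device in this pool */

	if (avail_spare) {
		/* the match came from the pool's AVAIL spare list */
	}
#endif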
836 
837 /*
838  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
839  */
840 static boolean_t
841 is_spare(zpool_handle_t *zhp, uint64_t guid)
842 {
843 	uint64_t spare_guid;
844 	nvlist_t *nvroot;
845 	nvlist_t **spares;
846 	uint_t nspares;
847 	int i;
848 
849 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
850 	    &nvroot) == 0);
851 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
852 	    &spares, &nspares) == 0) {
853 		for (i = 0; i < nspares; i++) {
854 			verify(nvlist_lookup_uint64(spares[i],
855 			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
856 			if (guid == spare_guid)
857 				return (B_TRUE);
858 		}
859 	}
860 
861 	return (B_FALSE);
862 }
863 
864 /*
865  * Bring the specified vdev online.  The 'flags' parameter is a set of the
866  * ZFS_ONLINE_* flags.
867  */
868 int
869 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
870     vdev_state_t *newstate)
871 {
872 	zfs_cmd_t zc = { 0 };
873 	char msg[1024];
874 	nvlist_t *tgt;
875 	boolean_t avail_spare;
876 	libzfs_handle_t *hdl = zhp->zpool_hdl;
877 
878 	(void) snprintf(msg, sizeof (msg),
879 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
880 
881 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
882 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
883 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
884 
885 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
886 
887 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
888 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
889 
890 	zc.zc_cookie = VDEV_STATE_ONLINE;
891 	zc.zc_obj = flags;
892 
893 
894 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
895 		return (zpool_standard_error(hdl, errno, msg));
896 
897 	*newstate = zc.zc_cookie;
898 	return (0);
899 }
900 
901 /*
902  * Take the specified vdev offline.
903  */
904 int
905 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
906 {
907 	zfs_cmd_t zc = { 0 };
908 	char msg[1024];
909 	nvlist_t *tgt;
910 	boolean_t avail_spare;
911 	libzfs_handle_t *hdl = zhp->zpool_hdl;
912 
913 	(void) snprintf(msg, sizeof (msg),
914 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
915 
916 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
917 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
918 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
919 
920 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
921 
922 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
923 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
924 
925 	zc.zc_cookie = VDEV_STATE_OFFLINE;
926 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
927 
928 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
929 		return (0);
930 
931 	switch (errno) {
932 	case EBUSY:
933 
934 		/*
935 		 * There are no other replicas of this device.
936 		 */
937 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
938 
939 	default:
940 		return (zpool_standard_error(hdl, errno, msg));
941 	}
942 }
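/*
 * Illustrative sketch (editorial addition): cycling a device offline and
 * back online.  B_TRUE requests a temporary offline (not persisted across
 * reboot); passing 0 for 'flags' requests a plain online, and 'newstate'
 * reports the state the kernel actually reached.
 */
#if 0
	vdev_state_t newstate;

	if (zpool_vdev_offline(zhp, "c1t0d0", B_TRUE) != 0)
		return (-1);

	/* ... service the device ... */

	if (zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) != 0)
		return (-1);
	if (newstate != VDEV_STATE_HEALTHY)
		(void) printf("device brought online, but is not yet "
		    "healthy\n");
#endif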
943 
944 /*
945  * Mark the given vdev faulted.
946  */
947 int
948 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
949 {
950 	zfs_cmd_t zc = { 0 };
951 	char msg[1024];
952 	libzfs_handle_t *hdl = zhp->zpool_hdl;
953 
954 	(void) snprintf(msg, sizeof (msg),
955 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
956 
957 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
958 	zc.zc_guid = guid;
959 	zc.zc_cookie = VDEV_STATE_FAULTED;
960 
961 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
962 		return (0);
963 
964 	switch (errno) {
965 	case EBUSY:
966 
967 		/*
968 		 * There are no other replicas of this device.
969 		 */
970 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
971 
972 	default:
973 		return (zpool_standard_error(hdl, errno, msg));
974 	}
975 
976 }
977 
978 /*
979  * Mark the given vdev degraded.
980  */
981 int
982 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
983 {
984 	zfs_cmd_t zc = { 0 };
985 	char msg[1024];
986 	libzfs_handle_t *hdl = zhp->zpool_hdl;
987 
988 	(void) snprintf(msg, sizeof (msg),
989 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
990 
991 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
992 	zc.zc_guid = guid;
993 	zc.zc_cookie = VDEV_STATE_DEGRADED;
994 
995 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
996 		return (0);
997 
998 	return (zpool_standard_error(hdl, errno, msg));
999 }
1000 
1001 /*
1002  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1003  * a hot spare.
1004  */
1005 static boolean_t
1006 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1007 {
1008 	nvlist_t **child;
1009 	uint_t c, children;
1010 	char *type;
1011 
1012 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1013 	    &children) == 0) {
1014 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1015 		    &type) == 0);
1016 
1017 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1018 		    children == 2 && child[which] == tgt)
1019 			return (B_TRUE);
1020 
1021 		for (c = 0; c < children; c++)
1022 			if (is_replacing_spare(child[c], tgt, which))
1023 				return (B_TRUE);
1024 	}
1025 
1026 	return (B_FALSE);
1027 }
1028 
1029 /*
1030  * Attach new_disk (fully described by nvroot) to old_disk.
1031  * If 'replacing' is specified, the new disk will replace the old one.
1032  */
1033 int
1034 zpool_vdev_attach(zpool_handle_t *zhp,
1035     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1036 {
1037 	zfs_cmd_t zc = { 0 };
1038 	char msg[1024];
1039 	int ret;
1040 	nvlist_t *tgt;
1041 	boolean_t avail_spare;
1042 	uint64_t val;
1043 	char *path;
1044 	nvlist_t **child;
1045 	uint_t children;
1046 	nvlist_t *config_root;
1047 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1048 
1049 	if (replacing)
1050 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1051 		    "cannot replace %s with %s"), old_disk, new_disk);
1052 	else
1053 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1054 		    "cannot attach %s to %s"), new_disk, old_disk);
1055 
1056 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1057 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
1058 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1059 
1060 	if (avail_spare)
1061 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1062 
1063 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1064 	zc.zc_cookie = replacing;
1065 
1066 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1067 	    &child, &children) != 0 || children != 1) {
1068 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1069 		    "new device must be a single disk"));
1070 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1071 	}
1072 
1073 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1074 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1075 
1076 	/*
1077 	 * If the target is a hot spare that has been swapped in, we can only
1078 	 * replace it with another hot spare.
1079 	 */
1080 	if (replacing &&
1081 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1082 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1083 	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
1084 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1085 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1086 		    "can only be replaced by another hot spare"));
1087 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1088 	}
1089 
1090 	/*
1091  * If we are attempting to replace a spare, it cannot be applied to an
1092 	 * already spared device.
1093 	 */
1094 	if (replacing &&
1095 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1096 	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1097 	    is_replacing_spare(config_root, tgt, 0)) {
1098 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1099 		    "device has already been replaced with a spare"));
1100 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1101 	}
1102 
1103 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
1104 		return (-1);
1105 
1106 	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
1107 
1108 	zcmd_free_nvlists(&zc);
1109 
1110 	if (ret == 0)
1111 		return (0);
1112 
1113 	switch (errno) {
1114 	case ENOTSUP:
1115 		/*
1116 		 * Can't attach to or replace this type of vdev.
1117 		 */
1118 		if (replacing)
1119 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1120 			    "cannot replace a replacing device"));
1121 		else
1122 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1123 			    "can only attach to mirrors and top-level "
1124 			    "disks"));
1125 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1126 		break;
1127 
1128 	case EINVAL:
1129 		/*
1130 		 * The new device must be a single disk.
1131 		 */
1132 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1133 		    "new device must be a single disk"));
1134 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1135 		break;
1136 
1137 	case EBUSY:
1138 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1139 		    new_disk);
1140 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1141 		break;
1142 
1143 	case EOVERFLOW:
1144 		/*
1145 		 * The new device is too small.
1146 		 */
1147 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1148 		    "device is too small"));
1149 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1150 		break;
1151 
1152 	case EDOM:
1153 		/*
1154 		 * The new device has a different alignment requirement.
1155 		 */
1156 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1157 		    "devices have different sector alignment"));
1158 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1159 		break;
1160 
1161 	case ENAMETOOLONG:
1162 		/*
1163 		 * The resulting top-level vdev spec won't fit in the label.
1164 		 */
1165 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1166 		break;
1167 
1168 	default:
1169 		(void) zpool_standard_error(hdl, errno, msg);
1170 	}
1171 
1172 	return (-1);
1173 }
1174 
1175 /*
1176  * Detach the specified device.
1177  */
1178 int
1179 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1180 {
1181 	zfs_cmd_t zc = { 0 };
1182 	char msg[1024];
1183 	nvlist_t *tgt;
1184 	boolean_t avail_spare;
1185 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1186 
1187 	(void) snprintf(msg, sizeof (msg),
1188 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1189 
1190 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1191 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1192 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1193 
1194 	if (avail_spare)
1195 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1196 
1197 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1198 
1199 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1200 		return (0);
1201 
1202 	switch (errno) {
1203 
1204 	case ENOTSUP:
1205 		/*
1206 		 * Can't detach from this type of vdev.
1207 		 */
1208 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1209 		    "applicable to mirror and replacing vdevs"));
1210 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1211 		break;
1212 
1213 	case EBUSY:
1214 		/*
1215 		 * There are no other replicas of this device.
1216 		 */
1217 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1218 		break;
1219 
1220 	default:
1221 		(void) zpool_standard_error(hdl, errno, msg);
1222 	}
1223 
1224 	return (-1);
1225 }
1226 
1227 /*
1228  * Remove the given device.  Currently, this is supported only for hot spares.
1229  */
1230 int
1231 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1232 {
1233 	zfs_cmd_t zc = { 0 };
1234 	char msg[1024];
1235 	nvlist_t *tgt;
1236 	boolean_t avail_spare;
1237 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1238 
1239 	(void) snprintf(msg, sizeof (msg),
1240 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1241 
1242 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1243 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1244 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1245 
1246 	if (!avail_spare) {
1247 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1248 		    "only inactive hot spares can be removed"));
1249 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1250 	}
1251 
1252 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1253 
1254 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1255 		return (0);
1256 
1257 	return (zpool_standard_error(hdl, errno, msg));
1258 }
1259 
1260 /*
1261  * Clear the errors for the pool, or the particular device if specified.
1262  */
1263 int
1264 zpool_clear(zpool_handle_t *zhp, const char *path)
1265 {
1266 	zfs_cmd_t zc = { 0 };
1267 	char msg[1024];
1268 	nvlist_t *tgt;
1269 	boolean_t avail_spare;
1270 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1271 
1272 	if (path)
1273 		(void) snprintf(msg, sizeof (msg),
1274 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1275 		    path);
1276 	else
1277 		(void) snprintf(msg, sizeof (msg),
1278 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1279 		    zhp->zpool_name);
1280 
1281 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1282 	if (path) {
1283 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1284 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1285 
1286 		if (avail_spare)
1287 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1288 
1289 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1290 		    &zc.zc_guid) == 0);
1291 	}
1292 
1293 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1294 		return (0);
1295 
1296 	return (zpool_standard_error(hdl, errno, msg));
1297 }
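/*
 * Illustrative sketch (editorial addition): clearing error counts pool-wide
 * versus for a single device.  A NULL path clears every vdev in the pool;
 * the device name is hypothetical.
 */
#if 0
	(void) zpool_clear(zhp, NULL);		/* whole pool */
	(void) zpool_clear(zhp, "c1t0d0");	/* single vdev */
#endif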
1298 
1299 /*
1300  * Similar to zpool_clear(), but takes a GUID (used by fmd).
1301  */
1302 int
1303 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
1304 {
1305 	zfs_cmd_t zc = { 0 };
1306 	char msg[1024];
1307 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1308 
1309 	(void) snprintf(msg, sizeof (msg),
1310 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
1311 	    guid);
1312 
1313 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1314 	zc.zc_guid = guid;
1315 
1316 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1317 		return (0);
1318 
1319 	return (zpool_standard_error(hdl, errno, msg));
1320 }
1321 
1322 /*
1323  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1324  * hierarchy.
1325  */
1326 int
1327 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1328     void *data)
1329 {
1330 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1331 	char (*paths)[MAXPATHLEN];
1332 	size_t size = 4;
1333 	int curr, fd, base, ret = 0;
1334 	DIR *dirp;
1335 	struct dirent *dp;
1336 	struct stat st;
1337 
1338 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
1339 		return (errno == ENOENT ? 0 : -1);
1340 
1341 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
1342 		int err = errno;
1343 		(void) close(base);
1344 		return (err == ENOENT ? 0 : -1);
1345 	}
1346 
1347 	/*
1348 	 * Oddly this wasn't a directory -- ignore that failure since we
1349  * know there are no links lower in the (non-existent) hierarchy.
1350 	 */
1351 	if (!S_ISDIR(st.st_mode)) {
1352 		(void) close(base);
1353 		return (0);
1354 	}
1355 
1356 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1357 		(void) close(base);
1358 		return (-1);
1359 	}
1360 
1361 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1362 	curr = 0;
1363 
1364 	while (curr >= 0) {
1365 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
1366 			goto err;
1367 
1368 		if (S_ISDIR(st.st_mode)) {
1369 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
1370 				goto err;
1371 
1372 			if ((dirp = fdopendir(fd)) == NULL) {
1373 				(void) close(fd);
1374 				goto err;
1375 			}
1376 
1377 			while ((dp = readdir(dirp)) != NULL) {
1378 				if (dp->d_name[0] == '.')
1379 					continue;
1380 
1381 				if (curr + 1 == size) {
1382 					paths = zfs_realloc(hdl, paths,
1383 					    size * sizeof (paths[0]),
1384 					    size * 2 * sizeof (paths[0]));
1385 					if (paths == NULL) {
1386 						(void) closedir(dirp);
1387 						(void) close(fd);
1388 						goto err;
1389 					}
1390 
1391 					size *= 2;
1392 				}
1393 
1394 				(void) strlcpy(paths[curr + 1], paths[curr],
1395 				    sizeof (paths[curr + 1]));
1396 				(void) strlcat(paths[curr], "/",
1397 				    sizeof (paths[curr]));
1398 				(void) strlcat(paths[curr], dp->d_name,
1399 				    sizeof (paths[curr]));
1400 				curr++;
1401 			}
1402 
1403 			(void) closedir(dirp);
1404 
1405 		} else {
1406 			if ((ret = cb(paths[curr], data)) != 0)
1407 				break;
1408 		}
1409 
1410 		curr--;
1411 	}
1412 
1413 	free(paths);
1414 	(void) close(base);
1415 
1416 	return (ret);
1417 
1418 err:
1419 	free(paths);
1420 	(void) close(base);
1421 	return (-1);
1422 }
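/*
 * Illustrative sketch (editorial addition): a zpool_iter_zvol() callback.
 * A nonzero return from the callback stops the walk and becomes the return
 * value of zpool_iter_zvol().
 */
#if 0
static int
print_zvol(const char *dataset, void *data)
{
	(void) printf("zvol: %s\n", dataset);
	return (0);
}

	/* ... and from within some caller: ... */
	if (zpool_iter_zvol(zhp, print_zvol, NULL) != 0)
		return (-1);
#endif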
1423 
1424 typedef struct zvol_cb {
1425 	zpool_handle_t *zcb_pool;
1426 	boolean_t zcb_create;
1427 } zvol_cb_t;
1428 
1429 /*ARGSUSED*/
1430 static int
1431 do_zvol_create(zfs_handle_t *zhp, void *data)
1432 {
1433 	int ret;
1434 
1435 	if (ZFS_IS_VOLUME(zhp))
1436 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1437 
1438 	ret = zfs_iter_children(zhp, do_zvol_create, NULL);
1439 
1440 	zfs_close(zhp);
1441 
1442 	return (ret);
1443 }
1444 
1445 /*
1446  * Iterate over all zvols in the pool and make any necessary minor nodes.
1447  */
1448 int
1449 zpool_create_zvol_links(zpool_handle_t *zhp)
1450 {
1451 	zfs_handle_t *zfp;
1452 	int ret;
1453 
1454 	/*
1455 	 * If the pool is unavailable, just return success.
1456 	 */
1457 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1458 	    zhp->zpool_name)) == NULL)
1459 		return (0);
1460 
1461 	ret = zfs_iter_children(zfp, do_zvol_create, NULL);
1462 
1463 	zfs_close(zfp);
1464 	return (ret);
1465 }
1466 
1467 static int
1468 do_zvol_remove(const char *dataset, void *data)
1469 {
1470 	zpool_handle_t *zhp = data;
1471 
1472 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
1473 }
1474 
1475 /*
1476  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
1477  * by examining the /dev links so that a corrupted pool doesn't impede this
1478  * operation.
1479  */
1480 int
1481 zpool_remove_zvol_links(zpool_handle_t *zhp)
1482 {
1483 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
1484 }
1485 
1486 /*
1487  * Convert from a devid string to a path.
1488  */
1489 static char *
1490 devid_to_path(char *devid_str)
1491 {
1492 	ddi_devid_t devid;
1493 	char *minor;
1494 	char *path;
1495 	devid_nmlist_t *list = NULL;
1496 	int ret;
1497 
1498 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1499 		return (NULL);
1500 
1501 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1502 
1503 	devid_str_free(minor);
1504 	devid_free(devid);
1505 
1506 	if (ret != 0)
1507 		return (NULL);
1508 
1509 	if ((path = strdup(list[0].devname)) == NULL)
1510 		return (NULL);
1511 
1512 	devid_free_nmlist(list);
1513 
1514 	return (path);
1515 }
1516 
1517 /*
1518  * Convert from a path to a devid string.
1519  */
1520 static char *
1521 path_to_devid(const char *path)
1522 {
1523 	int fd;
1524 	ddi_devid_t devid;
1525 	char *minor, *ret;
1526 
1527 	if ((fd = open(path, O_RDONLY)) < 0)
1528 		return (NULL);
1529 
1530 	minor = NULL;
1531 	ret = NULL;
1532 	if (devid_get(fd, &devid) == 0) {
1533 		if (devid_get_minor_name(fd, &minor) == 0)
1534 			ret = devid_str_encode(devid, minor);
1535 		if (minor != NULL)
1536 			devid_str_free(minor);
1537 		devid_free(devid);
1538 	}
1539 	(void) close(fd);
1540 
1541 	return (ret);
1542 }
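/*
 * Illustrative sketch (editorial addition): round-tripping the two helpers
 * above.  The devid string from path_to_devid() is allocated by
 * devid_str_encode() and must be released with devid_str_free(); the path
 * from devid_to_path() is strdup()ed and is released with free().
 */
#if 0
	char *devid, *curpath;

	if ((devid = path_to_devid("/dev/dsk/c1t0d0s0")) != NULL) {
		if ((curpath = devid_to_path(devid)) != NULL) {
			/* 'curpath' is where the device lives today */
			free(curpath);
		}
		devid_str_free(devid);
	}
#endif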
1543 
1544 /*
1545  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1546  * ignore any failure here, since a common case is for an unprivileged user to
1547  * type 'zpool status', and we'll display the correct information anyway.
1548  */
1549 static void
1550 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1551 {
1552 	zfs_cmd_t zc = { 0 };
1553 
1554 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1555 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
1556 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1557 	    &zc.zc_guid) == 0);
1558 
1559 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1560 }
1561 
1562 /*
1563  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1564  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1565  * We also check if this is a whole disk, in which case we strip off the
1566  * trailing 's0' slice name.
1567  *
1568  * This routine is also responsible for identifying when disks have been
1569  * reconfigured in a new location.  The kernel will have opened the device by
1570  * devid, but the path will still refer to the old location.  To catch this, we
1571  * first do a path -> devid translation (which is fast for the common case).  If
1572  * the devid matches, we're done.  If not, we do a reverse devid -> path
1573  * translation and issue the appropriate ioctl() to update the path of the vdev.
1574  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1575  * of these checks.
1576  */
1577 char *
1578 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
1579 {
1580 	char *path, *devid;
1581 	uint64_t value;
1582 	char buf[64];
1583 	vdev_stat_t *vs;
1584 	uint_t vsc;
1585 
1586 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1587 	    &value) == 0) {
1588 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1589 		    &value) == 0);
1590 		(void) snprintf(buf, sizeof (buf), "%llu",
1591 		    (u_longlong_t)value);
1592 		path = buf;
1593 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1594 
1595 		/*
1596 		 * If the device is dead (faulted, offline, etc) then don't
1597 		 * bother opening it.  Otherwise we may be forcing the user to
1598 		 * open a misbehaving device, which can have undesirable
1599 		 * effects.
1600 		 */
1601 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
1602 		    (uint64_t **)&vs, &vsc) != 0 ||
1603 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
1604 		    zhp != NULL &&
1605 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
1606 			/*
1607 			 * Determine if the current path is correct.
1608 			 */
1609 			char *newdevid = path_to_devid(path);
1610 
1611 			if (newdevid == NULL ||
1612 			    strcmp(devid, newdevid) != 0) {
1613 				char *newpath;
1614 
1615 				if ((newpath = devid_to_path(devid)) != NULL) {
1616 					/*
1617 					 * Update the path appropriately.
1618 					 */
1619 					set_path(zhp, nv, newpath);
1620 					if (nvlist_add_string(nv,
1621 					    ZPOOL_CONFIG_PATH, newpath) == 0)
1622 						verify(nvlist_lookup_string(nv,
1623 						    ZPOOL_CONFIG_PATH,
1624 						    &path) == 0);
1625 					free(newpath);
1626 				}
1627 			}
1628 
1629 			if (newdevid)
1630 				devid_str_free(newdevid);
1631 		}
1632 
1633 		if (strncmp(path, "/dev/dsk/", 9) == 0)
1634 			path += 9;
1635 
1636 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1637 		    &value) == 0 && value) {
1638 			char *tmp = zfs_strdup(hdl, path);
1639 			if (tmp == NULL)
1640 				return (NULL);
1641 			tmp[strlen(path) - 2] = '\0';
1642 			return (tmp);
1643 		}
1644 	} else {
1645 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
1646 
1647 		/*
1648 		 * If it's a raidz device, we need to stick in the parity level.
1649 		 */
1650 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
1651 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
1652 			    &value) == 0);
1653 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
1654 			    (u_longlong_t)value);
1655 			path = buf;
1656 		}
1657 	}
1658 
1659 	return (zfs_strdup(hdl, path));
1660 }
1661 
1662 static int
1663 zbookmark_compare(const void *a, const void *b)
1664 {
1665 	return (memcmp(a, b, sizeof (zbookmark_t)));
1666 }
1667 
1668 /*
1669  * Retrieve the persistent error log, uniquify the members, and return to the
1670  * caller.
1671  */
1672 int
1673 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
1674 {
1675 	zfs_cmd_t zc = { 0 };
1676 	uint64_t count;
1677 	zbookmark_t *zb = NULL;
1678 	int i;
1679 
1680 	/*
1681 	 * Retrieve the raw error list from the kernel.  If the number of errors
1682 	 * has increased, allocate more space and continue until we get the
1683 	 * entire list.
1684 	 */
1685 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1686 	    &count) == 0);
1687 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1688 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
1689 		return (-1);
1690 	zc.zc_nvlist_dst_size = count;
1691 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1692 	for (;;) {
1693 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1694 		    &zc) != 0) {
1695 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
1696 			if (errno == ENOMEM) {
1697 				count = zc.zc_nvlist_dst_size;
1698 				if ((zc.zc_nvlist_dst = (uintptr_t)
1699 				    zfs_alloc(zhp->zpool_hdl, count *
1700 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
1701 					return (-1);
1702 			} else {
1703 				return (-1);
1704 			}
1705 		} else {
1706 			break;
1707 		}
1708 	}
1709 
1710 	/*
1711 	 * Sort the resulting bookmarks.  This is a little confusing due to the
1712 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
1713  * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
1714  * _not_ copied as part of the process.  So we point the start of our
1715  * array appropriately and decrement the total number of elements.
1716 	 */
1717 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
1718 	    zc.zc_nvlist_dst_size;
1719 	count -= zc.zc_nvlist_dst_size;
1720 
1721 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1722 
1723 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
1724 
1725 	/*
1726 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
1727 	 */
1728 	for (i = 0; i < count; i++) {
1729 		nvlist_t *nv;
1730 
1731 		/* ignoring zb_blkid and zb_level for now */
1732 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
1733 		    zb[i-1].zb_object == zb[i].zb_object)
1734 			continue;
1735 
1736 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
1737 			goto nomem;
1738 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
1739 		    zb[i].zb_objset) != 0) {
1740 			nvlist_free(nv);
1741 			goto nomem;
1742 		}
1743 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
1744 		    zb[i].zb_object) != 0) {
1745 			nvlist_free(nv);
1746 			goto nomem;
1747 		}
1748 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
1749 			nvlist_free(nv);
1750 			goto nomem;
1751 		}
1752 		nvlist_free(nv);
1753 	}
1754 
1755 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1756 	return (0);
1757 
1758 nomem:
1759 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1760 	return (no_memory(zhp->zpool_hdl));
1761 }
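/*
 * Illustrative sketch (editorial addition): walking the uniquified error
 * list, much as 'zpool status -v' does.  Each element carries a dataset and
 * object number that zpool_obj_to_path() (later in this file) turns into a
 * human-readable path.
 */
#if 0
	nvlist_t *nverrlist, *nv;
	nvpair_t *elem = NULL;
	uint64_t dsobj, obj;
	char pathname[MAXPATHLEN * 2];

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return (-1);

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}

	nvlist_free(nverrlist);
#endif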
1762 
1763 /*
1764  * Upgrade a ZFS pool to the latest on-disk version.
1765  */
1766 int
1767 zpool_upgrade(zpool_handle_t *zhp)
1768 {
1769 	zfs_cmd_t zc = { 0 };
1770 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1771 
1772 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1773 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1774 		return (zpool_standard_error_fmt(hdl, errno,
1775 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1776 		    zhp->zpool_name));
1777 
1778 	return (0);
1779 }
1780 
1781 /*
1782  * Log command history.
1783  *
1784  * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
1785  * otherwise ('zfs').  'pool_create' is B_TRUE if we are logging the creation
1786  * of the pool; B_FALSE otherwise.  'path' is the pathname containing the
1787  * poolname.  'argc' and 'argv' are used to construct the command string.
1788  */
1789 void
1790 zpool_log_history(libzfs_handle_t *hdl, int argc, char **argv, const char *path,
1791 	boolean_t pool, boolean_t pool_create)
1792 {
1793 	char cmd_buf[HIS_MAX_RECORD_LEN];
1794 	char *dspath;
1795 	zfs_cmd_t zc = { 0 };
1796 	int i;
1797 
1798 	/* construct the command string */
1799 	(void) strcpy(cmd_buf, pool ? "zpool" : "zfs");
1800 	for (i = 0; i < argc; i++) {
1801 		if (strlen(cmd_buf) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
1802 			break;
1803 		(void) strcat(cmd_buf, " ");
1804 		(void) strcat(cmd_buf, argv[i]);
1805 	}
1806 
1807 	/* figure out the poolname */
1808 	dspath = strpbrk(path, "/@");
1809 	if (dspath == NULL) {
1810 		(void) strcpy(zc.zc_name, path);
1811 	} else {
1812 		(void) strncpy(zc.zc_name, path, dspath - path);
1813 		zc.zc_name[dspath-path] = '\0';
1814 	}
1815 
1816 	zc.zc_history = (uint64_t)(uintptr_t)cmd_buf;
1817 	zc.zc_history_len = strlen(cmd_buf);
1818 
1819 	/* overloading zc_history_offset */
1820 	zc.zc_history_offset = pool_create;
1821 
1822 	(void) ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_LOG_HISTORY, &zc);
1823 }
1824 
1825 /*
1826  * Perform ioctl to get some command history of a pool.
1827  *
1828  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
1829  * logical offset of the history buffer to start reading from.
1830  *
1831  * Upon return, 'off' is the next logical offset to read from and
1832  * 'len' is the actual amount of bytes read into 'buf'.
1833  */
1834 static int
1835 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
1836 {
1837 	zfs_cmd_t zc = { 0 };
1838 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1839 
1840 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1841 
1842 	zc.zc_history = (uint64_t)(uintptr_t)buf;
1843 	zc.zc_history_len = *len;
1844 	zc.zc_history_offset = *off;
1845 
1846 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
1847 		switch (errno) {
1848 		case EPERM:
1849 			return (zfs_error_fmt(hdl, EZFS_PERM,
1850 			    dgettext(TEXT_DOMAIN,
1851 			    "cannot show history for pool '%s'"),
1852 			    zhp->zpool_name));
1853 		case ENOENT:
1854 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
1855 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1856 			    "'%s'"), zhp->zpool_name));
1857 		case ENOTSUP:
1858 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
1859 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1860 			    "'%s', pool must be upgraded"), zhp->zpool_name));
1861 		default:
1862 			return (zpool_standard_error_fmt(hdl, errno,
1863 			    dgettext(TEXT_DOMAIN,
1864 			    "cannot get history for '%s'"), zhp->zpool_name));
1865 		}
1866 	}
1867 
1868 	*len = zc.zc_history_len;
1869 	*off = zc.zc_history_offset;
1870 
1871 	return (0);
1872 }
1873 
1874 /*
1875  * Process the buffer of nvlists, unpacking and storing each nvlist record
1876  * into 'records'.  'leftover' is set to the number of bytes that weren't
1877  * processed as there wasn't a complete record.
1878  */
1879 static int
1880 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
1881     nvlist_t ***records, uint_t *numrecords)
1882 {
1883 	uint64_t reclen;
1884 	nvlist_t *nv;
1885 	int i;
1886 
1887 	while (bytes_read > sizeof (reclen)) {
1888 
1889 		/* get length of packed record (stored as little endian) */
1890 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
1891 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
1892 
1893 		if (bytes_read < sizeof (reclen) + reclen)
1894 			break;
1895 
1896 		/* unpack record */
1897 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
1898 			return (ENOMEM);
1899 		bytes_read -= sizeof (reclen) + reclen;
1900 		buf += sizeof (reclen) + reclen;
1901 
1902 		/* add record to nvlist array */
1903 		(*numrecords)++;
1904 		if (ISP2(*numrecords + 1)) {
1905 			*records = realloc(*records,
1906 			    *numrecords * 2 * sizeof (nvlist_t *));
1907 		}
1908 		(*records)[*numrecords - 1] = nv;
1909 	}
1910 
1911 	*leftover = bytes_read;
1912 	return (0);
1913 }
1914 
1915 #define	HIS_BUF_LEN	(128*1024)
1916 
1917 /*
1918  * Retrieve the command history of a pool.
1919  */
1920 int
1921 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
1922 {
1923 	char buf[HIS_BUF_LEN];
1924 	uint64_t off = 0;
1925 	nvlist_t **records = NULL;
1926 	uint_t numrecords = 0;
1927 	int err, i;
1928 
1929 	do {
1930 		uint64_t bytes_read = sizeof (buf);
1931 		uint64_t leftover;
1932 
1933 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
1934 			break;
1935 
1936 		/* if nothing else was read in, we're at EOF, just return */
1937 		if (!bytes_read)
1938 			break;
1939 
1940 		if ((err = zpool_history_unpack(buf, bytes_read,
1941 		    &leftover, &records, &numrecords)) != 0)
1942 			break;
1943 		off -= leftover;
1944 
1945 		/* CONSTCOND */
1946 	} while (1);
1947 
1948 	if (!err) {
1949 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
1950 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
1951 		    records, numrecords) == 0);
1952 	}
1953 	for (i = 0; i < numrecords; i++)
1954 		nvlist_free(records[i]);
1955 	free(records);
1956 
1957 	return (err);
1958 }
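/*
 * Illustrative sketch (editorial addition): printing the unpacked history
 * records.  This assumes each record nvlist carries ZPOOL_HIST_TIME and
 * ZPOOL_HIST_CMD entries, which is how zpool(1M) renders 'zpool history'.
 */
#if 0
	nvlist_t *nvhis, **records;
	uint_t numrecords, i;
	uint64_t tsec;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return (-1);

	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		verify(nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
		    &tsec) == 0);
		verify(nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0);
		(void) printf("%llu %s\n", (u_longlong_t)tsec, cmd);
	}

	nvlist_free(nvhis);
#endif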
1959 
1960 void
1961 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
1962     char *pathname, size_t len)
1963 {
1964 	zfs_cmd_t zc = { 0 };
1965 	boolean_t mounted = B_FALSE;
1966 	char *mntpnt = NULL;
1967 	char dsname[MAXNAMELEN];
1968 
1969 	if (dsobj == 0) {
1970 		/* special case for the MOS */
1971 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
1972 		return;
1973 	}
1974 
1975 	/* get the dataset's name */
1976 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1977 	zc.zc_obj = dsobj;
1978 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
1979 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
1980 		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
1983 		return;
1984 	}
1985 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
1986 
1987 	/* find out if the dataset is mounted */
1988 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
1989 
1990 	/* get the corrupted object's path */
1991 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
1992 	zc.zc_obj = obj;
1993 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
1994 	    &zc) == 0) {
1995 		if (mounted) {
1996 			(void) snprintf(pathname, len, "%s%s", mntpnt,
1997 			    zc.zc_value);
1998 		} else {
1999 			(void) snprintf(pathname, len, "%s:%s",
2000 			    dsname, zc.zc_value);
2001 		}
2002 	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (u_longlong_t)obj);
2004 	}
2005 	free(mntpnt);
2006 }
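
/*
 * Illustrative sketch: how a consumer such as "zpool status -v" might
 * render one persistent error-log entry with zpool_obj_to_path().  The
 * function name and buffer size are hypothetical.
 */
#if 0
static void
example_print_errlog_entry(zpool_handle_t *zhp, zbookmark_t *zb)
{
	char pathname[MAXPATHLEN * 2];

	zpool_obj_to_path(zhp, zb->zb_objset, zb->zb_object,
	    pathname, sizeof (pathname));
	(void) printf("%s\n", pathname);
}
#endif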
2007 
2008 #define	RDISK_ROOT	"/dev/rdsk"
2009 #define	BACKUP_SLICE	"s2"
2010 /*
 * Don't start the slice at the default block of 34; many storage
 * devices use a stripe width of 128K, so start the slice at block 256
 * instead (256 blocks of 512 bytes = 128K) to align with that stripe.
2013  */
2014 #define	NEW_START_BLOCK	256
2015 
2016 /*
 * Determine where a partition starts on a disk in the current
 * configuration.
2019  */
2020 static diskaddr_t
2021 find_start_block(nvlist_t *config)
2022 {
2023 	nvlist_t **child;
2024 	uint_t c, children;
2025 	char *path;
2026 	diskaddr_t sb = MAXOFFSET_T;
2027 	int fd;
2028 	char diskname[MAXPATHLEN];
2029 	uint64_t wholedisk;
2030 
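	/*
	 * If the config has no children it is a leaf vdev.  Only a whole
	 * disk that ZFS labeled itself carries a slice 0 whose start
	 * block is worth reusing.
	 */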
2031 	if (nvlist_lookup_nvlist_array(config,
2032 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2033 		if (nvlist_lookup_uint64(config,
2034 		    ZPOOL_CONFIG_WHOLE_DISK,
2035 		    &wholedisk) != 0 || !wholedisk) {
2036 			return (MAXOFFSET_T);
2037 		}
2038 		if (nvlist_lookup_string(config,
2039 		    ZPOOL_CONFIG_PATH, &path) != 0) {
2040 			return (MAXOFFSET_T);
2041 		}
2042 
2043 		(void) snprintf(diskname, sizeof (diskname), "%s%s",
2044 		    RDISK_ROOT, strrchr(path, '/'));
2045 		if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2046 			struct dk_gpt *vtoc;
2047 			if (efi_alloc_and_read(fd, &vtoc) >= 0) {
2048 				sb = vtoc->efi_parts[0].p_start;
2049 				efi_free(vtoc);
2050 			}
2051 			(void) close(fd);
2052 		}
2053 		return (sb);
2054 	}
2055 
2056 	for (c = 0; c < children; c++) {
2057 		sb = find_start_block(child[c]);
2058 		if (sb != MAXOFFSET_T) {
2059 			return (sb);
2060 		}
2061 	}
2062 	return (MAXOFFSET_T);
2063 }
2064 
2065 /*
2066  * Label an individual disk.  The name provided is the short name,
2067  * stripped of any leading /dev path.
2068  */
2069 int
2070 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2071 {
2072 	char path[MAXPATHLEN];
2073 	struct dk_gpt *vtoc;
2074 	int fd;
2075 	size_t resv = EFI_MIN_RESV_SIZE;
2076 	uint64_t slice_size;
2077 	diskaddr_t start_block;
2078 	char errbuf[1024];
2079 
2080 	if (zhp) {
2081 		nvlist_t *nvroot;
2082 
2083 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2084 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2085 
2086 		if (zhp->zpool_start_block == 0)
2087 			start_block = find_start_block(nvroot);
2088 		else
2089 			start_block = zhp->zpool_start_block;
2090 		zhp->zpool_start_block = start_block;
2091 	} else {
2092 		/* new pool */
2093 		start_block = NEW_START_BLOCK;
2094 	}
2095 
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);
2098 
2099 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2100 		/*
2101 		 * This shouldn't happen.  We've long since verified that this
2102 		 * is a valid device.
2103 		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to open device"));
2106 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2107 	}
2108 
2109 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2110 		/*
2111 		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
2113 		 */
2114 		if (errno == ENOMEM)
2115 			(void) no_memory(hdl);
2116 
2117 		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));
2120 
2121 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2122 	}
2123 
2124 	slice_size = vtoc->efi_last_u_lba + 1;
2125 	slice_size -= EFI_MIN_RESV_SIZE;
2126 	if (start_block == MAXOFFSET_T)
2127 		start_block = NEW_START_BLOCK;
2128 	slice_size -= start_block;
2129 
2130 	vtoc->efi_parts[0].p_start = start_block;
2131 	vtoc->efi_parts[0].p_size = slice_size;
2132 
2133 	/*
2134 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2135 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2136 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2137 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2138 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2139 	 * can get, in the absence of V_OTHER.
2140 	 */
2141 	vtoc->efi_parts[0].p_tag = V_USR;
2142 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2143 
2144 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2145 	vtoc->efi_parts[8].p_size = resv;
2146 	vtoc->efi_parts[8].p_tag = V_RESERVED;
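	/*
	 * Resulting layout: slice 0 spans [start_block, start_block +
	 * slice_size) and slice 8 holds the EFI_MIN_RESV_SIZE reserved
	 * sectors at the very end of the disk.
	 */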
2147 
2148 	if (efi_write(fd, vtoc) != 0) {
2149 		/*
2150 		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and provide a
		 * specific slice.
2154 		 */
2155 		(void) close(fd);
2156 		efi_free(vtoc);
2157 
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
2161 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2162 	}
2163 
2164 	(void) close(fd);
2165 	efi_free(vtoc);
2166 	return (0);
2167 }
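
/*
 * Illustrative sketch: labeling a fresh whole disk for a pool that does
 * not yet exist (zhp may be NULL in that case, per the code above).  The
 * function name is hypothetical; "name" is a short device name such as
 * "c1t0d0" with no /dev prefix.
 */
#if 0
static int
example_label_new_disk(libzfs_handle_t *hdl, char *name)
{
	return (zpool_label_disk(hdl, NULL, name));
}
#endif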
2168 
2169 int
2170 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
2171 {
2172 	zfs_cmd_t zc = { 0 };
2173 	int ret = -1;
2174 	char errbuf[1024];
2175 	nvlist_t *nvl = NULL;
2176 	nvlist_t *realprops;
2177 
2178 	(void) snprintf(errbuf, sizeof (errbuf),
2179 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
2180 	    zhp->zpool_name);
2181 
2182 	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
2183 		zfs_error_aux(zhp->zpool_hdl,
2184 		    dgettext(TEXT_DOMAIN, "pool must be "
2185 		    "upgraded to support pool properties"));
2186 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
2187 	}
2188 
2189 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2190 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
2191 
2192 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
2193 	    nvlist_add_string(nvl, propname, propval) != 0) {
2194 		return (no_memory(zhp->zpool_hdl));
2195 	}
2196 
2197 	if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
2198 	    zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
2199 		nvlist_free(nvl);
2200 		return (-1);
2201 	}
2202 
2203 	nvlist_free(nvl);
2204 	nvl = realprops;
2205 
2206 	/*
2207 	 * Execute the corresponding ioctl() to set this property.
2208 	 */
2209 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2210 
	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SET_PROPS, &zc);
	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);
2216 
2217 	if (ret)
2218 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
2219 
2220 	return (ret);
2221 }
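
/*
 * Illustrative sketch: setting the "autoreplace" pool property through
 * zpool_set_prop(), the same path "zpool set autoreplace=on <pool>"
 * takes.  The function name is hypothetical.
 */
#if 0
static int
example_enable_autoreplace(zpool_handle_t *zhp)
{
	return (zpool_set_prop(zhp,
	    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), "on"));
}
#endif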
2222 
2223 uint64_t
2224 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop)
2225 {
2226 	uint64_t value;
2227 	nvlist_t *nvp;
2228 
2229 	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS)
2230 		return (0);
2231 
2232 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2233 		return (zpool_prop_default_numeric(prop));
2234 
2235 	switch (prop) {
2236 	case ZPOOL_PROP_AUTOREPLACE:
2237 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2238 		    zpool_prop_to_name(prop), &nvp) != 0) {
2239 			value = zpool_prop_default_numeric(prop);
2240 		} else {
2241 			VERIFY(nvlist_lookup_uint64(nvp, ZFS_PROP_VALUE,
2242 			    &value) == 0);
2243 		}
		return (value);
2246 
2247 	default:
2248 		assert(0);
2249 	}
2250 
2251 	return (0);
2252 }
2253 
2254 int
2255 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
2256     size_t proplen, zfs_source_t *srctype)
2257 {
2258 	uint64_t value;
2259 	char msg[1024], *strvalue;
2260 	nvlist_t *nvp;
2261 	zfs_source_t src = ZFS_SRC_NONE;
2262 
2263 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2264 	    "cannot get property '%s'"), zpool_prop_to_name(prop));
2265 
2266 	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
2267 		zfs_error_aux(zhp->zpool_hdl,
2268 		    dgettext(TEXT_DOMAIN, "pool must be "
2269 		    "upgraded to support pool properties"));
2270 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
2271 	}
2272 
2273 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
2274 	    prop != ZPOOL_PROP_NAME)
2275 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));
2276 
2277 	switch (prop) {
2278 	case ZPOOL_PROP_NAME:
2279 		(void) strlcpy(propbuf, zhp->zpool_name, proplen);
2280 		break;
2281 
2282 	case ZPOOL_PROP_BOOTFS:
2283 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2284 		    zpool_prop_to_name(prop), &nvp) != 0) {
2285 			strvalue = (char *)zfs_prop_default_string(prop);
2286 			if (strvalue == NULL)
2287 				strvalue = "-";
2288 			src = ZFS_SRC_DEFAULT;
2289 		} else {
2290 			VERIFY(nvlist_lookup_uint64(nvp,
2291 			    ZFS_PROP_SOURCE, &value) == 0);
2292 			src = value;
2293 			VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
2294 			    &strvalue) == 0);
2295 			if (strlen(strvalue) >= proplen)
2296 				return (-1);
2297 		}
2298 		(void) strlcpy(propbuf, strvalue, proplen);
2299 		break;
2300 
2301 	case ZPOOL_PROP_AUTOREPLACE:
2302 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2303 		    zpool_prop_to_name(prop), &nvp) != 0) {
2304 			value = zpool_prop_default_numeric(prop);
2305 			src = ZFS_SRC_DEFAULT;
2306 		} else {
2307 			VERIFY(nvlist_lookup_uint64(nvp,
2308 			    ZFS_PROP_SOURCE, &value) == 0);
2309 			src = value;
2310 			VERIFY(nvlist_lookup_uint64(nvp, ZFS_PROP_VALUE,
2311 			    &value) == 0);
2312 		}
2313 		(void) strlcpy(propbuf, value ? "on" : "off", proplen);
2314 		break;
2315 
2316 	default:
2317 		return (-1);
2318 	}
2319 	if (srctype)
2320 		*srctype = src;
2321 	return (0);
2322 }
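
/*
 * Illustrative sketch: reading the "bootfs" property into a caller
 * buffer, along with where the value came from.  The function name is
 * hypothetical.
 */
#if 0
static void
example_show_bootfs(zpool_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];
	zfs_source_t src;

	if (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, buf,
	    sizeof (buf), &src) == 0)
		(void) printf("bootfs=%s (%s)\n", buf,
		    src == ZFS_SRC_DEFAULT ? "default" : "local");
}
#endif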
2323 
2324 int
2325 zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
2326 {
2327 	return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
2328 }
2329 
2331 int
2332 zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
2333 {
2334 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2335 	zpool_proplist_t *entry;
2336 	char buf[ZFS_MAXPROPLEN];
2337 
2338 	if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
2339 		return (-1);
2340 
2341 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
2343 		if (entry->pl_fixed)
2344 			continue;
2345 
2346 		if (entry->pl_prop != ZFS_PROP_INVAL &&
2347 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
2348 		    NULL) == 0) {
2349 			if (strlen(buf) > entry->pl_width)
2350 				entry->pl_width = strlen(buf);
2351 		}
2352 	}
2353 
2354 	return (0);
2355 }
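
/*
 * Illustrative sketch: building a pool property list and letting
 * zpool_expand_proplist() size the output columns, as "zpool get" does.
 * The function name and field string are hypothetical, and cleanup of
 * the list is omitted here.
 */
#if 0
static void
example_column_widths(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	zpool_proplist_t *pl = NULL, *entry;

	if (zpool_get_proplist(hdl, "name,bootfs,autoreplace", &pl) != 0)
		return;
	if (zpool_expand_proplist(zhp, &pl) == 0) {
		for (entry = pl; entry != NULL; entry = entry->pl_next)
			(void) printf("width %u\n", (uint_t)entry->pl_width);
	}
}
#endif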
2356