xref: /titanic_51/usr/src/lib/libzfs/common/libzfs_pool.c (revision 858a4b9997a29c40b725e606eb9bc3ac0a8c765b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <sys/zio.h>
45 #include <strings.h>
46 
47 #include "zfs_namecheck.h"
48 #include "zfs_prop.h"
49 #include "libzfs_impl.h"
50 
51 /*
52  * Validate the given pool name, optionally recording an extended error
53  * message against 'hdl'.
54  */
55 static boolean_t
56 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
57 {
58 	namecheck_err_t why;
59 	char what;
60 	int ret;
61 
62 	ret = pool_namecheck(pool, &why, &what);
63 
64 	/*
65 	 * The rules for reserved pool names were extended at a later point.
66 	 * But we need to support users with existing pools that may now be
67 	 * invalid.  So we only check for this expanded set of names during a
68 	 * create (or import), and only in userland.
69 	 */
70 	if (ret == 0 && !isopen &&
71 	    (strncmp(pool, "mirror", 6) == 0 ||
72 	    strncmp(pool, "raidz", 5) == 0 ||
73 	    strncmp(pool, "spare", 5) == 0)) {
74 		zfs_error_aux(hdl,
75 		    dgettext(TEXT_DOMAIN, "name is reserved"));
76 		return (B_FALSE);
77 	}
78 
79 
80 	if (ret != 0) {
81 		if (hdl != NULL) {
82 			switch (why) {
83 			case NAME_ERR_TOOLONG:
84 				zfs_error_aux(hdl,
85 				    dgettext(TEXT_DOMAIN, "name is too long"));
86 				break;
87 
88 			case NAME_ERR_INVALCHAR:
89 				zfs_error_aux(hdl,
90 				    dgettext(TEXT_DOMAIN, "invalid character "
91 				    "'%c' in pool name"), what);
92 				break;
93 
94 			case NAME_ERR_NOLETTER:
95 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
96 				    "name must begin with a letter"));
97 				break;
98 
99 			case NAME_ERR_RESERVED:
100 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
101 				    "name is reserved"));
102 				break;
103 
104 			case NAME_ERR_DISKLIKE:
105 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
106 				    "pool name is reserved"));
107 				break;
108 
109 			case NAME_ERR_LEADING_SLASH:
110 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
111 				    "leading slash in name"));
112 				break;
113 
114 			case NAME_ERR_EMPTY_COMPONENT:
115 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
116 				    "empty component in name"));
117 				break;
118 
119 			case NAME_ERR_TRAILING_SLASH:
120 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
121 				    "trailing slash in name"));
122 				break;
123 
124 			case NAME_ERR_MULTIPLE_AT:
125 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
126 				    "multiple '@' delimiters in name"));
127 				break;
128 
129 			}
130 		}
131 		return (B_FALSE);
132 	}
133 
134 	return (B_TRUE);
135 }
136 
137 static int
138 zpool_get_all_props(zpool_handle_t *zhp)
139 {
140 	zfs_cmd_t zc = { 0 };
141 	libzfs_handle_t *hdl = zhp->zpool_hdl;
142 
143 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
144 
145 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
146 		return (-1);
147 
148 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
149 		if (errno == ENOMEM) {
150 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
151 				zcmd_free_nvlists(&zc);
152 				return (-1);
153 			}
154 		} else {
155 			zcmd_free_nvlists(&zc);
156 			return (-1);
157 		}
158 	}
159 
160 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
161 		zcmd_free_nvlists(&zc);
162 		return (-1);
163 	}
164 
165 	zcmd_free_nvlists(&zc);
166 
167 	return (0);
168 }
169 
170 /*
171  * Open a handle to the given pool, even if the pool is currently in the FAULTED
172  * state.
173  */
174 zpool_handle_t *
175 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
176 {
177 	zpool_handle_t *zhp;
178 	boolean_t missing;
179 
180 	/*
181 	 * Make sure the pool name is valid.
182 	 */
183 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
184 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
185 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
186 		    pool);
187 		return (NULL);
188 	}
189 
190 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
191 		return (NULL);
192 
193 	zhp->zpool_hdl = hdl;
194 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
195 
196 	if (zpool_refresh_stats(zhp, &missing) != 0) {
197 		zpool_close(zhp);
198 		return (NULL);
199 	}
200 
201 	if (missing) {
202 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
203 		    "no such pool"));
204 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
205 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
206 		    pool);
207 		zpool_close(zhp);
208 		return (NULL);
209 	}
210 
211 	return (zhp);
212 }
213 
214 /*
215  * Like the above, but silent on error.  Used when iterating over pools (because
216  * the configuration cache may be out of date).
217  */
218 int
219 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
220 {
221 	zpool_handle_t *zhp;
222 	boolean_t missing;
223 
224 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
225 		return (-1);
226 
227 	zhp->zpool_hdl = hdl;
228 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
229 
230 	if (zpool_refresh_stats(zhp, &missing) != 0) {
231 		zpool_close(zhp);
232 		return (-1);
233 	}
234 
235 	if (missing) {
236 		zpool_close(zhp);
237 		*ret = NULL;
238 		return (0);
239 	}
240 
241 	*ret = zhp;
242 	return (0);
243 }
244 
245 /*
246  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
247  * state.
248  */
249 zpool_handle_t *
250 zpool_open(libzfs_handle_t *hdl, const char *pool)
251 {
252 	zpool_handle_t *zhp;
253 
254 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
255 		return (NULL);
256 
257 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
258 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
259 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
260 		zpool_close(zhp);
261 		return (NULL);
262 	}
263 
264 	return (zhp);
265 }
266 
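/*
 * A minimal usage sketch, assuming an open libzfs handle 'hdl' (for
 * example from libzfs_init()); the pool name "tank" is hypothetical.
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) == NULL)
 *		return (-1);
 *	(void) printf("%s: version %llu, %llu of %llu bytes in use\n",
 *	    zpool_get_name(zhp), (u_longlong_t)zpool_get_version(zhp),
 *	    (u_longlong_t)zpool_get_space_used(zhp),
 *	    (u_longlong_t)zpool_get_space_total(zhp));
 *	zpool_close(zhp);
 */
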
267 /*
268  * Close the handle.  Simply frees the memory associated with the handle.
269  */
270 void
271 zpool_close(zpool_handle_t *zhp)
272 {
273 	if (zhp->zpool_config)
274 		nvlist_free(zhp->zpool_config);
275 	if (zhp->zpool_old_config)
276 		nvlist_free(zhp->zpool_old_config);
277 	if (zhp->zpool_props)
278 		nvlist_free(zhp->zpool_props);
279 	free(zhp);
280 }
281 
282 /*
283  * Return the name of the pool.
284  */
285 const char *
286 zpool_get_name(zpool_handle_t *zhp)
287 {
288 	return (zhp->zpool_name);
289 }
290 
291 /*
292  * Return the GUID of the pool.
293  */
294 uint64_t
295 zpool_get_guid(zpool_handle_t *zhp)
296 {
297 	uint64_t guid;
298 
299 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
300 	    &guid) == 0);
301 	return (guid);
302 }
303 
304 /*
305  * Return the version of the pool.
306  */
307 uint64_t
308 zpool_get_version(zpool_handle_t *zhp)
309 {
310 	uint64_t version;
311 
312 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
313 	    &version) == 0);
314 
315 	return (version);
316 }
317 
318 /*
319  * Return the amount of space currently consumed by the pool.
320  */
321 uint64_t
322 zpool_get_space_used(zpool_handle_t *zhp)
323 {
324 	nvlist_t *nvroot;
325 	vdev_stat_t *vs;
326 	uint_t vsc;
327 
328 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
329 	    &nvroot) == 0);
330 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
331 	    (uint64_t **)&vs, &vsc) == 0);
332 
333 	return (vs->vs_alloc);
334 }
335 
336 /*
337  * Return the total space in the pool.
338  */
339 uint64_t
340 zpool_get_space_total(zpool_handle_t *zhp)
341 {
342 	nvlist_t *nvroot;
343 	vdev_stat_t *vs;
344 	uint_t vsc;
345 
346 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
347 	    &nvroot) == 0);
348 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
349 	    (uint64_t **)&vs, &vsc) == 0);
350 
351 	return (vs->vs_space);
352 }
353 
354 /*
355  * Return the alternate root for this pool, if any.
356  */
357 int
358 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
359 {
360 	zfs_cmd_t zc = { 0 };
361 
362 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
363 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
364 	    zc.zc_value[0] == '\0')
365 		return (-1);
366 
367 	(void) strlcpy(buf, zc.zc_value, buflen);
368 
369 	return (0);
370 }
371 
372 /*
373  * Return the state of the pool (ACTIVE or UNAVAILABLE)
374  */
375 int
376 zpool_get_state(zpool_handle_t *zhp)
377 {
378 	return (zhp->zpool_state);
379 }
380 
381 /*
382  * Create the named pool, using the provided vdev list.  It is assumed
383  * that the consumer has already validated the contents of the nvlist, so we
384  * don't have to worry about error semantics.
385  */
386 int
387 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
388     const char *altroot)
389 {
390 	zfs_cmd_t zc = { 0 };
391 	char msg[1024];
392 
393 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
394 	    "cannot create '%s'"), pool);
395 
396 	if (!zpool_name_valid(hdl, B_FALSE, pool))
397 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
398 
399 	if (altroot != NULL && altroot[0] != '/')
400 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
401 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
402 
403 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
404 		return (-1);
405 
406 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
407 
408 	if (altroot != NULL)
409 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
410 
411 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
412 		zcmd_free_nvlists(&zc);
413 
414 		switch (errno) {
415 		case EBUSY:
416 			/*
417 			 * This can happen if the user has specified the same
418 			 * device multiple times.  We can't reliably detect this
419 			 * until we try to add it and see we already have a
420 			 * label.
421 			 */
422 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
423 			    "one or more vdevs refer to the same device"));
424 			return (zfs_error(hdl, EZFS_BADDEV, msg));
425 
426 		case EOVERFLOW:
427 			/*
428 			 * This occurs when one of the devices is below
429 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
430 			 * device was the problem device since there's no
431 			 * reliable way to determine device size from userland.
432 			 */
433 			{
434 				char buf[64];
435 
436 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
437 
438 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
439 				    "one or more devices is less than the "
440 				    "minimum size (%s)"), buf);
441 			}
442 			return (zfs_error(hdl, EZFS_BADDEV, msg));
443 
444 		case ENOSPC:
445 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
446 			    "one or more devices is out of space"));
447 			return (zfs_error(hdl, EZFS_BADDEV, msg));
448 
449 		default:
450 			return (zpool_standard_error(hdl, errno, msg));
451 		}
452 	}
453 
454 	zcmd_free_nvlists(&zc);
455 
456 	/*
457 	 * If this is an alternate root pool, then we automatically set the
458 	 * mountpoint of the root dataset to be '/'.
459 	 */
460 	if (altroot != NULL) {
461 		zfs_handle_t *zhp;
462 
463 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
464 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
465 		    "/") == 0);
466 
467 		zfs_close(zhp);
468 	}
469 
470 	return (0);
471 }
472 
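/*
 * A minimal usage sketch, assuming the caller has already built a valid
 * vdev specification 'nvroot' (as the zpool(1M) command does before it
 * calls in here); the pool name and alternate root are hypothetical.
 *
 *	if (zpool_create(hdl, "tank", nvroot, "/a") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */
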
473 /*
474  * Destroy the given pool.  It is up to the caller to ensure that there are no
475  * datasets left in the pool.
476  */
477 int
478 zpool_destroy(zpool_handle_t *zhp)
479 {
480 	zfs_cmd_t zc = { 0 };
481 	zfs_handle_t *zfp = NULL;
482 	libzfs_handle_t *hdl = zhp->zpool_hdl;
483 	char msg[1024];
484 
485 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
486 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
487 	    ZFS_TYPE_FILESYSTEM)) == NULL)
488 		return (-1);
489 
490 	if (zpool_remove_zvol_links(zhp) != 0)
491 		return (-1);
492 
493 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
494 
495 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
496 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
497 		    "cannot destroy '%s'"), zhp->zpool_name);
498 
499 		if (errno == EROFS) {
500 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 			    "one or more devices is read only"));
502 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
503 		} else {
504 			(void) zpool_standard_error(hdl, errno, msg);
505 		}
506 
507 		if (zfp)
508 			zfs_close(zfp);
509 		return (-1);
510 	}
511 
512 	if (zfp) {
513 		remove_mountpoint(zfp);
514 		zfs_close(zfp);
515 	}
516 
517 	return (0);
518 }
519 
520 /*
521  * Add the given vdevs to the pool.  The caller must have already performed the
522  * necessary verification to ensure that the vdev specification is well-formed.
523  */
524 int
525 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
526 {
527 	zfs_cmd_t zc = { 0 };
528 	int ret;
529 	libzfs_handle_t *hdl = zhp->zpool_hdl;
530 	char msg[1024];
531 	nvlist_t **spares;
532 	uint_t nspares;
533 
534 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
535 	    "cannot add to '%s'"), zhp->zpool_name);
536 
537 	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
538 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
539 	    &spares, &nspares) == 0) {
540 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
541 		    "upgraded to add hot spares"));
542 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
543 	}
544 
545 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
546 		return (-1);
547 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
548 
549 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
550 		switch (errno) {
551 		case EBUSY:
552 			/*
553 			 * This can happen if the user has specified the same
554 			 * device multiple times.  We can't reliably detect this
555 			 * until we try to add it and see we already have a
556 			 * label.
557 			 */
558 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559 			    "one or more vdevs refer to the same device"));
560 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
561 			break;
562 
563 		case EOVERFLOW:
564 			/*
565 			 * This occurs when one of the devices is below
566 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
567 			 * device was the problem device since there's no
568 			 * reliable way to determine device size from userland.
569 			 */
570 			{
571 				char buf[64];
572 
573 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
574 
575 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
576 				    "device is less than the minimum "
577 				    "size (%s)"), buf);
578 			}
579 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
580 			break;
581 
582 		case ENOTSUP:
583 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
584 			    "pool must be upgraded to add raidz2 vdevs"));
585 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
586 			break;
587 
588 		case EDOM:
589 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
590 			    "root pool can not have multiple vdevs"));
591 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
592 			break;
593 
594 		default:
595 			(void) zpool_standard_error(hdl, errno, msg);
596 		}
597 
598 		ret = -1;
599 	} else {
600 		ret = 0;
601 	}
602 
603 	zcmd_free_nvlists(&zc);
604 
605 	return (ret);
606 }
607 
608 /*
609  * Exports the pool from the system.  The caller must ensure that there are no
610  * mounted datasets in the pool.
611  */
612 int
613 zpool_export(zpool_handle_t *zhp)
614 {
615 	zfs_cmd_t zc = { 0 };
616 
617 	if (zpool_remove_zvol_links(zhp) != 0)
618 		return (-1);
619 
620 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
621 
622 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
623 		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
624 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
625 		    zhp->zpool_name));
626 	return (0);
627 }
628 
629 /*
630  * Import the given pool using the known configuration.  The configuration
631  * should have come from zpool_find_import().  The 'newname' and 'altroot'
632  * parameters control whether the pool is imported with a different name or with
633  * an alternate root, respectively.
634  */
635 int
636 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
637     const char *altroot)
638 {
639 	zfs_cmd_t zc = { 0 };
640 	char *thename;
641 	char *origname;
642 	int ret;
643 
644 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
645 	    &origname) == 0);
646 
647 	if (newname != NULL) {
648 		if (!zpool_name_valid(hdl, B_FALSE, newname))
649 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
650 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
651 			    newname));
652 		thename = (char *)newname;
653 	} else {
654 		thename = origname;
655 	}
656 
657 	if (altroot != NULL && altroot[0] != '/')
658 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
659 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
660 		    altroot));
661 
662 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
663 
664 	if (altroot != NULL)
665 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
666 	else
667 		zc.zc_value[0] = '\0';
668 
669 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
670 	    &zc.zc_guid) == 0);
671 
672 	if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
673 		return (-1);
674 
675 	ret = 0;
676 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
677 		char desc[1024];
678 		if (newname == NULL)
679 			(void) snprintf(desc, sizeof (desc),
680 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
681 			    thename);
682 		else
683 			(void) snprintf(desc, sizeof (desc),
684 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
685 			    origname, thename);
686 
687 		switch (errno) {
688 		case ENOTSUP:
689 			/*
690 			 * Unsupported version.
691 			 */
692 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
693 			break;
694 
695 		case EINVAL:
696 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
697 			break;
698 
699 		default:
700 			(void) zpool_standard_error(hdl, errno, desc);
701 		}
702 
703 		ret = -1;
704 	} else {
705 		zpool_handle_t *zhp;
706 		/*
707 		 * This should never fail, but play it safe anyway.
708 		 */
709 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
710 			ret = -1;
711 		} else if (zhp != NULL) {
712 			ret = zpool_create_zvol_links(zhp);
713 			zpool_close(zhp);
714 		}
715 	}
716 
717 	zcmd_free_nvlists(&zc);
718 	return (ret);
719 }
720 
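/*
 * A minimal usage sketch, assuming 'config' is a pool configuration
 * nvlist obtained elsewhere (e.g. from zpool_find_import()).  Passing
 * NULL for 'newname' keeps the original pool name, and NULL for
 * 'altroot' imports without an alternate root.
 *
 *	if (zpool_import(hdl, config, NULL, NULL) != 0)
 *		return (-1);
 */
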
721 /*
722  * Scrub the pool.
723  */
724 int
725 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
726 {
727 	zfs_cmd_t zc = { 0 };
728 	char msg[1024];
729 	libzfs_handle_t *hdl = zhp->zpool_hdl;
730 
731 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
732 	zc.zc_cookie = type;
733 
734 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
735 		return (0);
736 
737 	(void) snprintf(msg, sizeof (msg),
738 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
739 
740 	if (errno == EBUSY)
741 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
742 	else
743 		return (zpool_standard_error(hdl, errno, msg));
744 }
745 
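/*
 * A minimal usage sketch: request a full scrub of an open pool.  The
 * POOL_SCRUB_EVERYTHING and POOL_SCRUB_NONE values come from the
 * pool_scrub_type_t enumeration; POOL_SCRUB_NONE stops an in-progress
 * scrub.
 *
 *	if (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING) != 0)
 *		return (-1);
 */
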
746 /*
747  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
748  * spare, but FALSE if it is an INUSE spare.
749  */
750 static nvlist_t *
751 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
752     boolean_t *avail_spare)
753 {
754 	uint_t c, children;
755 	nvlist_t **child;
756 	uint64_t theguid, present;
757 	char *path;
758 	uint64_t wholedisk = 0;
759 	nvlist_t *ret;
760 
761 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
762 
763 	if (search == NULL &&
764 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
765 		/*
766 		 * If the device has never been present since import, the only
767 		 * reliable way to match the vdev is by GUID.
768 		 */
769 		if (theguid == guid)
770 			return (nv);
771 	} else if (search != NULL &&
772 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
773 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
774 		    &wholedisk);
775 		if (wholedisk) {
776 			/*
777 			 * For whole disks, the internal path has 's0', but the
778 			 * path passed in by the user doesn't.
779 			 */
780 			if (strlen(search) == strlen(path) - 2 &&
781 			    strncmp(search, path, strlen(search)) == 0)
782 				return (nv);
783 		} else if (strcmp(search, path) == 0) {
784 			return (nv);
785 		}
786 	}
787 
788 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
789 	    &child, &children) != 0)
790 		return (NULL);
791 
792 	for (c = 0; c < children; c++)
793 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
794 		    avail_spare)) != NULL)
795 			return (ret);
796 
797 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
798 	    &child, &children) == 0) {
799 		for (c = 0; c < children; c++) {
800 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
801 			    avail_spare)) != NULL) {
802 				*avail_spare = B_TRUE;
803 				return (ret);
804 			}
805 		}
806 	}
807 
808 	return (NULL);
809 }
810 
811 nvlist_t *
812 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
813 {
814 	char buf[MAXPATHLEN];
815 	const char *search;
816 	char *end;
817 	nvlist_t *nvroot;
818 	uint64_t guid;
819 
820 	guid = strtoull(path, &end, 10);
821 	if (guid != 0 && *end == '\0') {
822 		search = NULL;
823 	} else if (path[0] != '/') {
824 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
825 		search = buf;
826 	} else {
827 		search = path;
828 	}
829 
830 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
831 	    &nvroot) == 0);
832 
833 	*avail_spare = B_FALSE;
834 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
835 }
836 
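/*
 * A minimal usage sketch: look up a vdev by device path (or by GUID, if
 * the string parses as a number) and check whether it is an available
 * hot spare.  The device name "c1t0d0" is hypothetical.
 *
 *	nvlist_t *tgt;
 *	boolean_t avail_spare;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "c1t0d0", &avail_spare)) == NULL)
 *		return (-1);
 *	if (avail_spare)
 *		return (0);
 */
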
837 /*
838  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
839  */
840 static boolean_t
841 is_spare(zpool_handle_t *zhp, uint64_t guid)
842 {
843 	uint64_t spare_guid;
844 	nvlist_t *nvroot;
845 	nvlist_t **spares;
846 	uint_t nspares;
847 	int i;
848 
849 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
850 	    &nvroot) == 0);
851 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
852 	    &spares, &nspares) == 0) {
853 		for (i = 0; i < nspares; i++) {
854 			verify(nvlist_lookup_uint64(spares[i],
855 			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
856 			if (guid == spare_guid)
857 				return (B_TRUE);
858 		}
859 	}
860 
861 	return (B_FALSE);
862 }
863 
864 /*
865  * Bring the specified vdev online
866  */
867 int
868 zpool_vdev_online(zpool_handle_t *zhp, const char *path)
869 {
870 	zfs_cmd_t zc = { 0 };
871 	char msg[1024];
872 	nvlist_t *tgt;
873 	boolean_t avail_spare;
874 	libzfs_handle_t *hdl = zhp->zpool_hdl;
875 
876 	(void) snprintf(msg, sizeof (msg),
877 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
878 
879 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
880 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
881 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
882 
883 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
884 
885 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
886 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
887 
888 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
889 		return (0);
890 
891 	return (zpool_standard_error(hdl, errno, msg));
892 }
893 
894 /*
895  * Take the specified vdev offline
896  */
897 int
898 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
899 {
900 	zfs_cmd_t zc = { 0 };
901 	char msg[1024];
902 	nvlist_t *tgt;
903 	boolean_t avail_spare;
904 	libzfs_handle_t *hdl = zhp->zpool_hdl;
905 
906 	(void) snprintf(msg, sizeof (msg),
907 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
908 
909 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
910 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
911 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
912 
913 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
914 
915 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
916 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
917 
918 	zc.zc_cookie = istmp;
919 
920 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
921 		return (0);
922 
923 	switch (errno) {
924 	case EBUSY:
925 
926 		/*
927 		 * There are no other replicas of this device.
928 		 */
929 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
930 
931 	default:
932 		return (zpool_standard_error(hdl, errno, msg));
933 	}
934 }
935 
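/*
 * A minimal usage sketch: take a device offline temporarily (a non-zero
 * 'istmp' requests a temporary, non-persistent offline), then bring it
 * back online.  The device name is hypothetical.
 *
 *	if (zpool_vdev_offline(zhp, "c1t0d0", B_TRUE) != 0)
 *		return (-1);
 *	if (zpool_vdev_online(zhp, "c1t0d0") != 0)
 *		return (-1);
 */
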
936 /*
937  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
938  * a hot spare.
939  */
940 static boolean_t
941 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
942 {
943 	nvlist_t **child;
944 	uint_t c, children;
945 	char *type;
946 
947 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
948 	    &children) == 0) {
949 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
950 		    &type) == 0);
951 
952 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
953 		    children == 2 && child[which] == tgt)
954 			return (B_TRUE);
955 
956 		for (c = 0; c < children; c++)
957 			if (is_replacing_spare(child[c], tgt, which))
958 				return (B_TRUE);
959 	}
960 
961 	return (B_FALSE);
962 }
963 
964 /*
965  * Attach new_disk (fully described by nvroot) to old_disk.
966  * If 'replacing' is specified, the new disk will replace the old one.
967  */
968 int
969 zpool_vdev_attach(zpool_handle_t *zhp,
970     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
971 {
972 	zfs_cmd_t zc = { 0 };
973 	char msg[1024];
974 	int ret;
975 	nvlist_t *tgt;
976 	boolean_t avail_spare;
977 	uint64_t val;
978 	char *path;
979 	nvlist_t **child;
980 	uint_t children;
981 	nvlist_t *config_root;
982 	libzfs_handle_t *hdl = zhp->zpool_hdl;
983 
984 	if (replacing)
985 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
986 		    "cannot replace %s with %s"), old_disk, new_disk);
987 	else
988 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
989 		    "cannot attach %s to %s"), new_disk, old_disk);
990 
991 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
992 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
993 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
994 
995 	if (avail_spare)
996 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
997 
998 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
999 	zc.zc_cookie = replacing;
1000 
1001 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1002 	    &child, &children) != 0 || children != 1) {
1003 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1004 		    "new device must be a single disk"));
1005 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1006 	}
1007 
1008 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1009 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1010 
1011 	/*
1012 	 * If the target is a hot spare that has been swapped in, we can only
1013 	 * replace it with another hot spare.
1014 	 */
1015 	if (replacing &&
1016 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1017 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1018 	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
1019 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1020 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1021 		    "can only be replaced by another hot spare"));
1022 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1023 	}
1024 
1025 	/*
1026 	 * If we are attempting to replace a spare, it cannot be applied to an
1027 	 * already spared device.
1028 	 */
1029 	if (replacing &&
1030 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1031 	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1032 	    is_replacing_spare(config_root, tgt, 0)) {
1033 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1034 		    "device has already been replaced with a spare"));
1035 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1036 	}
1037 
1038 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
1039 		return (-1);
1040 
1041 	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
1042 
1043 	zcmd_free_nvlists(&zc);
1044 
1045 	if (ret == 0)
1046 		return (0);
1047 
1048 	switch (errno) {
1049 	case ENOTSUP:
1050 		/*
1051 		 * Can't attach to or replace this type of vdev.
1052 		 */
1053 		if (replacing)
1054 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1055 			    "cannot replace a replacing device"));
1056 		else
1057 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1058 			    "can only attach to mirrors and top-level "
1059 			    "disks"));
1060 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1061 		break;
1062 
1063 	case EINVAL:
1064 		/*
1065 		 * The new device must be a single disk.
1066 		 */
1067 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1068 		    "new device must be a single disk"));
1069 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1070 		break;
1071 
1072 	case EBUSY:
1073 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1074 		    new_disk);
1075 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1076 		break;
1077 
1078 	case EOVERFLOW:
1079 		/*
1080 		 * The new device is too small.
1081 		 */
1082 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1083 		    "device is too small"));
1084 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1085 		break;
1086 
1087 	case EDOM:
1088 		/*
1089 		 * The new device has a different alignment requirement.
1090 		 */
1091 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1092 		    "devices have different sector alignment"));
1093 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1094 		break;
1095 
1096 	case ENAMETOOLONG:
1097 		/*
1098 		 * The resulting top-level vdev spec won't fit in the label.
1099 		 */
1100 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1101 		break;
1102 
1103 	default:
1104 		(void) zpool_standard_error(hdl, errno, msg);
1105 	}
1106 
1107 	return (-1);
1108 }
1109 
1110 /*
1111  * Detach the specified device.
1112  */
1113 int
1114 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1115 {
1116 	zfs_cmd_t zc = { 0 };
1117 	char msg[1024];
1118 	nvlist_t *tgt;
1119 	boolean_t avail_spare;
1120 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1121 
1122 	(void) snprintf(msg, sizeof (msg),
1123 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1124 
1125 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1126 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1127 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1128 
1129 	if (avail_spare)
1130 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1131 
1132 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1133 
1134 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1135 		return (0);
1136 
1137 	switch (errno) {
1138 
1139 	case ENOTSUP:
1140 		/*
1141 		 * Can't detach from this type of vdev.
1142 		 */
1143 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1144 		    "applicable to mirror and replacing vdevs"));
1145 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1146 		break;
1147 
1148 	case EBUSY:
1149 		/*
1150 		 * There are no other replicas of this device.
1151 		 */
1152 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1153 		break;
1154 
1155 	default:
1156 		(void) zpool_standard_error(hdl, errno, msg);
1157 	}
1158 
1159 	return (-1);
1160 }
1161 
1162 /*
1163  * Remove the given device.  Currently, this is supported only for hot spares.
1164  */
1165 int
1166 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1167 {
1168 	zfs_cmd_t zc = { 0 };
1169 	char msg[1024];
1170 	nvlist_t *tgt;
1171 	boolean_t avail_spare;
1172 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1173 
1174 	(void) snprintf(msg, sizeof (msg),
1175 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1176 
1177 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1178 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1179 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1180 
1181 	if (!avail_spare) {
1182 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1183 		    "only inactive hot spares can be removed"));
1184 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1185 	}
1186 
1187 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1188 
1189 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1190 		return (0);
1191 
1192 	return (zpool_standard_error(hdl, errno, msg));
1193 }
1194 
1195 /*
1196  * Clear the errors for the pool, or the particular device if specified.
1197  */
1198 int
1199 zpool_clear(zpool_handle_t *zhp, const char *path)
1200 {
1201 	zfs_cmd_t zc = { 0 };
1202 	char msg[1024];
1203 	nvlist_t *tgt;
1204 	boolean_t avail_spare;
1205 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1206 
1207 	if (path)
1208 		(void) snprintf(msg, sizeof (msg),
1209 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1210 		    path);
1211 	else
1212 		(void) snprintf(msg, sizeof (msg),
1213 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1214 		    zhp->zpool_name);
1215 
1216 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1217 	if (path) {
1218 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1219 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1220 
1221 		if (avail_spare)
1222 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1223 
1224 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1225 		    &zc.zc_guid) == 0);
1226 	}
1227 
1228 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1229 		return (0);
1230 
1231 	return (zpool_standard_error(hdl, errno, msg));
1232 }
1233 
1234 /*
1235  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1236  * hierarchy.
1237  */
1238 int
1239 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1240     void *data)
1241 {
1242 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1243 	char (*paths)[MAXPATHLEN];
1244 	size_t size = 4;
1245 	int curr, fd, base, ret = 0;
1246 	DIR *dirp;
1247 	struct dirent *dp;
1248 	struct stat st;
1249 
1250 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
1251 		return (errno == ENOENT ? 0 : -1);
1252 
1253 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
1254 		int err = errno;
1255 		(void) close(base);
1256 		return (err == ENOENT ? 0 : -1);
1257 	}
1258 
1259 	/*
1260 	 * Oddly this wasn't a directory -- ignore that failure since we
1261 	 * know there are no links lower in the (non-existent) hierarchy.
1262 	 */
1263 	if (!S_ISDIR(st.st_mode)) {
1264 		(void) close(base);
1265 		return (0);
1266 	}
1267 
1268 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1269 		(void) close(base);
1270 		return (-1);
1271 	}
1272 
1273 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1274 	curr = 0;
1275 
1276 	while (curr >= 0) {
1277 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
1278 			goto err;
1279 
1280 		if (S_ISDIR(st.st_mode)) {
1281 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
1282 				goto err;
1283 
1284 			if ((dirp = fdopendir(fd)) == NULL) {
1285 				(void) close(fd);
1286 				goto err;
1287 			}
1288 
1289 			while ((dp = readdir(dirp)) != NULL) {
1290 				if (dp->d_name[0] == '.')
1291 					continue;
1292 
1293 				if (curr + 1 == size) {
1294 					paths = zfs_realloc(hdl, paths,
1295 					    size * sizeof (paths[0]),
1296 					    size * 2 * sizeof (paths[0]));
1297 					if (paths == NULL) {
1298 						(void) closedir(dirp);
1299 						(void) close(fd);
1300 						goto err;
1301 					}
1302 
1303 					size *= 2;
1304 				}
1305 
1306 				(void) strlcpy(paths[curr + 1], paths[curr],
1307 				    sizeof (paths[curr + 1]));
1308 				(void) strlcat(paths[curr], "/",
1309 				    sizeof (paths[curr]));
1310 				(void) strlcat(paths[curr], dp->d_name,
1311 				    sizeof (paths[curr]));
1312 				curr++;
1313 			}
1314 
1315 			(void) closedir(dirp);
1316 
1317 		} else {
1318 			if ((ret = cb(paths[curr], data)) != 0)
1319 				break;
1320 		}
1321 
1322 		curr--;
1323 	}
1324 
1325 	free(paths);
1326 	(void) close(base);
1327 
1328 	return (ret);
1329 
1330 err:
1331 	free(paths);
1332 	(void) close(base);
1333 	return (-1);
1334 }
1335 
1336 typedef struct zvol_cb {
1337 	zpool_handle_t *zcb_pool;
1338 	boolean_t zcb_create;
1339 } zvol_cb_t;
1340 
1341 /*ARGSUSED*/
1342 static int
1343 do_zvol_create(zfs_handle_t *zhp, void *data)
1344 {
1345 	int ret;
1346 
1347 	if (ZFS_IS_VOLUME(zhp))
1348 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1349 
1350 	ret = zfs_iter_children(zhp, do_zvol_create, NULL);
1351 
1352 	zfs_close(zhp);
1353 
1354 	return (ret);
1355 }
1356 
1357 /*
1358  * Iterate over all zvols in the pool and make any necessary minor nodes.
1359  */
1360 int
1361 zpool_create_zvol_links(zpool_handle_t *zhp)
1362 {
1363 	zfs_handle_t *zfp;
1364 	int ret;
1365 
1366 	/*
1367 	 * If the pool is unavailable, just return success.
1368 	 */
1369 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1370 	    zhp->zpool_name)) == NULL)
1371 		return (0);
1372 
1373 	ret = zfs_iter_children(zfp, do_zvol_create, NULL);
1374 
1375 	zfs_close(zfp);
1376 	return (ret);
1377 }
1378 
1379 static int
1380 do_zvol_remove(const char *dataset, void *data)
1381 {
1382 	zpool_handle_t *zhp = data;
1383 
1384 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
1385 }
1386 
1387 /*
1388  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
1389  * by examining the /dev links so that a corrupted pool doesn't impede this
1390  * operation.
1391  */
1392 int
1393 zpool_remove_zvol_links(zpool_handle_t *zhp)
1394 {
1395 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
1396 }
1397 
1398 /*
1399  * Convert from a devid string to a path.
1400  */
1401 static char *
1402 devid_to_path(char *devid_str)
1403 {
1404 	ddi_devid_t devid;
1405 	char *minor;
1406 	char *path;
1407 	devid_nmlist_t *list = NULL;
1408 	int ret;
1409 
1410 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1411 		return (NULL);
1412 
1413 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1414 
1415 	devid_str_free(minor);
1416 	devid_free(devid);
1417 
1418 	if (ret != 0)
1419 		return (NULL);
1420 
1421 	if ((path = strdup(list[0].devname)) == NULL)
1422 		return (NULL);
1423 
1424 	devid_free_nmlist(list);
1425 
1426 	return (path);
1427 }
1428 
1429 /*
1430  * Convert from a path to a devid string.
1431  */
1432 static char *
1433 path_to_devid(const char *path)
1434 {
1435 	int fd;
1436 	ddi_devid_t devid;
1437 	char *minor, *ret;
1438 
1439 	if ((fd = open(path, O_RDONLY)) < 0)
1440 		return (NULL);
1441 
1442 	minor = NULL;
1443 	ret = NULL;
1444 	if (devid_get(fd, &devid) == 0) {
1445 		if (devid_get_minor_name(fd, &minor) == 0)
1446 			ret = devid_str_encode(devid, minor);
1447 		if (minor != NULL)
1448 			devid_str_free(minor);
1449 		devid_free(devid);
1450 	}
1451 	(void) close(fd);
1452 
1453 	return (ret);
1454 }
1455 
1456 /*
1457  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1458  * ignore any failure here, since a common case is for an unprivileged user to
1459  * type 'zpool status', and we'll display the correct information anyway.
1460  */
1461 static void
1462 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1463 {
1464 	zfs_cmd_t zc = { 0 };
1465 
1466 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1467 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
1468 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1469 	    &zc.zc_guid) == 0);
1470 
1471 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1472 }
1473 
1474 /*
1475  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1476  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1477  * We also check if this is a whole disk, in which case we strip off the
1478  * trailing 's0' slice name.
1479  *
1480  * This routine is also responsible for identifying when disks have been
1481  * reconfigured in a new location.  The kernel will have opened the device by
1482  * devid, but the path will still refer to the old location.  To catch this, we
1483  * first do a path -> devid translation (which is fast for the common case).  If
1484  * the devid matches, we're done.  If not, we do a reverse devid -> path
1485  * translation and issue the appropriate ioctl() to update the path of the vdev.
1486  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1487  * of these checks.
1488  */
1489 char *
1490 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
1491 {
1492 	char *path, *devid;
1493 	uint64_t value;
1494 	char buf[64];
1495 
1496 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1497 	    &value) == 0) {
1498 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1499 		    &value) == 0);
1500 		(void) snprintf(buf, sizeof (buf), "%llu",
1501 		    (u_longlong_t)value);
1502 		path = buf;
1503 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1504 
1505 		if (zhp != NULL &&
1506 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
1507 			/*
1508 			 * Determine if the current path is correct.
1509 			 */
1510 			char *newdevid = path_to_devid(path);
1511 
1512 			if (newdevid == NULL ||
1513 			    strcmp(devid, newdevid) != 0) {
1514 				char *newpath;
1515 
1516 				if ((newpath = devid_to_path(devid)) != NULL) {
1517 					/*
1518 					 * Update the path appropriately.
1519 					 */
1520 					set_path(zhp, nv, newpath);
1521 					if (nvlist_add_string(nv,
1522 					    ZPOOL_CONFIG_PATH, newpath) == 0)
1523 						verify(nvlist_lookup_string(nv,
1524 						    ZPOOL_CONFIG_PATH,
1525 						    &path) == 0);
1526 					free(newpath);
1527 				}
1528 			}
1529 
1530 			if (newdevid)
1531 				devid_str_free(newdevid);
1532 		}
1533 
1534 		if (strncmp(path, "/dev/dsk/", 9) == 0)
1535 			path += 9;
1536 
1537 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1538 		    &value) == 0 && value) {
1539 			char *tmp = zfs_strdup(hdl, path);
1540 			if (tmp == NULL)
1541 				return (NULL);
1542 			tmp[strlen(path) - 2] = '\0';
1543 			return (tmp);
1544 		}
1545 	} else {
1546 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
1547 
1548 		/*
1549 		 * If it's a raidz device, we need to stick in the parity level.
1550 		 */
1551 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
1552 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
1553 			    &value) == 0);
1554 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
1555 			    (u_longlong_t)value);
1556 			path = buf;
1557 		}
1558 	}
1559 
1560 	return (zfs_strdup(hdl, path));
1561 }
1562 
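/*
 * A minimal usage sketch: the string returned by zpool_vdev_name() is
 * allocated with zfs_strdup() and must be freed by the caller.
 *
 *	char *name;
 *
 *	if ((name = zpool_vdev_name(hdl, zhp, nv)) == NULL)
 *		return (-1);
 *	(void) printf("%s\n", name);
 *	free(name);
 */
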
1563 static int
1564 zbookmark_compare(const void *a, const void *b)
1565 {
1566 	return (memcmp(a, b, sizeof (zbookmark_t)));
1567 }
1568 
1569 /*
1570  * Retrieve the persistent error log, uniquify the members, and return to the
1571  * caller.
1572  */
1573 int
1574 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
1575 {
1576 	zfs_cmd_t zc = { 0 };
1577 	uint64_t count;
1578 	zbookmark_t *zb = NULL;
1579 	int i;
1580 
1581 	/*
1582 	 * Retrieve the raw error list from the kernel.  If the number of errors
1583 	 * has increased, allocate more space and continue until we get the
1584 	 * entire list.
1585 	 */
1586 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1587 	    &count) == 0);
1588 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1589 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
1590 		return (-1);
1591 	zc.zc_nvlist_dst_size = count;
1592 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1593 	for (;;) {
1594 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1595 		    &zc) != 0) {
1596 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
1597 			if (errno == ENOMEM) {
1598 				count = zc.zc_nvlist_dst_size;
1599 				if ((zc.zc_nvlist_dst = (uintptr_t)
1600 				    zfs_alloc(zhp->zpool_hdl, count *
1601 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
1602 					return (-1);
1603 			} else {
1604 				return (-1);
1605 			}
1606 		} else {
1607 			break;
1608 		}
1609 	}
1610 
1611 	/*
1612 	 * Sort the resulting bookmarks.  This is a little confusing due to the
1613 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
1614 	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
1615 	 * _not_ copied as part of the process.  So we point the start of our
1616 	 * array appropriately and decrement the total number of elements.
1617 	 */
1618 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
1619 	    zc.zc_nvlist_dst_size;
1620 	count -= zc.zc_nvlist_dst_size;
1621 
1622 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1623 
1624 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
1625 
1626 	/*
1627 	 * Fill in the nverrlistp with nvlists of dataset and object numbers.
1628 	 */
1629 	for (i = 0; i < count; i++) {
1630 		nvlist_t *nv;
1631 
1632 		/* ignoring zb_blkid and zb_level for now */
1633 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
1634 		    zb[i-1].zb_object == zb[i].zb_object)
1635 			continue;
1636 
1637 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
1638 			goto nomem;
1639 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
1640 		    zb[i].zb_objset) != 0) {
1641 			nvlist_free(nv);
1642 			goto nomem;
1643 		}
1644 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
1645 		    zb[i].zb_object) != 0) {
1646 			nvlist_free(nv);
1647 			goto nomem;
1648 		}
1649 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
1650 			nvlist_free(nv);
1651 			goto nomem;
1652 		}
1653 		nvlist_free(nv);
1654 	}
1655 
1656 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1657 	return (0);
1658 
1659 nomem:
1660 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1661 	return (no_memory(zhp->zpool_hdl));
1662 }
1663 
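/*
 * A minimal sketch of consuming the error list, along the lines of what
 * 'zpool status -v' does: each element is an nvlist holding the dataset
 * and object numbers, which zpool_obj_to_path() (below) can turn into a
 * readable path.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *	nvlist_t *nv;
 *	uint64_t dsobj, obj;
 *	char pathname[MAXPATHLEN * 2];
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) != 0)
 *		return (-1);
 *	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *		verify(nvpair_value_nvlist(elem, &nv) == 0);
 *		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *		    &dsobj) == 0);
 *		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *		    &obj) == 0);
 *		zpool_obj_to_path(zhp, dsobj, obj, pathname,
 *		    sizeof (pathname));
 *		(void) printf("%s\n", pathname);
 *	}
 *	nvlist_free(nverrlist);
 */
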
1664 /*
1665  * Upgrade a ZFS pool to the latest on-disk version.
1666  */
1667 int
1668 zpool_upgrade(zpool_handle_t *zhp)
1669 {
1670 	zfs_cmd_t zc = { 0 };
1671 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1672 
1673 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1674 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1675 		return (zpool_standard_error_fmt(hdl, errno,
1676 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1677 		    zhp->zpool_name));
1678 
1679 	return (0);
1680 }
1681 
1682 /*
1683  * Log command history.
1684  *
1685  * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
1686  * otherwise ('zfs').  'pool_create' is B_TRUE if we are logging the creation
1687  * of the pool; B_FALSE otherwise.  'path' is the pathname containing the
1688  * poolname.  'argc' and 'argv' are used to construct the command string.
1689  */
1690 void
1691 zpool_log_history(libzfs_handle_t *hdl, int argc, char **argv, const char *path,
1692 	boolean_t pool, boolean_t pool_create)
1693 {
1694 	char cmd_buf[HIS_MAX_RECORD_LEN];
1695 	char *dspath;
1696 	zfs_cmd_t zc = { 0 };
1697 	int i;
1698 
1699 	/* construct the command string */
1700 	(void) strcpy(cmd_buf, pool ? "zpool" : "zfs");
1701 	for (i = 0; i < argc; i++) {
1702 		if (strlen(cmd_buf) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
1703 			break;
1704 		(void) strcat(cmd_buf, " ");
1705 		(void) strcat(cmd_buf, argv[i]);
1706 	}
1707 
1708 	/* figure out the poolname */
1709 	dspath = strpbrk(path, "/@");
1710 	if (dspath == NULL) {
1711 		(void) strcpy(zc.zc_name, path);
1712 	} else {
1713 		(void) strncpy(zc.zc_name, path, dspath - path);
1714 		zc.zc_name[dspath-path] = '\0';
1715 	}
1716 
1717 	zc.zc_history = (uint64_t)(uintptr_t)cmd_buf;
1718 	zc.zc_history_len = strlen(cmd_buf);
1719 
1720 	/* overloading zc_history_offset */
1721 	zc.zc_history_offset = pool_create;
1722 
1723 	(void) ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_LOG_HISTORY, &zc);
1724 }
1725 
1726 /*
1727  * Perform ioctl to get some command history of a pool.
1728  *
1729  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
1730  * logical offset of the history buffer to start reading from.
1731  *
1732  * Upon return, 'off' is the next logical offset to read from and
1733  * 'len' is the actual number of bytes read into 'buf'.
1734  */
1735 static int
1736 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
1737 {
1738 	zfs_cmd_t zc = { 0 };
1739 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1740 
1741 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1742 
1743 	zc.zc_history = (uint64_t)(uintptr_t)buf;
1744 	zc.zc_history_len = *len;
1745 	zc.zc_history_offset = *off;
1746 
1747 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
1748 		switch (errno) {
1749 		case EPERM:
1750 			return (zfs_error_fmt(hdl, EZFS_PERM,
1751 			    dgettext(TEXT_DOMAIN,
1752 			    "cannot show history for pool '%s'"),
1753 			    zhp->zpool_name));
1754 		case ENOENT:
1755 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
1756 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1757 			    "'%s'"), zhp->zpool_name));
1758 		case ENOTSUP:
1759 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
1760 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1761 			    "'%s', pool must be upgraded"), zhp->zpool_name));
1762 		default:
1763 			return (zpool_standard_error_fmt(hdl, errno,
1764 			    dgettext(TEXT_DOMAIN,
1765 			    "cannot get history for '%s'"), zhp->zpool_name));
1766 		}
1767 	}
1768 
1769 	*len = zc.zc_history_len;
1770 	*off = zc.zc_history_offset;
1771 
1772 	return (0);
1773 }
1774 
1775 /*
1776  * Process the buffer of nvlists, unpacking and storing each nvlist record
1777  * into 'records'.  'leftover' is set to the number of bytes that weren't
1778  * processed as there wasn't a complete record.
1779  */
1780 static int
1781 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
1782     nvlist_t ***records, uint_t *numrecords)
1783 {
1784 	uint64_t reclen;
1785 	nvlist_t *nv;
1786 	int i;
1787 
1788 	while (bytes_read > sizeof (reclen)) {
1789 
1790 		/* get length of packed record (stored as little endian) */
1791 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
1792 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
1793 
1794 		if (bytes_read < sizeof (reclen) + reclen)
1795 			break;
1796 
1797 		/* unpack record */
1798 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
1799 			return (ENOMEM);
1800 		bytes_read -= sizeof (reclen) + reclen;
1801 		buf += sizeof (reclen) + reclen;
1802 
1803 		/* add record to nvlist array */
1804 		(*numrecords)++;
1805 		if (ISP2(*numrecords + 1)) {
1806 			*records = realloc(*records,
1807 			    *numrecords * 2 * sizeof (nvlist_t *));
1808 		}
1809 		(*records)[*numrecords - 1] = nv;
1810 	}
1811 
1812 	*leftover = bytes_read;
1813 	return (0);
1814 }
1815 
1816 #define	HIS_BUF_LEN	(128*1024)
1817 
1818 /*
1819  * Retrieve the command history of a pool.
1820  */
1821 int
1822 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
1823 {
1824 	char buf[HIS_BUF_LEN];
1825 	uint64_t off = 0;
1826 	nvlist_t **records = NULL;
1827 	uint_t numrecords = 0;
1828 	int err, i;
1829 
1830 	do {
1831 		uint64_t bytes_read = sizeof (buf);
1832 		uint64_t leftover;
1833 
1834 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
1835 			break;
1836 
1837 		/* if nothing else was read in, we're at EOF, just return */
1838 		if (!bytes_read)
1839 			break;
1840 
1841 		if ((err = zpool_history_unpack(buf, bytes_read,
1842 		    &leftover, &records, &numrecords)) != 0)
1843 			break;
1844 		off -= leftover;
1845 
1846 		/* CONSTCOND */
1847 	} while (1);
1848 
1849 	if (!err) {
1850 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
1851 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
1852 		    records, numrecords) == 0);
1853 	}
1854 	for (i = 0; i < numrecords; i++)
1855 		nvlist_free(records[i]);
1856 	free(records);
1857 
1858 	return (err);
1859 }
1860 
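/*
 * A minimal sketch of consuming the history, assuming the
 * ZPOOL_HIST_RECORD, ZPOOL_HIST_TIME and ZPOOL_HIST_CMD keys from
 * sys/fs/zfs.h: look up the record array and print each logged command.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t nrecords, i;
 *	uint64_t tsec;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) != 0)
 *		return (-1);
 *	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *	    &records, &nrecords) == 0);
 *	for (i = 0; i < nrecords; i++) {
 *		if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
 *		    &tsec) == 0 &&
 *		    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *		    &cmd) == 0)
 *			(void) printf("%llu %s\n", (u_longlong_t)tsec,
 *			    cmd);
 *	}
 *	nvlist_free(nvhis);
 */
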
1861 void
1862 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
1863     char *pathname, size_t len)
1864 {
1865 	zfs_cmd_t zc = { 0 };
1866 	boolean_t mounted = B_FALSE;
1867 	char *mntpnt = NULL;
1868 	char dsname[MAXNAMELEN];
1869 
1870 	if (dsobj == 0) {
1871 		/* special case for the MOS */
1872 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
1873 		return;
1874 	}
1875 
1876 	/* get the dataset's name */
1877 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1878 	zc.zc_obj = dsobj;
1879 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
1880 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
1881 		/* just write out a path of two object numbers */
1882 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
1883 		    dsobj, obj);
1884 		return;
1885 	}
1886 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
1887 
1888 	/* find out if the dataset is mounted */
1889 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
1890 
1891 	/* get the corrupted object's path */
1892 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
1893 	zc.zc_obj = obj;
1894 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
1895 	    &zc) == 0) {
1896 		if (mounted) {
1897 			(void) snprintf(pathname, len, "%s%s", mntpnt,
1898 			    zc.zc_value);
1899 		} else {
1900 			(void) snprintf(pathname, len, "%s:%s",
1901 			    dsname, zc.zc_value);
1902 		}
1903 	} else {
1904 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
1905 	}
1906 	free(mntpnt);
1907 }
1908 
1909 #define	RDISK_ROOT	"/dev/rdsk"
1910 #define	BACKUP_SLICE	"s2"
1911 /*
1912  * Don't start the slice at the default block of 34; many storage
1913  * devices will use a stripe width of 128k, so start there instead.
1914  */
1915 #define	NEW_START_BLOCK	256
1916 
1917 /*
1918  * Determine where a partition starts on a disk in the current
1919  * configuration.
1920  */
1921 static diskaddr_t
1922 find_start_block(nvlist_t *config)
1923 {
1924 	nvlist_t **child;
1925 	uint_t c, children;
1926 	char *path;
1927 	diskaddr_t sb = MAXOFFSET_T;
1928 	int fd;
1929 	char diskname[MAXPATHLEN];
1930 	uint64_t wholedisk;
1931 
1932 	if (nvlist_lookup_nvlist_array(config,
1933 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
1934 		if (nvlist_lookup_uint64(config,
1935 		    ZPOOL_CONFIG_WHOLE_DISK,
1936 		    &wholedisk) != 0 || !wholedisk) {
1937 			return (MAXOFFSET_T);
1938 		}
1939 		if (nvlist_lookup_string(config,
1940 		    ZPOOL_CONFIG_PATH, &path) != 0) {
1941 			return (MAXOFFSET_T);
1942 		}
1943 
1944 		(void) snprintf(diskname, sizeof (diskname), "%s%s",
1945 		    RDISK_ROOT, strrchr(path, '/'));
1946 		if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
1947 			struct dk_gpt *vtoc;
1948 			if (efi_alloc_and_read(fd, &vtoc) >= 0) {
1949 				sb = vtoc->efi_parts[0].p_start;
1950 				efi_free(vtoc);
1951 			}
1952 			(void) close(fd);
1953 		}
1954 		return (sb);
1955 	}
1956 
1957 	for (c = 0; c < children; c++) {
1958 		sb = find_start_block(child[c]);
1959 		if (sb != MAXOFFSET_T) {
1960 			return (sb);
1961 		}
1962 	}
1963 	return (MAXOFFSET_T);
1964 }
1965 
1966 /*
1967  * Label an individual disk.  The name provided is the short name,
1968  * stripped of any leading /dev path.
1969  */
1970 int
1971 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
1972 {
1973 	char path[MAXPATHLEN];
1974 	struct dk_gpt *vtoc;
1975 	int fd;
1976 	size_t resv = EFI_MIN_RESV_SIZE;
1977 	uint64_t slice_size;
1978 	diskaddr_t start_block;
1979 	char errbuf[1024];
1980 
	/* prepare an error message prefix, used by all failure paths below */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

1981 	if (zhp) {
1982 		nvlist_t *nvroot;
1983 
1984 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
1985 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1986 
1987 		if (zhp->zpool_start_block == 0)
1988 			start_block = find_start_block(nvroot);
1989 		else
1990 			start_block = zhp->zpool_start_block;
1991 		zhp->zpool_start_block = start_block;
1992 	} else {
1993 		/* new pool */
1994 		start_block = NEW_START_BLOCK;
1995 	}
1996 
1997 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
1998 	    BACKUP_SLICE);
1999 
2000 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2001 		/*
2002 		 * This shouldn't happen.  We've long since verified that this
2003 		 * is a valid device.
2004 		 */
2005 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2006 		    "unable to open device"));
2007 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2008 	}
2009 
2010 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2011 		/*
2012 		 * The only way this can fail is if we run out of memory, or we
2013 		 * are unable to read the disk's capacity.
2014 		 */
2015 		if (errno == ENOMEM)
2016 			(void) no_memory(hdl);
2017 
2018 		(void) close(fd);
2019 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2020 		    "unable to read disk capacity"));
2021 
2022 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2023 	}
2024 
2025 	slice_size = vtoc->efi_last_u_lba + 1;
2026 	slice_size -= EFI_MIN_RESV_SIZE;
2027 	if (start_block == MAXOFFSET_T)
2028 		start_block = NEW_START_BLOCK;
2029 	slice_size -= start_block;
2030 
2031 	vtoc->efi_parts[0].p_start = start_block;
2032 	vtoc->efi_parts[0].p_size = slice_size;
2033 
2034 	/*
2035 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2036 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2037 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2038 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2039 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2040 	 * can get, in the absence of V_OTHER.
2041 	 */
2042 	vtoc->efi_parts[0].p_tag = V_USR;
2043 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2044 
2045 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2046 	vtoc->efi_parts[8].p_size = resv;
2047 	vtoc->efi_parts[8].p_tag = V_RESERVED;
2048 
2049 	if (efi_write(fd, vtoc) != 0) {
2050 		/*
2051 		 * Some block drivers (like pcata) may not support EFI
2052 		 * GPT labels.  Print out a helpful error message directing
2053 		 * the user to manually label the disk and provide a
2054 		 * specific slice.
2055 		 */
2056 		(void) close(fd);
2057 		efi_free(vtoc);
2058 
2059 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2060 		    "try using fdisk(1M) and then provide a specific slice"));
2062 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2063 	}
2064 
2065 	(void) close(fd);
2066 	efi_free(vtoc);
2067 	return (0);
2068 }
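/*
 * Illustrative use of zpool_label_disk() (the device name below is
 * hypothetical): a caller that has already validated "c1t0d0" as a whole
 * disk would invoke
 *
 *	if (zpool_label_disk(hdl, zhp, "c1t0d0") != 0)
 *		return (-1);
 *
 * which opens the backup slice (/dev/rdsk/c1t0d0s2) and writes an EFI
 * label covering the whole device.
 */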
2069 
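/*
 * Set the named pool property to the given string value.  The pool must be
 * recent enough to support properties (ZFS_VERSION_BOOTFS); the name/value
 * pair is validated by zfs_validate_properties() and then handed to the
 * kernel via ZFS_IOC_POOL_SET_PROPS.  An illustrative call (the values are
 * hypothetical):
 *
 *	if (zpool_set_prop(zhp, "bootfs", "tank/root") != 0)
 *		return (-1);
 */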
2070 int
2071 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
2072 {
2073 	zfs_cmd_t zc = { 0 };
2074 	int ret = -1;
2075 	char errbuf[1024];
2076 	nvlist_t *nvl = NULL;
2077 	nvlist_t *realprops;
2078 
2079 	(void) snprintf(errbuf, sizeof (errbuf),
2080 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
2081 	    zhp->zpool_name);
2082 
2083 	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
2084 		zfs_error_aux(zhp->zpool_hdl,
2085 		    dgettext(TEXT_DOMAIN, "pool must be "
2086 		    "upgraded to support pool properties"));
2087 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
2088 	}
2089 
2090 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2091 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
2092 
2093 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
2094 	    nvlist_add_string(nvl, propname, propval) != 0) {
2095 		return (no_memory(zhp->zpool_hdl));
2096 	}
2097 
2098 	if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
2099 	    zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
2100 		nvlist_free(nvl);
2101 		return (-1);
2102 	}
2103 
2104 	nvlist_free(nvl);
2105 	nvl = realprops;
2106 
2107 	/*
2108 	 * Execute the corresponding ioctl() to set this property.
2109 	 */
2110 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2111 
2112 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
2113 		return (-1);
2114 
2115 	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SET_PROPS, &zc);
2116 	zcmd_free_nvlists(&zc);
2117 
2118 	if (ret)
2119 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
2120 
2121 	return (ret);
2122 }
2123 
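/*
 * Retrieve the string form of a pool property into 'propbuf'.  Only the
 * pool name and the "bootfs" property are handled here; any other property
 * returns -1.  If 'srctype' is non-NULL, the source of the value is
 * returned through it.
 */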
2124 int
2125 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
2126     size_t proplen, zfs_source_t *srctype)
2127 {
2128 	uint64_t value;
2129 	char msg[1024], *strvalue;
2130 	nvlist_t *nvp;
2131 	zfs_source_t src = ZFS_SRC_NONE;
2132 
2133 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2134 	    "cannot get property '%s'"), zpool_prop_to_name(prop));
2135 
2136 	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
2137 		zfs_error_aux(zhp->zpool_hdl,
2138 		    dgettext(TEXT_DOMAIN, "pool must be "
2139 		    "upgraded to support pool properties"));
2140 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
2141 	}
2142 
2143 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2144 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));
2145 
2146 	/*
2147 	 * the "name" property is special cased
2148 	 * The "name" property is special-cased.
2149 	if (!zfs_prop_valid_for_type(prop, ZFS_TYPE_POOL) &&
2150 	    prop != ZFS_PROP_NAME)
2151 		return (-1);
2152 
2153 	switch (prop) {
2154 	case ZFS_PROP_NAME:
2155 		(void) strlcpy(propbuf, zhp->zpool_name, proplen);
2156 		break;
2157 
2158 	case ZFS_PROP_BOOTFS:
2159 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2160 		    zpool_prop_to_name(prop), &nvp) != 0) {
2161 			strvalue = (char *)zfs_prop_default_string(prop);
2162 			if (strvalue == NULL)
2163 				strvalue = "-";
2164 			src = ZFS_SRC_DEFAULT;
2165 		} else {
2166 			VERIFY(nvlist_lookup_uint64(nvp,
2167 			    ZFS_PROP_SOURCE, &value) == 0);
2168 			src = value;
2169 			VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
2170 			    &strvalue) == 0);
2171 			if (strlen(strvalue) >= proplen)
2172 				return (-1);
2173 		}
2174 		(void) strcpy(propbuf, strvalue);
2175 		break;
2176 
2177 	default:
2178 		return (-1);
2179 	}
2180 	if (srctype)
2181 		*srctype = src;
2182 	return (0);
2183 }
2184 
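/*
 * Build a pool property list from the caller-supplied 'fields' string by
 * deferring to the common libzfs implementation with ZFS_TYPE_POOL.
 */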
2185 int
2186 zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
2187 {
2188 	return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
2189 }
2190 
2191 
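/*
 * Expand the property list to cover all pool properties, then widen each
 * non-fixed column so it fits this pool's current value for that property.
 */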
2192 int
2193 zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
2194 {
2195 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2196 	zpool_proplist_t *entry;
2197 	char buf[ZFS_MAXPROPLEN];
2198 
2199 	if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
2200 		return (-1);
2201 
2202 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
2203 
2204 		if (entry->pl_fixed)
2205 			continue;
2206 
2207 		if (entry->pl_prop != ZFS_PROP_INVAL &&
2208 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
2209 		    NULL) == 0) {
2210 			if (strlen(buf) > entry->pl_width)
2211 				entry->pl_width = strlen(buf);
2212 		}
2213 	}
2214 
2215 	return (0);
2216 }
2217