xref: /freebsd/sys/contrib/openzfs/cmd/zpool/zpool_main.c (revision f126890ac5386406dadf7c4cfa9566cbb56537c5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25  * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
26  * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
27  * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
28  * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
29  * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
30  * Copyright (c) 2017 Datto Inc.
31  * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
32  * Copyright (c) 2017, Intel Corporation.
33  * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
34  * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
35  * Copyright (c) 2021, Klara Inc.
36  * Copyright [2021] Hewlett Packard Enterprise Development LP
37  */
38 
39 #include <assert.h>
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <fcntl.h>
44 #include <getopt.h>
45 #include <libgen.h>
46 #include <libintl.h>
47 #include <libuutil.h>
48 #include <locale.h>
49 #include <pthread.h>
50 #include <stdio.h>
51 #include <stdlib.h>
52 #include <string.h>
53 #include <time.h>
54 #include <unistd.h>
55 #include <pwd.h>
56 #include <zone.h>
57 #include <sys/wait.h>
58 #include <zfs_prop.h>
59 #include <sys/fs/zfs.h>
60 #include <sys/stat.h>
61 #include <sys/systeminfo.h>
62 #include <sys/fm/fs/zfs.h>
63 #include <sys/fm/util.h>
64 #include <sys/fm/protocol.h>
65 #include <sys/zfs_ioctl.h>
66 #include <sys/mount.h>
67 #include <sys/sysmacros.h>
68 
69 #include <math.h>
70 
71 #include <libzfs.h>
72 #include <libzutil.h>
73 
74 #include "zpool_util.h"
75 #include "zfs_comutil.h"
76 #include "zfeature_common.h"
77 
78 #include "statcommon.h"
79 
80 libzfs_handle_t *g_zfs;
81 
82 static int zpool_do_create(int, char **);
83 static int zpool_do_destroy(int, char **);
84 
85 static int zpool_do_add(int, char **);
86 static int zpool_do_remove(int, char **);
87 static int zpool_do_labelclear(int, char **);
88 
89 static int zpool_do_checkpoint(int, char **);
90 
91 static int zpool_do_list(int, char **);
92 static int zpool_do_iostat(int, char **);
93 static int zpool_do_status(int, char **);
94 
95 static int zpool_do_online(int, char **);
96 static int zpool_do_offline(int, char **);
97 static int zpool_do_clear(int, char **);
98 static int zpool_do_reopen(int, char **);
99 
100 static int zpool_do_reguid(int, char **);
101 
102 static int zpool_do_attach(int, char **);
103 static int zpool_do_detach(int, char **);
104 static int zpool_do_replace(int, char **);
105 static int zpool_do_split(int, char **);
106 
107 static int zpool_do_initialize(int, char **);
108 static int zpool_do_scrub(int, char **);
109 static int zpool_do_resilver(int, char **);
110 static int zpool_do_trim(int, char **);
111 
112 static int zpool_do_import(int, char **);
113 static int zpool_do_export(int, char **);
114 
115 static int zpool_do_upgrade(int, char **);
116 
117 static int zpool_do_history(int, char **);
118 static int zpool_do_events(int, char **);
119 
120 static int zpool_do_get(int, char **);
121 static int zpool_do_set(int, char **);
122 
123 static int zpool_do_sync(int, char **);
124 
125 static int zpool_do_version(int, char **);
126 
127 static int zpool_do_wait(int, char **);
128 
129 static int zpool_do_help(int argc, char **argv);
130 
131 static zpool_compat_status_t zpool_do_load_compat(
132     const char *, boolean_t *);
133 
134 /*
135  * These libumem hooks provide a reasonable set of defaults for the allocator's
136  * debugging facilities.
137  */
138 
139 #ifdef DEBUG
140 const char *
141 _umem_debug_init(void)
142 {
143 	return ("default,verbose"); /* $UMEM_DEBUG setting */
144 }
145 
146 const char *
147 _umem_logging_init(void)
148 {
149 	return ("fail,contents"); /* $UMEM_LOGGING setting */
150 }
151 #endif
152 
153 typedef enum {
154 	HELP_ADD,
155 	HELP_ATTACH,
156 	HELP_CLEAR,
157 	HELP_CREATE,
158 	HELP_CHECKPOINT,
159 	HELP_DESTROY,
160 	HELP_DETACH,
161 	HELP_EXPORT,
162 	HELP_HISTORY,
163 	HELP_IMPORT,
164 	HELP_IOSTAT,
165 	HELP_LABELCLEAR,
166 	HELP_LIST,
167 	HELP_OFFLINE,
168 	HELP_ONLINE,
169 	HELP_REPLACE,
170 	HELP_REMOVE,
171 	HELP_INITIALIZE,
172 	HELP_SCRUB,
173 	HELP_RESILVER,
174 	HELP_TRIM,
175 	HELP_STATUS,
176 	HELP_UPGRADE,
177 	HELP_EVENTS,
178 	HELP_GET,
179 	HELP_SET,
180 	HELP_SPLIT,
181 	HELP_SYNC,
182 	HELP_REGUID,
183 	HELP_REOPEN,
184 	HELP_VERSION,
185 	HELP_WAIT
186 } zpool_help_t;
187 
188 
189 /*
190  * Flags for stats to display with "zpool iostats"
191  */
192 enum iostat_type {
193 	IOS_DEFAULT = 0,
194 	IOS_LATENCY = 1,
195 	IOS_QUEUES = 2,
196 	IOS_L_HISTO = 3,
197 	IOS_RQ_HISTO = 4,
198 	IOS_COUNT,	/* always last element */
199 };
200 
201 /* iostat_type entries as bitmasks */
202 #define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
203 #define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
204 #define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
205 #define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
206 #define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)
207 
208 /* Mask of all the histo bits */
209 #define	IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
210 
211 /*
212  * Lookup table for iostat flags to nvlist names.  Basically a list
213  * of all the nvlists a flag requires.  Also specifies the order in
214  * which data gets printed in zpool iostat.
215  */
216 static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
217 	[IOS_L_HISTO] = {
218 	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
219 	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
220 	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
221 	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
222 	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
223 	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
224 	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
225 	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
226 	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
227 	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
228 	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
229 	    NULL},
230 	[IOS_LATENCY] = {
231 	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
232 	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
233 	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
234 	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
235 	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
236 	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
237 	    NULL},
238 	[IOS_QUEUES] = {
239 	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
240 	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
241 	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
242 	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
243 	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
244 	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
245 	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
246 	    NULL},
247 	[IOS_RQ_HISTO] = {
248 	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
249 	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
250 	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
251 	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
252 	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
253 	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
254 	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
255 	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
256 	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
257 	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
258 	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
259 	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
260 	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
261 	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
262 	    NULL},
263 };
264 
265 
266 /*
267  * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
268  * Right now, only one histo bit is ever set at one time, so we can
269  * just do a highbit64(a)
270  */
271 #define	IOS_HISTO_IDX(a)	(highbit64(a & IOS_ANYHISTO_M) - 1)
272 
273 typedef struct zpool_command {
274 	const char	*name;
275 	int		(*func)(int, char **);
276 	zpool_help_t	usage;
277 } zpool_command_t;
278 
279 /*
280  * Master command table.  Each ZFS command has a name, associated function, and
281  * usage message.  The usage messages need to be internationalized, so we have
282  * to have a function to return the usage message based on a command index.
283  *
284  * These commands are organized according to how they are displayed in the usage
285  * message.  An empty command (one with a NULL name) indicates an empty line in
286  * the generic usage message.
287  */
288 static zpool_command_t command_table[] = {
289 	{ "version",	zpool_do_version,	HELP_VERSION		},
290 	{ NULL },
291 	{ "create",	zpool_do_create,	HELP_CREATE		},
292 	{ "destroy",	zpool_do_destroy,	HELP_DESTROY		},
293 	{ NULL },
294 	{ "add",	zpool_do_add,		HELP_ADD		},
295 	{ "remove",	zpool_do_remove,	HELP_REMOVE		},
296 	{ NULL },
297 	{ "labelclear",	zpool_do_labelclear,	HELP_LABELCLEAR		},
298 	{ NULL },
299 	{ "checkpoint",	zpool_do_checkpoint,	HELP_CHECKPOINT		},
300 	{ NULL },
301 	{ "list",	zpool_do_list,		HELP_LIST		},
302 	{ "iostat",	zpool_do_iostat,	HELP_IOSTAT		},
303 	{ "status",	zpool_do_status,	HELP_STATUS		},
304 	{ NULL },
305 	{ "online",	zpool_do_online,	HELP_ONLINE		},
306 	{ "offline",	zpool_do_offline,	HELP_OFFLINE		},
307 	{ "clear",	zpool_do_clear,		HELP_CLEAR		},
308 	{ "reopen",	zpool_do_reopen,	HELP_REOPEN		},
309 	{ NULL },
310 	{ "attach",	zpool_do_attach,	HELP_ATTACH		},
311 	{ "detach",	zpool_do_detach,	HELP_DETACH		},
312 	{ "replace",	zpool_do_replace,	HELP_REPLACE		},
313 	{ "split",	zpool_do_split,		HELP_SPLIT		},
314 	{ NULL },
315 	{ "initialize",	zpool_do_initialize,	HELP_INITIALIZE		},
316 	{ "resilver",	zpool_do_resilver,	HELP_RESILVER		},
317 	{ "scrub",	zpool_do_scrub,		HELP_SCRUB		},
318 	{ "trim",	zpool_do_trim,		HELP_TRIM		},
319 	{ NULL },
320 	{ "import",	zpool_do_import,	HELP_IMPORT		},
321 	{ "export",	zpool_do_export,	HELP_EXPORT		},
322 	{ "upgrade",	zpool_do_upgrade,	HELP_UPGRADE		},
323 	{ "reguid",	zpool_do_reguid,	HELP_REGUID		},
324 	{ NULL },
325 	{ "history",	zpool_do_history,	HELP_HISTORY		},
326 	{ "events",	zpool_do_events,	HELP_EVENTS		},
327 	{ NULL },
328 	{ "get",	zpool_do_get,		HELP_GET		},
329 	{ "set",	zpool_do_set,		HELP_SET		},
330 	{ "sync",	zpool_do_sync,		HELP_SYNC		},
331 	{ NULL },
332 	{ "wait",	zpool_do_wait,		HELP_WAIT		},
333 };
334 
335 #define	NCOMMAND	(ARRAY_SIZE(command_table))
336 
337 #define	VDEV_ALLOC_CLASS_LOGS	"logs"
338 
339 static zpool_command_t *current_command;
340 static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
341 static char history_str[HIS_MAX_RECORD_LEN];
342 static boolean_t log_history = B_TRUE;
343 static uint_t timestamp_fmt = NODATE;
344 
345 static const char *
346 get_usage(zpool_help_t idx)
347 {
348 	switch (idx) {
349 	case HELP_ADD:
350 		return (gettext("\tadd [-fgLnP] [-o property=value] "
351 		    "<pool> <vdev> ...\n"));
352 	case HELP_ATTACH:
353 		return (gettext("\tattach [-fsw] [-o property=value] "
354 		    "<pool> <device> <new-device>\n"));
355 	case HELP_CLEAR:
356 		return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
357 	case HELP_CREATE:
358 		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
359 		    "\t    [-O file-system-property=value] ... \n"
360 		    "\t    [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
361 	case HELP_CHECKPOINT:
362 		return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
363 	case HELP_DESTROY:
364 		return (gettext("\tdestroy [-f] <pool>\n"));
365 	case HELP_DETACH:
366 		return (gettext("\tdetach <pool> <device>\n"));
367 	case HELP_EXPORT:
368 		return (gettext("\texport [-af] <pool> ...\n"));
369 	case HELP_HISTORY:
370 		return (gettext("\thistory [-il] [<pool>] ...\n"));
371 	case HELP_IMPORT:
372 		return (gettext("\timport [-d dir] [-D]\n"
373 		    "\timport [-o mntopts] [-o property=value] ... \n"
374 		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
375 		    "[-R root] [-F [-n]] -a\n"
376 		    "\timport [-o mntopts] [-o property=value] ... \n"
377 		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
378 		    "[-R root] [-F [-n]]\n"
379 		    "\t    [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
380 	case HELP_IOSTAT:
381 		return (gettext("\tiostat [[[-c [script1,script2,...]"
382 		    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
383 		    "\t    [[pool ...]|[pool vdev ...]|[vdev ...]]"
384 		    " [[-n] interval [count]]\n"));
385 	case HELP_LABELCLEAR:
386 		return (gettext("\tlabelclear [-f] <vdev>\n"));
387 	case HELP_LIST:
388 		return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
389 		    "[-T d|u] [pool] ... \n"
390 		    "\t    [interval [count]]\n"));
391 	case HELP_OFFLINE:
392 		return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
393 		    "<device> ...\n"));
394 	case HELP_ONLINE:
395 		return (gettext("\tonline [--power][-e] <pool> <device> "
396 		    "...\n"));
397 	case HELP_REPLACE:
398 		return (gettext("\treplace [-fsw] [-o property=value] "
399 		    "<pool> <device> [new-device]\n"));
400 	case HELP_REMOVE:
401 		return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
402 	case HELP_REOPEN:
403 		return (gettext("\treopen [-n] <pool>\n"));
404 	case HELP_INITIALIZE:
405 		return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
406 		    "[<device> ...]\n"));
407 	case HELP_SCRUB:
408 		return (gettext("\tscrub [-s | -p] [-w] [-e] <pool> ...\n"));
409 	case HELP_RESILVER:
410 		return (gettext("\tresilver <pool> ...\n"));
411 	case HELP_TRIM:
412 		return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
413 		    "[<device> ...]\n"));
414 	case HELP_STATUS:
415 		return (gettext("\tstatus [--power] [-c [script1,script2,...]] "
416 		    "[-igLpPstvxD]  [-T d|u] [pool] ... \n"
417 		    "\t    [interval [count]]\n"));
418 	case HELP_UPGRADE:
419 		return (gettext("\tupgrade\n"
420 		    "\tupgrade -v\n"
421 		    "\tupgrade [-V version] <-a | pool ...>\n"));
422 	case HELP_EVENTS:
423 		return (gettext("\tevents [-vHf [pool] | -c]\n"));
424 	case HELP_GET:
425 		return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
426 		    "<\"all\" | property[,...]> <pool> ...\n"));
427 	case HELP_SET:
428 		return (gettext("\tset <property=value> <pool>\n"
429 		    "\tset <vdev_property=value> <pool> <vdev>\n"));
430 	case HELP_SPLIT:
431 		return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
432 		    "\t    [-o property=value] <pool> <newpool> "
433 		    "[<device> ...]\n"));
434 	case HELP_REGUID:
435 		return (gettext("\treguid <pool>\n"));
436 	case HELP_SYNC:
437 		return (gettext("\tsync [pool] ...\n"));
438 	case HELP_VERSION:
439 		return (gettext("\tversion\n"));
440 	case HELP_WAIT:
441 		return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
442 		    "<pool> [interval]\n"));
443 	default:
444 		__builtin_unreachable();
445 	}
446 }
447 
448 static void
449 zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
450 {
451 	uint_t children = 0;
452 	nvlist_t **child;
453 	uint_t i;
454 
455 	(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
456 	    &child, &children);
457 
458 	if (children == 0) {
459 		char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
460 		    VDEV_NAME_PATH);
461 
462 		if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
463 		    strcmp(path, VDEV_TYPE_HOLE) != 0)
464 			fnvlist_add_boolean(res, path);
465 
466 		free(path);
467 		return;
468 	}
469 
470 	for (i = 0; i < children; i++) {
471 		zpool_collect_leaves(zhp, child[i], res);
472 	}
473 }
474 
475 /*
476  * Callback routine that will print out a pool property value.
477  */
478 static int
479 print_pool_prop_cb(int prop, void *cb)
480 {
481 	FILE *fp = cb;
482 
483 	(void) fprintf(fp, "\t%-19s  ", zpool_prop_to_name(prop));
484 
485 	if (zpool_prop_readonly(prop))
486 		(void) fprintf(fp, "  NO   ");
487 	else
488 		(void) fprintf(fp, " YES   ");
489 
490 	if (zpool_prop_values(prop) == NULL)
491 		(void) fprintf(fp, "-\n");
492 	else
493 		(void) fprintf(fp, "%s\n", zpool_prop_values(prop));
494 
495 	return (ZPROP_CONT);
496 }
497 
498 /*
499  * Callback routine that will print out a vdev property value.
500  */
501 static int
502 print_vdev_prop_cb(int prop, void *cb)
503 {
504 	FILE *fp = cb;
505 
506 	(void) fprintf(fp, "\t%-19s  ", vdev_prop_to_name(prop));
507 
508 	if (vdev_prop_readonly(prop))
509 		(void) fprintf(fp, "  NO   ");
510 	else
511 		(void) fprintf(fp, " YES   ");
512 
513 	if (vdev_prop_values(prop) == NULL)
514 		(void) fprintf(fp, "-\n");
515 	else
516 		(void) fprintf(fp, "%s\n", vdev_prop_values(prop));
517 
518 	return (ZPROP_CONT);
519 }
520 
521 /*
522  * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
523  * '/dev/disk/by-vdev/L5'.
524  */
525 static const char *
526 vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
527 {
528 	nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
529 	if (vdev_nv == NULL) {
530 		return (NULL);
531 	}
532 	return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
533 }
534 
535 static int
536 zpool_power_on(zpool_handle_t *zhp, char *vdev)
537 {
538 	return (zpool_power(zhp, vdev, B_TRUE));
539 }
540 
541 static int
542 zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
543 {
544 	int rc;
545 
546 	rc = zpool_power_on(zhp, vdev);
547 	if (rc != 0)
548 		return (rc);
549 
550 	zpool_disk_wait(vdev_name_to_path(zhp, vdev));
551 
552 	return (0);
553 }
554 
555 static int
556 zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
557 {
558 	nvlist_t *nv;
559 	const char *path = NULL;
560 	int rc;
561 
562 	/* Power up all the devices first */
563 	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
564 		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
565 		if (path != NULL) {
566 			rc = zpool_power_on(zhp, (char *)path);
567 			if (rc != 0) {
568 				return (rc);
569 			}
570 		}
571 	}
572 
573 	/*
574 	 * Wait for their devices to show up.  Since we powered them on
575 	 * at roughly the same time, they should all come online around
576 	 * the same time.
577 	 */
578 	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
579 		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
580 		zpool_disk_wait(path);
581 	}
582 
583 	return (0);
584 }
585 
586 static int
587 zpool_power_off(zpool_handle_t *zhp, char *vdev)
588 {
589 	return (zpool_power(zhp, vdev, B_FALSE));
590 }
591 
592 /*
593  * Display usage message.  If we're inside a command, display only the usage for
594  * that command.  Otherwise, iterate over the entire command table and display
595  * a complete usage message.
596  */
597 static __attribute__((noreturn)) void
598 usage(boolean_t requested)
599 {
600 	FILE *fp = requested ? stdout : stderr;
601 
602 	if (current_command == NULL) {
603 		int i;
604 
605 		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
606 		(void) fprintf(fp,
607 		    gettext("where 'command' is one of the following:\n\n"));
608 
609 		for (i = 0; i < NCOMMAND; i++) {
610 			if (command_table[i].name == NULL)
611 				(void) fprintf(fp, "\n");
612 			else
613 				(void) fprintf(fp, "%s",
614 				    get_usage(command_table[i].usage));
615 		}
616 
617 		(void) fprintf(fp,
618 		    gettext("\nFor further help on a command or topic, "
619 		    "run: %s\n"), "zpool help [<topic>]");
620 	} else {
621 		(void) fprintf(fp, gettext("usage:\n"));
622 		(void) fprintf(fp, "%s", get_usage(current_command->usage));
623 	}
624 
625 	if (current_command != NULL &&
626 	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
627 	    ((strcmp(current_command->name, "set") == 0) ||
628 	    (strcmp(current_command->name, "get") == 0) ||
629 	    (strcmp(current_command->name, "list") == 0))) {
630 
631 		(void) fprintf(fp, "%s",
632 		    gettext("\nthe following properties are supported:\n"));
633 
634 		(void) fprintf(fp, "\n\t%-19s  %s   %s\n\n",
635 		    "PROPERTY", "EDIT", "VALUES");
636 
637 		/* Iterate over all properties */
638 		if (current_prop_type == ZFS_TYPE_POOL) {
639 			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
640 			    B_TRUE, current_prop_type);
641 
642 			(void) fprintf(fp, "\t%-19s   ", "feature@...");
643 			(void) fprintf(fp, "YES   "
644 			    "disabled | enabled | active\n");
645 
646 			(void) fprintf(fp, gettext("\nThe feature@ properties "
647 			    "must be appended with a feature name.\n"
648 			    "See zpool-features(7).\n"));
649 		} else if (current_prop_type == ZFS_TYPE_VDEV) {
650 			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
651 			    B_TRUE, current_prop_type);
652 		}
653 	}
654 
655 	/*
656 	 * See comments at end of main().
657 	 */
658 	if (getenv("ZFS_ABORT") != NULL) {
659 		(void) printf("dumping core by request\n");
660 		abort();
661 	}
662 
663 	exit(requested ? 0 : 2);
664 }
665 
666 /*
667  * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
668  * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
669  * if none specified.
670  *
671  *	-c	Cancel. Ends active initializing.
672  *	-s	Suspend. Initializing can then be restarted with no flags.
673  *	-u	Uninitialize. Clears initialization state.
674  *	-w	Wait. Blocks until initializing has completed.
675  */
676 int
677 zpool_do_initialize(int argc, char **argv)
678 {
679 	int c;
680 	char *poolname;
681 	zpool_handle_t *zhp;
682 	nvlist_t *vdevs;
683 	int err = 0;
684 	boolean_t wait = B_FALSE;
685 
686 	struct option long_options[] = {
687 		{"cancel",	no_argument,		NULL, 'c'},
688 		{"suspend",	no_argument,		NULL, 's'},
689 		{"uninit",	no_argument,		NULL, 'u'},
690 		{"wait",	no_argument,		NULL, 'w'},
691 		{0, 0, 0, 0}
692 	};
693 
694 	pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
695 	while ((c = getopt_long(argc, argv, "csuw", long_options,
696 	    NULL)) != -1) {
697 		switch (c) {
698 		case 'c':
699 			if (cmd_type != POOL_INITIALIZE_START &&
700 			    cmd_type != POOL_INITIALIZE_CANCEL) {
701 				(void) fprintf(stderr, gettext("-c cannot be "
702 				    "combined with other options\n"));
703 				usage(B_FALSE);
704 			}
705 			cmd_type = POOL_INITIALIZE_CANCEL;
706 			break;
707 		case 's':
708 			if (cmd_type != POOL_INITIALIZE_START &&
709 			    cmd_type != POOL_INITIALIZE_SUSPEND) {
710 				(void) fprintf(stderr, gettext("-s cannot be "
711 				    "combined with other options\n"));
712 				usage(B_FALSE);
713 			}
714 			cmd_type = POOL_INITIALIZE_SUSPEND;
715 			break;
716 		case 'u':
717 			if (cmd_type != POOL_INITIALIZE_START &&
718 			    cmd_type != POOL_INITIALIZE_UNINIT) {
719 				(void) fprintf(stderr, gettext("-u cannot be "
720 				    "combined with other options\n"));
721 				usage(B_FALSE);
722 			}
723 			cmd_type = POOL_INITIALIZE_UNINIT;
724 			break;
725 		case 'w':
726 			wait = B_TRUE;
727 			break;
728 		case '?':
729 			if (optopt != 0) {
730 				(void) fprintf(stderr,
731 				    gettext("invalid option '%c'\n"), optopt);
732 			} else {
733 				(void) fprintf(stderr,
734 				    gettext("invalid option '%s'\n"),
735 				    argv[optind - 1]);
736 			}
737 			usage(B_FALSE);
738 		}
739 	}
740 
741 	argc -= optind;
742 	argv += optind;
743 
744 	if (argc < 1) {
745 		(void) fprintf(stderr, gettext("missing pool name argument\n"));
746 		usage(B_FALSE);
747 		return (-1);
748 	}
749 
750 	if (wait && (cmd_type != POOL_INITIALIZE_START)) {
751 		(void) fprintf(stderr, gettext("-w cannot be used with -c, -s"
752 		    "or -u\n"));
753 		usage(B_FALSE);
754 	}
755 
756 	poolname = argv[0];
757 	zhp = zpool_open(g_zfs, poolname);
758 	if (zhp == NULL)
759 		return (-1);
760 
761 	vdevs = fnvlist_alloc();
762 	if (argc == 1) {
763 		/* no individual leaf vdevs specified, so add them all */
764 		nvlist_t *config = zpool_get_config(zhp, NULL);
765 		nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
766 		    ZPOOL_CONFIG_VDEV_TREE);
767 		zpool_collect_leaves(zhp, nvroot, vdevs);
768 	} else {
769 		for (int i = 1; i < argc; i++) {
770 			fnvlist_add_boolean(vdevs, argv[i]);
771 		}
772 	}
773 
774 	if (wait)
775 		err = zpool_initialize_wait(zhp, cmd_type, vdevs);
776 	else
777 		err = zpool_initialize(zhp, cmd_type, vdevs);
778 
779 	fnvlist_free(vdevs);
780 	zpool_close(zhp);
781 
782 	return (err);
783 }
784 
785 /*
786  * print a pool vdev config for dry runs
787  */
788 static void
789 print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
790     const char *match, int name_flags)
791 {
792 	nvlist_t **child;
793 	uint_t c, children;
794 	char *vname;
795 	boolean_t printed = B_FALSE;
796 
797 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
798 	    &child, &children) != 0) {
799 		if (name != NULL)
800 			(void) printf("\t%*s%s\n", indent, "", name);
801 		return;
802 	}
803 
804 	for (c = 0; c < children; c++) {
805 		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
806 		const char *class = "";
807 
808 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
809 		    &is_hole);
810 
811 		if (is_hole == B_TRUE) {
812 			continue;
813 		}
814 
815 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
816 		    &is_log);
817 		if (is_log)
818 			class = VDEV_ALLOC_BIAS_LOG;
819 		(void) nvlist_lookup_string(child[c],
820 		    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
821 		if (strcmp(match, class) != 0)
822 			continue;
823 
824 		if (!printed && name != NULL) {
825 			(void) printf("\t%*s%s\n", indent, "", name);
826 			printed = B_TRUE;
827 		}
828 		vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
829 		print_vdev_tree(zhp, vname, child[c], indent + 2, "",
830 		    name_flags);
831 		free(vname);
832 	}
833 }
834 
835 /*
836  * Print the list of l2cache devices for dry runs.
837  */
838 static void
839 print_cache_list(nvlist_t *nv, int indent)
840 {
841 	nvlist_t **child;
842 	uint_t c, children;
843 
844 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
845 	    &child, &children) == 0 && children > 0) {
846 		(void) printf("\t%*s%s\n", indent, "", "cache");
847 	} else {
848 		return;
849 	}
850 	for (c = 0; c < children; c++) {
851 		char *vname;
852 
853 		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
854 		(void) printf("\t%*s%s\n", indent + 2, "", vname);
855 		free(vname);
856 	}
857 }
858 
859 /*
860  * Print the list of spares for dry runs.
861  */
862 static void
863 print_spare_list(nvlist_t *nv, int indent)
864 {
865 	nvlist_t **child;
866 	uint_t c, children;
867 
868 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
869 	    &child, &children) == 0 && children > 0) {
870 		(void) printf("\t%*s%s\n", indent, "", "spares");
871 	} else {
872 		return;
873 	}
874 	for (c = 0; c < children; c++) {
875 		char *vname;
876 
877 		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
878 		(void) printf("\t%*s%s\n", indent + 2, "", vname);
879 		free(vname);
880 	}
881 }
882 
883 static boolean_t
884 prop_list_contains_feature(nvlist_t *proplist)
885 {
886 	nvpair_t *nvp;
887 	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
888 	    nvp = nvlist_next_nvpair(proplist, nvp)) {
889 		if (zpool_prop_feature(nvpair_name(nvp)))
890 			return (B_TRUE);
891 	}
892 	return (B_FALSE);
893 }
894 
895 /*
896  * Add a property pair (name, string-value) into a property nvlist.
897  */
898 static int
899 add_prop_list(const char *propname, const char *propval, nvlist_t **props,
900     boolean_t poolprop)
901 {
902 	zpool_prop_t prop = ZPOOL_PROP_INVAL;
903 	nvlist_t *proplist;
904 	const char *normnm;
905 	const char *strval;
906 
907 	if (*props == NULL &&
908 	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
909 		(void) fprintf(stderr,
910 		    gettext("internal error: out of memory\n"));
911 		return (1);
912 	}
913 
914 	proplist = *props;
915 
916 	if (poolprop) {
917 		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
918 		const char *cname =
919 		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
920 
921 		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
922 		    (!zpool_prop_feature(propname) &&
923 		    !zpool_prop_vdev(propname))) {
924 			(void) fprintf(stderr, gettext("property '%s' is "
925 			    "not a valid pool or vdev property\n"), propname);
926 			return (2);
927 		}
928 
929 		/*
930 		 * feature@ properties and version should not be specified
931 		 * at the same time.
932 		 */
933 		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
934 		    nvlist_exists(proplist, vname)) ||
935 		    (prop == ZPOOL_PROP_VERSION &&
936 		    prop_list_contains_feature(proplist))) {
937 			(void) fprintf(stderr, gettext("'feature@' and "
938 			    "'version' properties cannot be specified "
939 			    "together\n"));
940 			return (2);
941 		}
942 
943 		/*
944 		 * if version is specified, only "legacy" compatibility
945 		 * may be requested
946 		 */
947 		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
948 		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
949 		    nvlist_exists(proplist, vname)) ||
950 		    (prop == ZPOOL_PROP_VERSION &&
951 		    nvlist_exists(proplist, cname) &&
952 		    strcmp(fnvlist_lookup_string(proplist, cname),
953 		    ZPOOL_COMPAT_LEGACY) != 0)) {
954 			(void) fprintf(stderr, gettext("when 'version' is "
955 			    "specified, the 'compatibility' feature may only "
956 			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
957 			return (2);
958 		}
959 
960 		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
961 			normnm = propname;
962 		else
963 			normnm = zpool_prop_to_name(prop);
964 	} else {
965 		zfs_prop_t fsprop = zfs_name_to_prop(propname);
966 
967 		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
968 		    B_FALSE)) {
969 			normnm = zfs_prop_to_name(fsprop);
970 		} else if (zfs_prop_user(propname) ||
971 		    zfs_prop_userquota(propname)) {
972 			normnm = propname;
973 		} else {
974 			(void) fprintf(stderr, gettext("property '%s' is "
975 			    "not a valid filesystem property\n"), propname);
976 			return (2);
977 		}
978 	}
979 
980 	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
981 	    prop != ZPOOL_PROP_CACHEFILE) {
982 		(void) fprintf(stderr, gettext("property '%s' "
983 		    "specified multiple times\n"), propname);
984 		return (2);
985 	}
986 
987 	if (nvlist_add_string(proplist, normnm, propval) != 0) {
988 		(void) fprintf(stderr, gettext("internal "
989 		    "error: out of memory\n"));
990 		return (1);
991 	}
992 
993 	return (0);
994 }
995 
996 /*
997  * Set a default property pair (name, string-value) in a property nvlist
998  */
999 static int
1000 add_prop_list_default(const char *propname, const char *propval,
1001     nvlist_t **props)
1002 {
1003 	const char *pval;
1004 
1005 	if (nvlist_lookup_string(*props, propname, &pval) == 0)
1006 		return (0);
1007 
1008 	return (add_prop_list(propname, propval, props, B_TRUE));
1009 }
1010 
1011 /*
1012  * zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
1013  *
1014  *	-f	Force addition of devices, even if they appear in use
1015  *	-g	Display guid for individual vdev name.
1016  *	-L	Follow links when resolving vdev path name.
1017  *	-n	Do not add the devices, but display the resulting layout if
1018  *		they were to be added.
1019  *	-o	Set property=value.
1020  *	-P	Display full path for vdev name.
1021  *
1022  * Adds the given vdevs to 'pool'.  As with create, the bulk of this work is
1023  * handled by make_root_vdev(), which constructs the nvlist needed to pass to
1024  * libzfs.
1025  */
1026 int
1027 zpool_do_add(int argc, char **argv)
1028 {
1029 	boolean_t force = B_FALSE;
1030 	boolean_t dryrun = B_FALSE;
1031 	int name_flags = 0;
1032 	int c;
1033 	nvlist_t *nvroot;
1034 	char *poolname;
1035 	int ret;
1036 	zpool_handle_t *zhp;
1037 	nvlist_t *config;
1038 	nvlist_t *props = NULL;
1039 	char *propval;
1040 
1041 	/* check options */
1042 	while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
1043 		switch (c) {
1044 		case 'f':
1045 			force = B_TRUE;
1046 			break;
1047 		case 'g':
1048 			name_flags |= VDEV_NAME_GUID;
1049 			break;
1050 		case 'L':
1051 			name_flags |= VDEV_NAME_FOLLOW_LINKS;
1052 			break;
1053 		case 'n':
1054 			dryrun = B_TRUE;
1055 			break;
1056 		case 'o':
1057 			if ((propval = strchr(optarg, '=')) == NULL) {
1058 				(void) fprintf(stderr, gettext("missing "
1059 				    "'=' for -o option\n"));
1060 				usage(B_FALSE);
1061 			}
1062 			*propval = '\0';
1063 			propval++;
1064 
1065 			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
1066 			    (add_prop_list(optarg, propval, &props, B_TRUE)))
1067 				usage(B_FALSE);
1068 			break;
1069 		case 'P':
1070 			name_flags |= VDEV_NAME_PATH;
1071 			break;
1072 		case '?':
1073 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
1074 			    optopt);
1075 			usage(B_FALSE);
1076 		}
1077 	}
1078 
1079 	argc -= optind;
1080 	argv += optind;
1081 
1082 	/* get pool name and check number of arguments */
1083 	if (argc < 1) {
1084 		(void) fprintf(stderr, gettext("missing pool name argument\n"));
1085 		usage(B_FALSE);
1086 	}
1087 	if (argc < 2) {
1088 		(void) fprintf(stderr, gettext("missing vdev specification\n"));
1089 		usage(B_FALSE);
1090 	}
1091 
1092 	poolname = argv[0];
1093 
1094 	argc--;
1095 	argv++;
1096 
1097 	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1098 		return (1);
1099 
1100 	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
1101 		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
1102 		    poolname);
1103 		zpool_close(zhp);
1104 		return (1);
1105 	}
1106 
1107 	/* unless manually specified use "ashift" pool property (if set) */
1108 	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
1109 		int intval;
1110 		zprop_source_t src;
1111 		char strval[ZPOOL_MAXPROPLEN];
1112 
1113 		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
1114 		if (src != ZPROP_SRC_DEFAULT) {
1115 			(void) sprintf(strval, "%" PRId32, intval);
1116 			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
1117 			    &props, B_TRUE) == 0);
1118 		}
1119 	}
1120 
1121 	/* pass off to make_root_vdev for processing */
1122 	nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
1123 	    argc, argv);
1124 	if (nvroot == NULL) {
1125 		zpool_close(zhp);
1126 		return (1);
1127 	}
1128 
1129 	if (dryrun) {
1130 		nvlist_t *poolnvroot;
1131 		nvlist_t **l2child, **sparechild;
1132 		uint_t l2children, sparechildren, c;
1133 		char *vname;
1134 		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
1135 
1136 		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1137 		    &poolnvroot) == 0);
1138 
1139 		(void) printf(gettext("would update '%s' to the following "
1140 		    "configuration:\n\n"), zpool_get_name(zhp));
1141 
1142 		/* print original main pool and new tree */
1143 		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
1144 		    name_flags | VDEV_NAME_TYPE_ID);
1145 		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
1146 
1147 		/* print other classes: 'dedup', 'special', and 'log' */
1148 		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1149 			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
1150 			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
1151 			print_vdev_tree(zhp, NULL, nvroot, 0,
1152 			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
1153 		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1154 			print_vdev_tree(zhp, "dedup", nvroot, 0,
1155 			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
1156 		}
1157 
1158 		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1159 			print_vdev_tree(zhp, "special", poolnvroot, 0,
1160 			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1161 			print_vdev_tree(zhp, NULL, nvroot, 0,
1162 			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1163 		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1164 			print_vdev_tree(zhp, "special", nvroot, 0,
1165 			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1166 		}
1167 
1168 		if (num_logs(poolnvroot) > 0) {
1169 			print_vdev_tree(zhp, "logs", poolnvroot, 0,
1170 			    VDEV_ALLOC_BIAS_LOG, name_flags);
1171 			print_vdev_tree(zhp, NULL, nvroot, 0,
1172 			    VDEV_ALLOC_BIAS_LOG, name_flags);
1173 		} else if (num_logs(nvroot) > 0) {
1174 			print_vdev_tree(zhp, "logs", nvroot, 0,
1175 			    VDEV_ALLOC_BIAS_LOG, name_flags);
1176 		}
1177 
1178 		/* Do the same for the caches */
1179 		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
1180 		    &l2child, &l2children) == 0 && l2children) {
1181 			hadcache = B_TRUE;
1182 			(void) printf(gettext("\tcache\n"));
1183 			for (c = 0; c < l2children; c++) {
1184 				vname = zpool_vdev_name(g_zfs, NULL,
1185 				    l2child[c], name_flags);
1186 				(void) printf("\t  %s\n", vname);
1187 				free(vname);
1188 			}
1189 		}
1190 		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1191 		    &l2child, &l2children) == 0 && l2children) {
1192 			if (!hadcache)
1193 				(void) printf(gettext("\tcache\n"));
1194 			for (c = 0; c < l2children; c++) {
1195 				vname = zpool_vdev_name(g_zfs, NULL,
1196 				    l2child[c], name_flags);
1197 				(void) printf("\t  %s\n", vname);
1198 				free(vname);
1199 			}
1200 		}
1201 		/* And finally the spares */
1202 		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
1203 		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1204 			hadspare = B_TRUE;
1205 			(void) printf(gettext("\tspares\n"));
1206 			for (c = 0; c < sparechildren; c++) {
1207 				vname = zpool_vdev_name(g_zfs, NULL,
1208 				    sparechild[c], name_flags);
1209 				(void) printf("\t  %s\n", vname);
1210 				free(vname);
1211 			}
1212 		}
1213 		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1214 		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1215 			if (!hadspare)
1216 				(void) printf(gettext("\tspares\n"));
1217 			for (c = 0; c < sparechildren; c++) {
1218 				vname = zpool_vdev_name(g_zfs, NULL,
1219 				    sparechild[c], name_flags);
1220 				(void) printf("\t  %s\n", vname);
1221 				free(vname);
1222 			}
1223 		}
1224 
1225 		ret = 0;
1226 	} else {
1227 		ret = (zpool_add(zhp, nvroot) != 0);
1228 	}
1229 
1230 	nvlist_free(props);
1231 	nvlist_free(nvroot);
1232 	zpool_close(zhp);
1233 
1234 	return (ret);
1235 }
1236 
1237 /*
1238  * zpool remove [-npsw] <pool> <vdev> ...
1239  *
1240  * Removes the given vdev from the pool.
1241  */
1242 int
1243 zpool_do_remove(int argc, char **argv)
1244 {
1245 	char *poolname;
1246 	int i, ret = 0;
1247 	zpool_handle_t *zhp = NULL;
1248 	boolean_t stop = B_FALSE;
1249 	int c;
1250 	boolean_t noop = B_FALSE;
1251 	boolean_t parsable = B_FALSE;
1252 	boolean_t wait = B_FALSE;
1253 
1254 	/* check options */
1255 	while ((c = getopt(argc, argv, "npsw")) != -1) {
1256 		switch (c) {
1257 		case 'n':
1258 			noop = B_TRUE;
1259 			break;
1260 		case 'p':
1261 			parsable = B_TRUE;
1262 			break;
1263 		case 's':
1264 			stop = B_TRUE;
1265 			break;
1266 		case 'w':
1267 			wait = B_TRUE;
1268 			break;
1269 		case '?':
1270 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
1271 			    optopt);
1272 			usage(B_FALSE);
1273 		}
1274 	}
1275 
1276 	argc -= optind;
1277 	argv += optind;
1278 
1279 	/* get pool name and check number of arguments */
1280 	if (argc < 1) {
1281 		(void) fprintf(stderr, gettext("missing pool name argument\n"));
1282 		usage(B_FALSE);
1283 	}
1284 
1285 	poolname = argv[0];
1286 
1287 	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1288 		return (1);
1289 
1290 	if (stop && noop) {
1291 		zpool_close(zhp);
1292 		(void) fprintf(stderr, gettext("stop request ignored\n"));
1293 		return (0);
1294 	}
1295 
1296 	if (stop) {
1297 		if (argc > 1) {
1298 			(void) fprintf(stderr, gettext("too many arguments\n"));
1299 			usage(B_FALSE);
1300 		}
1301 		if (zpool_vdev_remove_cancel(zhp) != 0)
1302 			ret = 1;
1303 		if (wait) {
1304 			(void) fprintf(stderr, gettext("invalid option "
1305 			    "combination: -w cannot be used with -s\n"));
1306 			usage(B_FALSE);
1307 		}
1308 	} else {
1309 		if (argc < 2) {
1310 			(void) fprintf(stderr, gettext("missing device\n"));
1311 			usage(B_FALSE);
1312 		}
1313 
1314 		for (i = 1; i < argc; i++) {
1315 			if (noop) {
1316 				uint64_t size;
1317 
1318 				if (zpool_vdev_indirect_size(zhp, argv[i],
1319 				    &size) != 0) {
1320 					ret = 1;
1321 					break;
1322 				}
1323 				if (parsable) {
1324 					(void) printf("%s %llu\n",
1325 					    argv[i], (unsigned long long)size);
1326 				} else {
1327 					char valstr[32];
1328 					zfs_nicenum(size, valstr,
1329 					    sizeof (valstr));
1330 					(void) printf("Memory that will be "
1331 					    "used after removing %s: %s\n",
1332 					    argv[i], valstr);
1333 				}
1334 			} else {
1335 				if (zpool_vdev_remove(zhp, argv[i]) != 0)
1336 					ret = 1;
1337 			}
1338 		}
1339 
1340 		if (ret == 0 && wait)
1341 			ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
1342 	}
1343 	zpool_close(zhp);
1344 
1345 	return (ret);
1346 }
1347 
1348 /*
1349  * Return 1 if a vdev is active (being used in a pool)
1350  * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
1351  *
1352  * This is useful for checking if a disk in an active pool is offlined or
1353  * faulted.
1354  */
1355 static int
1356 vdev_is_active(char *vdev_path)
1357 {
1358 	int fd;
1359 	fd = open(vdev_path, O_EXCL);
1360 	if (fd < 0) {
1361 		return (1);   /* cant open O_EXCL - disk is active */
1362 	}
1363 
1364 	close(fd);
1365 	return (0);   /* disk is inactive in the pool */
1366 }
1367 
1368 /*
1369  * zpool labelclear [-f] <vdev>
1370  *
1371  *	-f	Force clearing the label for the vdevs which are members of
1372  *		the exported or foreign pools.
1373  *
1374  * Verifies that the vdev is not active and zeros out the label information
1375  * on the device.
1376  */
1377 int
1378 zpool_do_labelclear(int argc, char **argv)
1379 {
1380 	char vdev[MAXPATHLEN];
1381 	char *name = NULL;
1382 	int c, fd = -1, ret = 0;
1383 	nvlist_t *config;
1384 	pool_state_t state;
1385 	boolean_t inuse = B_FALSE;
1386 	boolean_t force = B_FALSE;
1387 
1388 	/* check options */
1389 	while ((c = getopt(argc, argv, "f")) != -1) {
1390 		switch (c) {
1391 		case 'f':
1392 			force = B_TRUE;
1393 			break;
1394 		default:
1395 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
1396 			    optopt);
1397 			usage(B_FALSE);
1398 		}
1399 	}
1400 
1401 	argc -= optind;
1402 	argv += optind;
1403 
1404 	/* get vdev name */
1405 	if (argc < 1) {
1406 		(void) fprintf(stderr, gettext("missing vdev name\n"));
1407 		usage(B_FALSE);
1408 	}
1409 	if (argc > 1) {
1410 		(void) fprintf(stderr, gettext("too many arguments\n"));
1411 		usage(B_FALSE);
1412 	}
1413 
1414 	(void) strlcpy(vdev, argv[0], sizeof (vdev));
1415 
1416 	/*
1417 	 * If we cannot open an absolute path, we quit.
1418 	 * Otherwise if the provided vdev name doesn't point to a file,
1419 	 * try prepending expected disk paths and partition numbers.
1420 	 */
1421 	if ((fd = open(vdev, O_RDWR)) < 0) {
1422 		int error;
1423 		if (vdev[0] == '/') {
1424 			(void) fprintf(stderr, gettext("failed to open "
1425 			    "%s: %s\n"), vdev, strerror(errno));
1426 			return (1);
1427 		}
1428 
1429 		error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
1430 		if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
1431 			if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
1432 				error = ENOENT;
1433 		}
1434 
1435 		if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
1436 			if (errno == ENOENT) {
1437 				(void) fprintf(stderr, gettext(
1438 				    "failed to find device %s, try "
1439 				    "specifying absolute path instead\n"),
1440 				    argv[0]);
1441 				return (1);
1442 			}
1443 
1444 			(void) fprintf(stderr, gettext("failed to open %s:"
1445 			    " %s\n"), vdev, strerror(errno));
1446 			return (1);
1447 		}
1448 	}
1449 
1450 	/*
1451 	 * Flush all dirty pages for the block device.  This should not be
1452 	 * fatal when the device does not support BLKFLSBUF as would be the
1453 	 * case for a file vdev.
1454 	 */
1455 	if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
1456 		(void) fprintf(stderr, gettext("failed to invalidate "
1457 		    "cache for %s: %s\n"), vdev, strerror(errno));
1458 
1459 	if (zpool_read_label(fd, &config, NULL) != 0) {
1460 		(void) fprintf(stderr,
1461 		    gettext("failed to read label from %s\n"), vdev);
1462 		ret = 1;
1463 		goto errout;
1464 	}
1465 	nvlist_free(config);
1466 
1467 	ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
1468 	if (ret != 0) {
1469 		(void) fprintf(stderr,
1470 		    gettext("failed to check state for %s\n"), vdev);
1471 		ret = 1;
1472 		goto errout;
1473 	}
1474 
1475 	if (!inuse)
1476 		goto wipe_label;
1477 
1478 	switch (state) {
1479 	default:
1480 	case POOL_STATE_ACTIVE:
1481 	case POOL_STATE_SPARE:
1482 	case POOL_STATE_L2CACHE:
1483 		/*
1484 		 * We allow the user to call 'zpool offline -f'
1485 		 * on an offlined disk in an active pool. We can check if
1486 		 * the disk is online by calling vdev_is_active().
1487 		 */
1488 		if (force && !vdev_is_active(vdev))
1489 			break;
1490 
1491 		(void) fprintf(stderr, gettext(
1492 		    "%s is a member (%s) of pool \"%s\""),
1493 		    vdev, zpool_pool_state_to_name(state), name);
1494 
1495 		if (force) {
1496 			(void) fprintf(stderr, gettext(
1497 			    ". Offline the disk first to clear its label."));
1498 		}
1499 		printf("\n");
1500 		ret = 1;
1501 		goto errout;
1502 
1503 	case POOL_STATE_EXPORTED:
1504 		if (force)
1505 			break;
1506 		(void) fprintf(stderr, gettext(
1507 		    "use '-f' to override the following error:\n"
1508 		    "%s is a member of exported pool \"%s\"\n"),
1509 		    vdev, name);
1510 		ret = 1;
1511 		goto errout;
1512 
1513 	case POOL_STATE_POTENTIALLY_ACTIVE:
1514 		if (force)
1515 			break;
1516 		(void) fprintf(stderr, gettext(
1517 		    "use '-f' to override the following error:\n"
1518 		    "%s is a member of potentially active pool \"%s\"\n"),
1519 		    vdev, name);
1520 		ret = 1;
1521 		goto errout;
1522 
1523 	case POOL_STATE_DESTROYED:
1524 		/* inuse should never be set for a destroyed pool */
1525 		assert(0);
1526 		break;
1527 	}
1528 
1529 wipe_label:
1530 	ret = zpool_clear_label(fd);
1531 	if (ret != 0) {
1532 		(void) fprintf(stderr,
1533 		    gettext("failed to clear label for %s\n"), vdev);
1534 	}
1535 
1536 errout:
1537 	free(name);
1538 	(void) close(fd);
1539 
1540 	return (ret);
1541 }
1542 
1543 /*
1544  * zpool create [-fnd] [-o property=value] ...
1545  *		[-O file-system-property=value] ...
1546  *		[-R root] [-m mountpoint] <pool> <dev> ...
1547  *
1548  *	-f	Force creation, even if devices appear in use
1549  *	-n	Do not create the pool, but display the resulting layout if it
1550  *		were to be created.
1551  *      -R	Create a pool under an alternate root
1552  *      -m	Set default mountpoint for the root dataset.  By default it's
1553  *		'/<pool>'
1554  *	-o	Set property=value.
1555  *	-o	Set feature@feature=enabled|disabled.
1556  *	-d	Don't automatically enable all supported pool features
1557  *		(individual features can be enabled with -o).
1558  *	-O	Set fsproperty=value in the pool's root file system
1559  *
1560  * Creates the named pool according to the given vdev specification.  The
1561  * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1562  * Once we get the nvlist back from make_root_vdev(), we either print out the
1563  * contents (if '-n' was specified), or pass it to libzfs to do the creation.
1564  */
1565 int
1566 zpool_do_create(int argc, char **argv)
1567 {
1568 	boolean_t force = B_FALSE;
1569 	boolean_t dryrun = B_FALSE;
1570 	boolean_t enable_pool_features = B_TRUE;
1571 
1572 	int c;
1573 	nvlist_t *nvroot = NULL;
1574 	char *poolname;
1575 	char *tname = NULL;
1576 	int ret = 1;
1577 	char *altroot = NULL;
1578 	char *compat = NULL;
1579 	char *mountpoint = NULL;
1580 	nvlist_t *fsprops = NULL;
1581 	nvlist_t *props = NULL;
1582 	char *propval;
1583 
1584 	/* check options */
1585 	while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1586 		switch (c) {
1587 		case 'f':
1588 			force = B_TRUE;
1589 			break;
1590 		case 'n':
1591 			dryrun = B_TRUE;
1592 			break;
1593 		case 'd':
1594 			enable_pool_features = B_FALSE;
1595 			break;
1596 		case 'R':
1597 			altroot = optarg;
1598 			if (add_prop_list(zpool_prop_to_name(
1599 			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
1600 				goto errout;
1601 			if (add_prop_list_default(zpool_prop_to_name(
1602 			    ZPOOL_PROP_CACHEFILE), "none", &props))
1603 				goto errout;
1604 			break;
1605 		case 'm':
1606 			/* Equivalent to -O mountpoint=optarg */
1607 			mountpoint = optarg;
1608 			break;
1609 		case 'o':
1610 			if ((propval = strchr(optarg, '=')) == NULL) {
1611 				(void) fprintf(stderr, gettext("missing "
1612 				    "'=' for -o option\n"));
1613 				goto errout;
1614 			}
1615 			*propval = '\0';
1616 			propval++;
1617 
1618 			if (add_prop_list(optarg, propval, &props, B_TRUE))
1619 				goto errout;
1620 
1621 			/*
1622 			 * If the user is creating a pool that doesn't support
1623 			 * feature flags, don't enable any features.
1624 			 */
1625 			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
1626 				char *end;
1627 				u_longlong_t ver;
1628 
1629 				ver = strtoull(propval, &end, 10);
1630 				if (*end == '\0' &&
1631 				    ver < SPA_VERSION_FEATURES) {
1632 					enable_pool_features = B_FALSE;
1633 				}
1634 			}
1635 			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
1636 				altroot = propval;
1637 			if (zpool_name_to_prop(optarg) ==
1638 			    ZPOOL_PROP_COMPATIBILITY)
1639 				compat = propval;
1640 			break;
1641 		case 'O':
1642 			if ((propval = strchr(optarg, '=')) == NULL) {
1643 				(void) fprintf(stderr, gettext("missing "
1644 				    "'=' for -O option\n"));
1645 				goto errout;
1646 			}
1647 			*propval = '\0';
1648 			propval++;
1649 
1650 			/*
1651 			 * Mountpoints are checked and then added later.
1652 			 * Uniquely among properties, they can be specified
1653 			 * more than once, to avoid conflict with -m.
1654 			 */
1655 			if (0 == strcmp(optarg,
1656 			    zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
1657 				mountpoint = propval;
1658 			} else if (add_prop_list(optarg, propval, &fsprops,
1659 			    B_FALSE)) {
1660 				goto errout;
1661 			}
1662 			break;
1663 		case 't':
1664 			/*
1665 			 * Sanity check temporary pool name.
1666 			 */
1667 			if (strchr(optarg, '/') != NULL) {
1668 				(void) fprintf(stderr, gettext("cannot create "
1669 				    "'%s': invalid character '/' in temporary "
1670 				    "name\n"), optarg);
1671 				(void) fprintf(stderr, gettext("use 'zfs "
1672 				    "create' to create a dataset\n"));
1673 				goto errout;
1674 			}
1675 
1676 			if (add_prop_list(zpool_prop_to_name(
1677 			    ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
1678 				goto errout;
1679 			if (add_prop_list_default(zpool_prop_to_name(
1680 			    ZPOOL_PROP_CACHEFILE), "none", &props))
1681 				goto errout;
1682 			tname = optarg;
1683 			break;
1684 		case ':':
1685 			(void) fprintf(stderr, gettext("missing argument for "
1686 			    "'%c' option\n"), optopt);
1687 			goto badusage;
1688 		case '?':
1689 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
1690 			    optopt);
1691 			goto badusage;
1692 		}
1693 	}
1694 
1695 	argc -= optind;
1696 	argv += optind;
1697 
1698 	/* get pool name and check number of arguments */
1699 	if (argc < 1) {
1700 		(void) fprintf(stderr, gettext("missing pool name argument\n"));
1701 		goto badusage;
1702 	}
1703 	if (argc < 2) {
1704 		(void) fprintf(stderr, gettext("missing vdev specification\n"));
1705 		goto badusage;
1706 	}
1707 
1708 	poolname = argv[0];
1709 
1710 	/*
1711 	 * As a special case, check for use of '/' in the name, and direct the
1712 	 * user to use 'zfs create' instead.
1713 	 */
1714 	if (strchr(poolname, '/') != NULL) {
1715 		(void) fprintf(stderr, gettext("cannot create '%s': invalid "
1716 		    "character '/' in pool name\n"), poolname);
1717 		(void) fprintf(stderr, gettext("use 'zfs create' to "
1718 		    "create a dataset\n"));
1719 		goto errout;
1720 	}
1721 
1722 	/* pass off to make_root_vdev for bulk processing */
1723 	nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
1724 	    argc - 1, argv + 1);
1725 	if (nvroot == NULL)
1726 		goto errout;
1727 
1728 	/* make_root_vdev() allows 0 toplevel children if there are spares */
1729 	if (!zfs_allocatable_devs(nvroot)) {
1730 		(void) fprintf(stderr, gettext("invalid vdev "
1731 		    "specification: at least one toplevel vdev must be "
1732 		    "specified\n"));
1733 		goto errout;
1734 	}
1735 
1736 	if (altroot != NULL && altroot[0] != '/') {
1737 		(void) fprintf(stderr, gettext("invalid alternate root '%s': "
1738 		    "must be an absolute path\n"), altroot);
1739 		goto errout;
1740 	}
1741 
1742 	/*
1743 	 * Check the validity of the mountpoint and direct the user to use the
1744 	 * '-m' mountpoint option if it looks like its in use.
1745 	 */
1746 	if (mountpoint == NULL ||
1747 	    (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
1748 	    strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
1749 		char buf[MAXPATHLEN];
1750 		DIR *dirp;
1751 
1752 		if (mountpoint && mountpoint[0] != '/') {
1753 			(void) fprintf(stderr, gettext("invalid mountpoint "
1754 			    "'%s': must be an absolute path, 'legacy', or "
1755 			    "'none'\n"), mountpoint);
1756 			goto errout;
1757 		}
1758 
1759 		if (mountpoint == NULL) {
1760 			if (altroot != NULL)
1761 				(void) snprintf(buf, sizeof (buf), "%s/%s",
1762 				    altroot, poolname);
1763 			else
1764 				(void) snprintf(buf, sizeof (buf), "/%s",
1765 				    poolname);
1766 		} else {
1767 			if (altroot != NULL)
1768 				(void) snprintf(buf, sizeof (buf), "%s%s",
1769 				    altroot, mountpoint);
1770 			else
1771 				(void) snprintf(buf, sizeof (buf), "%s",
1772 				    mountpoint);
1773 		}
1774 
1775 		if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
1776 			(void) fprintf(stderr, gettext("mountpoint '%s' : "
1777 			    "%s\n"), buf, strerror(errno));
1778 			(void) fprintf(stderr, gettext("use '-m' "
1779 			    "option to provide a different default\n"));
1780 			goto errout;
1781 		} else if (dirp) {
1782 			int count = 0;
1783 
1784 			while (count < 3 && readdir(dirp) != NULL)
1785 				count++;
1786 			(void) closedir(dirp);
1787 
1788 			if (count > 2) {
1789 				(void) fprintf(stderr, gettext("mountpoint "
1790 				    "'%s' exists and is not empty\n"), buf);
1791 				(void) fprintf(stderr, gettext("use '-m' "
1792 				    "option to provide a "
1793 				    "different default\n"));
1794 				goto errout;
1795 			}
1796 		}
1797 	}
1798 
1799 	/*
1800 	 * Now that the mountpoint's validity has been checked, ensure that
1801 	 * the property is set appropriately prior to creating the pool.
1802 	 */
1803 	if (mountpoint != NULL) {
1804 		ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1805 		    mountpoint, &fsprops, B_FALSE);
1806 		if (ret != 0)
1807 			goto errout;
1808 	}
1809 
1810 	ret = 1;
1811 	if (dryrun) {
1812 		/*
1813 		 * For a dry run invocation, print out a basic message and run
1814 		 * through all the vdevs in the list and print out in an
1815 		 * appropriate hierarchy.
1816 		 */
1817 		(void) printf(gettext("would create '%s' with the "
1818 		    "following layout:\n\n"), poolname);
1819 
1820 		print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
1821 		print_vdev_tree(NULL, "dedup", nvroot, 0,
1822 		    VDEV_ALLOC_BIAS_DEDUP, 0);
1823 		print_vdev_tree(NULL, "special", nvroot, 0,
1824 		    VDEV_ALLOC_BIAS_SPECIAL, 0);
1825 		print_vdev_tree(NULL, "logs", nvroot, 0,
1826 		    VDEV_ALLOC_BIAS_LOG, 0);
1827 		print_cache_list(nvroot, 0);
1828 		print_spare_list(nvroot, 0);
1829 
1830 		ret = 0;
1831 	} else {
1832 		/*
1833 		 * Load in feature set.
1834 		 * Note: if compatibility property not given, we'll have
1835 		 * NULL, which means 'all features'.
1836 		 */
1837 		boolean_t requested_features[SPA_FEATURES];
1838 		if (zpool_do_load_compat(compat, requested_features) !=
1839 		    ZPOOL_COMPATIBILITY_OK)
1840 			goto errout;
1841 
1842 		/*
1843 		 * props contains list of features to enable.
1844 		 * For each feature:
1845 		 *  - remove it if feature@name=disabled
1846 		 *  - leave it there if feature@name=enabled
1847 		 *  - add it if:
1848 		 *    - enable_pool_features (ie: no '-d' or '-o version')
1849 		 *    - it's supported by the kernel module
1850 		 *    - it's in the requested feature set
1851 		 *  - warn if it's enabled but not in compat
1852 		 */
1853 		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
1854 			char propname[MAXPATHLEN];
1855 			const char *propval;
1856 			zfeature_info_t *feat = &spa_feature_table[i];
1857 
1858 			(void) snprintf(propname, sizeof (propname),
1859 			    "feature@%s", feat->fi_uname);
1860 
1861 			if (!nvlist_lookup_string(props, propname, &propval)) {
1862 				if (strcmp(propval,
1863 				    ZFS_FEATURE_DISABLED) == 0) {
1864 					(void) nvlist_remove_all(props,
1865 					    propname);
1866 				} else if (strcmp(propval,
1867 				    ZFS_FEATURE_ENABLED) == 0 &&
1868 				    !requested_features[i]) {
1869 					(void) fprintf(stderr, gettext(
1870 					    "Warning: feature \"%s\" enabled "
1871 					    "but is not in specified "
1872 					    "'compatibility' feature set.\n"),
1873 					    feat->fi_uname);
1874 				}
1875 			} else if (
1876 			    enable_pool_features &&
1877 			    feat->fi_zfs_mod_supported &&
1878 			    requested_features[i]) {
1879 				ret = add_prop_list(propname,
1880 				    ZFS_FEATURE_ENABLED, &props, B_TRUE);
1881 				if (ret != 0)
1882 					goto errout;
1883 			}
1884 		}
1885 
1886 		ret = 1;
1887 		if (zpool_create(g_zfs, poolname,
1888 		    nvroot, props, fsprops) == 0) {
1889 			zfs_handle_t *pool = zfs_open(g_zfs,
1890 			    tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
1891 			if (pool != NULL) {
1892 				if (zfs_mount(pool, NULL, 0) == 0) {
1893 					ret = zfs_share(pool, NULL);
1894 					zfs_commit_shares(NULL);
1895 				}
1896 				zfs_close(pool);
1897 			}
1898 		} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
1899 			(void) fprintf(stderr, gettext("pool name may have "
1900 			    "been omitted\n"));
1901 		}
1902 	}
1903 
1904 errout:
1905 	nvlist_free(nvroot);
1906 	nvlist_free(fsprops);
1907 	nvlist_free(props);
1908 	return (ret);
1909 badusage:
1910 	nvlist_free(fsprops);
1911 	nvlist_free(props);
1912 	usage(B_FALSE);
1913 	return (2);
1914 }
1915 
1916 /*
1917  * zpool destroy <pool>
1918  *
1919  * 	-f	Forcefully unmount any datasets
1920  *
1921  * Destroy the given pool.  Automatically unmounts any datasets in the pool.
1922  */
1923 int
1924 zpool_do_destroy(int argc, char **argv)
1925 {
1926 	boolean_t force = B_FALSE;
1927 	int c;
1928 	char *pool;
1929 	zpool_handle_t *zhp;
1930 	int ret;
1931 
1932 	/* check options */
1933 	while ((c = getopt(argc, argv, "f")) != -1) {
1934 		switch (c) {
1935 		case 'f':
1936 			force = B_TRUE;
1937 			break;
1938 		case '?':
1939 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
1940 			    optopt);
1941 			usage(B_FALSE);
1942 		}
1943 	}
1944 
1945 	argc -= optind;
1946 	argv += optind;
1947 
1948 	/* check arguments */
1949 	if (argc < 1) {
1950 		(void) fprintf(stderr, gettext("missing pool argument\n"));
1951 		usage(B_FALSE);
1952 	}
1953 	if (argc > 1) {
1954 		(void) fprintf(stderr, gettext("too many arguments\n"));
1955 		usage(B_FALSE);
1956 	}
1957 
1958 	pool = argv[0];
1959 
1960 	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
1961 		/*
1962 		 * As a special case, check for use of '/' in the name, and
1963 		 * direct the user to use 'zfs destroy' instead.
1964 		 */
1965 		if (strchr(pool, '/') != NULL)
1966 			(void) fprintf(stderr, gettext("use 'zfs destroy' to "
1967 			    "destroy a dataset\n"));
1968 		return (1);
1969 	}
1970 
1971 	if (zpool_disable_datasets(zhp, force) != 0) {
1972 		(void) fprintf(stderr, gettext("could not destroy '%s': "
1973 		    "could not unmount datasets\n"), zpool_get_name(zhp));
1974 		zpool_close(zhp);
1975 		return (1);
1976 	}
1977 
1978 	/* The history must be logged as part of the export */
1979 	log_history = B_FALSE;
1980 
1981 	ret = (zpool_destroy(zhp, history_str) != 0);
1982 
1983 	zpool_close(zhp);
1984 
1985 	return (ret);
1986 }
1987 
1988 typedef struct export_cbdata {
1989 	boolean_t force;
1990 	boolean_t hardforce;
1991 } export_cbdata_t;
1992 
1993 /*
1994  * Export one pool
1995  */
1996 static int
1997 zpool_export_one(zpool_handle_t *zhp, void *data)
1998 {
1999 	export_cbdata_t *cb = data;
2000 
2001 	if (zpool_disable_datasets(zhp, cb->force) != 0)
2002 		return (1);
2003 
2004 	/* The history must be logged as part of the export */
2005 	log_history = B_FALSE;
2006 
2007 	if (cb->hardforce) {
2008 		if (zpool_export_force(zhp, history_str) != 0)
2009 			return (1);
2010 	} else if (zpool_export(zhp, cb->force, history_str) != 0) {
2011 		return (1);
2012 	}
2013 
2014 	return (0);
2015 }
2016 
2017 /*
2018  * zpool export [-f] <pool> ...
2019  *
2020  *	-a	Export all pools
2021  *	-f	Forcefully unmount datasets
2022  *
2023  * Export the given pools.  By default, the command will attempt to cleanly
2024  * unmount any active datasets within the pool.  If the '-f' flag is specified,
2025  * then the datasets will be forcefully unmounted.
2026  */
2027 int
2028 zpool_do_export(int argc, char **argv)
2029 {
2030 	export_cbdata_t cb;
2031 	boolean_t do_all = B_FALSE;
2032 	boolean_t force = B_FALSE;
2033 	boolean_t hardforce = B_FALSE;
2034 	int c, ret;
2035 
2036 	/* check options */
2037 	while ((c = getopt(argc, argv, "afF")) != -1) {
2038 		switch (c) {
2039 		case 'a':
2040 			do_all = B_TRUE;
2041 			break;
2042 		case 'f':
2043 			force = B_TRUE;
2044 			break;
2045 		case 'F':
2046 			hardforce = B_TRUE;
2047 			break;
2048 		case '?':
2049 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
2050 			    optopt);
2051 			usage(B_FALSE);
2052 		}
2053 	}
2054 
2055 	cb.force = force;
2056 	cb.hardforce = hardforce;
2057 	argc -= optind;
2058 	argv += optind;
2059 
2060 	if (do_all) {
2061 		if (argc != 0) {
2062 			(void) fprintf(stderr, gettext("too many arguments\n"));
2063 			usage(B_FALSE);
2064 		}
2065 
2066 		return (for_each_pool(argc, argv, B_TRUE, NULL,
2067 		    ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb));
2068 	}
2069 
2070 	/* check arguments */
2071 	if (argc < 1) {
2072 		(void) fprintf(stderr, gettext("missing pool argument\n"));
2073 		usage(B_FALSE);
2074 	}
2075 
2076 	ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2077 	    B_FALSE, zpool_export_one, &cb);
2078 
2079 	return (ret);
2080 }
2081 
2082 /*
2083  * Given a vdev configuration, determine the maximum width needed for the device
2084  * name column.
2085  */
2086 static int
2087 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
2088     int name_flags)
2089 {
2090 	static const char *const subtypes[] =
2091 	    {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
2092 
2093 	char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
2094 	max = MAX(strlen(name) + depth, max);
2095 	free(name);
2096 
2097 	nvlist_t **child;
2098 	uint_t children;
2099 	for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
2100 		if (nvlist_lookup_nvlist_array(nv, subtypes[i],
2101 		    &child, &children) == 0)
2102 			for (uint_t c = 0; c < children; ++c)
2103 				max = MAX(max_width(zhp, child[c], depth + 2,
2104 				    max, name_flags), max);
2105 
2106 	return (max);
2107 }
2108 
2109 typedef struct spare_cbdata {
2110 	uint64_t	cb_guid;
2111 	zpool_handle_t	*cb_zhp;
2112 } spare_cbdata_t;
2113 
2114 static boolean_t
2115 find_vdev(nvlist_t *nv, uint64_t search)
2116 {
2117 	uint64_t guid;
2118 	nvlist_t **child;
2119 	uint_t c, children;
2120 
2121 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
2122 	    search == guid)
2123 		return (B_TRUE);
2124 
2125 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2126 	    &child, &children) == 0) {
2127 		for (c = 0; c < children; c++)
2128 			if (find_vdev(child[c], search))
2129 				return (B_TRUE);
2130 	}
2131 
2132 	return (B_FALSE);
2133 }
2134 
2135 static int
2136 find_spare(zpool_handle_t *zhp, void *data)
2137 {
2138 	spare_cbdata_t *cbp = data;
2139 	nvlist_t *config, *nvroot;
2140 
2141 	config = zpool_get_config(zhp, NULL);
2142 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2143 	    &nvroot) == 0);
2144 
2145 	if (find_vdev(nvroot, cbp->cb_guid)) {
2146 		cbp->cb_zhp = zhp;
2147 		return (1);
2148 	}
2149 
2150 	zpool_close(zhp);
2151 	return (0);
2152 }
2153 
2154 typedef struct status_cbdata {
2155 	int		cb_count;
2156 	int		cb_name_flags;
2157 	int		cb_namewidth;
2158 	boolean_t	cb_allpools;
2159 	boolean_t	cb_verbose;
2160 	boolean_t	cb_literal;
2161 	boolean_t	cb_explain;
2162 	boolean_t	cb_first;
2163 	boolean_t	cb_dedup_stats;
2164 	boolean_t	cb_print_unhealthy;
2165 	boolean_t	cb_print_status;
2166 	boolean_t	cb_print_slow_ios;
2167 	boolean_t	cb_print_vdev_init;
2168 	boolean_t	cb_print_vdev_trim;
2169 	vdev_cmd_data_list_t	*vcdl;
2170 	boolean_t	cb_print_power;
2171 } status_cbdata_t;
2172 
2173 /* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
2174 static boolean_t
2175 is_blank_str(const char *str)
2176 {
2177 	for (; str != NULL && *str != '\0'; ++str)
2178 		if (!isblank(*str))
2179 			return (B_FALSE);
2180 	return (B_TRUE);
2181 }
2182 
2183 /* Print command output lines for specific vdev in a specific pool */
2184 static void
2185 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
2186 {
2187 	vdev_cmd_data_t *data;
2188 	int i, j;
2189 	const char *val;
2190 
2191 	for (i = 0; i < vcdl->count; i++) {
2192 		if ((strcmp(vcdl->data[i].path, path) != 0) ||
2193 		    (strcmp(vcdl->data[i].pool, pool) != 0)) {
2194 			/* Not the vdev we're looking for */
2195 			continue;
2196 		}
2197 
2198 		data = &vcdl->data[i];
2199 		/* Print out all the output values for this vdev */
2200 		for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2201 			val = NULL;
2202 			/* Does this vdev have values for this column? */
2203 			for (int k = 0; k < data->cols_cnt; k++) {
2204 				if (strcmp(data->cols[k],
2205 				    vcdl->uniq_cols[j]) == 0) {
2206 					/* yes it does, record the value */
2207 					val = data->lines[k];
2208 					break;
2209 				}
2210 			}
2211 			/*
2212 			 * Mark empty values with dashes to make output
2213 			 * awk-able.
2214 			 */
2215 			if (val == NULL || is_blank_str(val))
2216 				val = "-";
2217 
2218 			printf("%*s", vcdl->uniq_cols_width[j], val);
2219 			if (j < vcdl->uniq_cols_cnt - 1)
2220 				fputs("  ", stdout);
2221 		}
2222 
2223 		/* Print out any values that aren't in a column at the end */
2224 		for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2225 			/* Did we have any columns?  If so print a spacer. */
2226 			if (vcdl->uniq_cols_cnt > 0)
2227 				fputs("  ", stdout);
2228 
2229 			val = data->lines[j];
2230 			fputs(val ?: "", stdout);
2231 		}
2232 		break;
2233 	}
2234 }
2235 
2236 /*
2237  * Print vdev initialization status for leaves
2238  */
2239 static void
2240 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2241 {
2242 	if (verbose) {
2243 		if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2244 		    vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2245 		    vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2246 		    !vs->vs_scan_removing) {
2247 			char zbuf[1024];
2248 			char tbuf[256];
2249 			struct tm zaction_ts;
2250 
2251 			time_t t = vs->vs_initialize_action_time;
2252 			int initialize_pct = 100;
2253 			if (vs->vs_initialize_state !=
2254 			    VDEV_INITIALIZE_COMPLETE) {
2255 				initialize_pct = (vs->vs_initialize_bytes_done *
2256 				    100 / (vs->vs_initialize_bytes_est + 1));
2257 			}
2258 
2259 			(void) localtime_r(&t, &zaction_ts);
2260 			(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
2261 
2262 			switch (vs->vs_initialize_state) {
2263 			case VDEV_INITIALIZE_SUSPENDED:
2264 				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2265 				    gettext("suspended, started at"), tbuf);
2266 				break;
2267 			case VDEV_INITIALIZE_ACTIVE:
2268 				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2269 				    gettext("started at"), tbuf);
2270 				break;
2271 			case VDEV_INITIALIZE_COMPLETE:
2272 				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2273 				    gettext("completed at"), tbuf);
2274 				break;
2275 			}
2276 
2277 			(void) printf(gettext("  (%d%% initialized%s)"),
2278 			    initialize_pct, zbuf);
2279 		} else {
2280 			(void) printf(gettext("  (uninitialized)"));
2281 		}
2282 	} else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2283 		(void) printf(gettext("  (initializing)"));
2284 	}
2285 }
2286 
2287 /*
2288  * Print vdev TRIM status for leaves
2289  */
2290 static void
2291 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2292 {
2293 	if (verbose) {
2294 		if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2295 		    vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2296 		    vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2297 		    !vs->vs_scan_removing) {
2298 			char zbuf[1024];
2299 			char tbuf[256];
2300 			struct tm zaction_ts;
2301 
2302 			time_t t = vs->vs_trim_action_time;
2303 			int trim_pct = 100;
2304 			if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
2305 				trim_pct = (vs->vs_trim_bytes_done *
2306 				    100 / (vs->vs_trim_bytes_est + 1));
2307 			}
2308 
2309 			(void) localtime_r(&t, &zaction_ts);
2310 			(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
2311 
2312 			switch (vs->vs_trim_state) {
2313 			case VDEV_TRIM_SUSPENDED:
2314 				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2315 				    gettext("suspended, started at"), tbuf);
2316 				break;
2317 			case VDEV_TRIM_ACTIVE:
2318 				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2319 				    gettext("started at"), tbuf);
2320 				break;
2321 			case VDEV_TRIM_COMPLETE:
2322 				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2323 				    gettext("completed at"), tbuf);
2324 				break;
2325 			}
2326 
2327 			(void) printf(gettext("  (%d%% trimmed%s)"),
2328 			    trim_pct, zbuf);
2329 		} else if (vs->vs_trim_notsup) {
2330 			(void) printf(gettext("  (trim unsupported)"));
2331 		} else {
2332 			(void) printf(gettext("  (untrimmed)"));
2333 		}
2334 	} else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2335 		(void) printf(gettext("  (trimming)"));
2336 	}
2337 }
2338 
2339 /*
2340  * Return the color associated with a health string.  This includes returning
2341  * NULL for no color change.
2342  */
2343 static const char *
2344 health_str_to_color(const char *health)
2345 {
2346 	if (strcmp(health, gettext("FAULTED")) == 0 ||
2347 	    strcmp(health, gettext("SUSPENDED")) == 0 ||
2348 	    strcmp(health, gettext("UNAVAIL")) == 0) {
2349 		return (ANSI_RED);
2350 	}
2351 
2352 	if (strcmp(health, gettext("OFFLINE")) == 0 ||
2353 	    strcmp(health, gettext("DEGRADED")) == 0 ||
2354 	    strcmp(health, gettext("REMOVED")) == 0) {
2355 		return (ANSI_YELLOW);
2356 	}
2357 
2358 	return (NULL);
2359 }
2360 
2361 /*
2362  * Called for each leaf vdev.  Returns 0 if the vdev is healthy.
2363  * A vdev is unhealthy if any of the following are true:
2364  * 1) there are read, write, or checksum errors,
2365  * 2) its state is not ONLINE, or
2366  * 3) slow IO reporting was requested (-s) and there are slow IOs.
2367  */
2368 static int
2369 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
2370 {
2371 	status_cbdata_t *cb = data;
2372 	vdev_stat_t *vs;
2373 	uint_t vsc;
2374 	(void) hdl_data;
2375 
2376 	if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2377 	    (uint64_t **)&vs, &vsc) != 0)
2378 		return (1);
2379 
2380 	if (vs->vs_checksum_errors || vs->vs_read_errors ||
2381 	    vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
2382 		return (1);
2383 
2384 	if (cb->cb_print_slow_ios && vs->vs_slow_ios)
2385 		return (1);
2386 
2387 	return (0);
2388 }
2389 
2390 /*
2391  * Print out configuration state as requested by status_callback.
2392  */
2393 static void
2394 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
2395     nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
2396 {
2397 	nvlist_t **child, *root;
2398 	uint_t c, i, vsc, children;
2399 	pool_scan_stat_t *ps = NULL;
2400 	vdev_stat_t *vs;
2401 	char rbuf[6], wbuf[6], cbuf[6];
2402 	char *vname;
2403 	uint64_t notpresent;
2404 	spare_cbdata_t spare_cb;
2405 	const char *state;
2406 	const char *type;
2407 	const char *path = NULL;
2408 	const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
2409 	    *scolor = NULL;
2410 
2411 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2412 	    &child, &children) != 0)
2413 		children = 0;
2414 
2415 	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2416 	    (uint64_t **)&vs, &vsc) == 0);
2417 
2418 	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2419 
2420 	if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2421 		return;
2422 
2423 	state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2424 
2425 	if (isspare) {
2426 		/*
2427 		 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
2428 		 * online drives.
2429 		 */
2430 		if (vs->vs_aux == VDEV_AUX_SPARED)
2431 			state = gettext("INUSE");
2432 		else if (vs->vs_state == VDEV_STATE_HEALTHY)
2433 			state = gettext("AVAIL");
2434 	}
2435 
2436 	/*
2437 	 * If '-e' is specified then top-level vdevs and their children
2438 	 * can be pruned if all of their leaves are healthy.
2439 	 */
2440 	if (cb->cb_print_unhealthy && depth > 0 &&
2441 	    for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
2442 		return;
2443 	}
2444 
2445 	printf_color(health_str_to_color(state),
2446 	    "\t%*s%-*s  %-8s", depth, "", cb->cb_namewidth - depth,
2447 	    name, state);
2448 
2449 	if (!isspare) {
2450 		if (vs->vs_read_errors)
2451 			rcolor = ANSI_RED;
2452 
2453 		if (vs->vs_write_errors)
2454 			wcolor = ANSI_RED;
2455 
2456 		if (vs->vs_checksum_errors)
2457 			ccolor = ANSI_RED;
2458 
2459 		if (vs->vs_slow_ios)
2460 			scolor = ANSI_BLUE;
2461 
2462 		if (cb->cb_literal) {
2463 			fputc(' ', stdout);
2464 			printf_color(rcolor, "%5llu",
2465 			    (u_longlong_t)vs->vs_read_errors);
2466 			fputc(' ', stdout);
2467 			printf_color(wcolor, "%5llu",
2468 			    (u_longlong_t)vs->vs_write_errors);
2469 			fputc(' ', stdout);
2470 			printf_color(ccolor, "%5llu",
2471 			    (u_longlong_t)vs->vs_checksum_errors);
2472 		} else {
2473 			zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
2474 			zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
2475 			zfs_nicenum(vs->vs_checksum_errors, cbuf,
2476 			    sizeof (cbuf));
2477 			fputc(' ', stdout);
2478 			printf_color(rcolor, "%5s", rbuf);
2479 			fputc(' ', stdout);
2480 			printf_color(wcolor, "%5s", wbuf);
2481 			fputc(' ', stdout);
2482 			printf_color(ccolor, "%5s", cbuf);
2483 		}
2484 		if (cb->cb_print_slow_ios) {
2485 			if (children == 0)  {
2486 				/* Only leafs vdevs have slow IOs */
2487 				zfs_nicenum(vs->vs_slow_ios, rbuf,
2488 				    sizeof (rbuf));
2489 			} else {
2490 				snprintf(rbuf, sizeof (rbuf), "-");
2491 			}
2492 
2493 			if (cb->cb_literal)
2494 				printf_color(scolor, " %5llu",
2495 				    (u_longlong_t)vs->vs_slow_ios);
2496 			else
2497 				printf_color(scolor, " %5s", rbuf);
2498 		}
2499 		if (cb->cb_print_power) {
2500 			if (children == 0)  {
2501 				/* Only leaf vdevs have physical slots */
2502 				switch (zpool_power_current_state(zhp, (char *)
2503 				    fnvlist_lookup_string(nv,
2504 				    ZPOOL_CONFIG_PATH))) {
2505 				case 0:
2506 					printf_color(ANSI_RED, " %5s",
2507 					    gettext("off"));
2508 					break;
2509 				case 1:
2510 					printf(" %5s", gettext("on"));
2511 					break;
2512 				default:
2513 					printf(" %5s", "-");
2514 				}
2515 			} else {
2516 				printf(" %5s", "-");
2517 			}
2518 		}
2519 	}
2520 
2521 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2522 	    &notpresent) == 0) {
2523 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
2524 		(void) printf("  %s %s", gettext("was"), path);
2525 	} else if (vs->vs_aux != 0) {
2526 		(void) printf("  ");
2527 		color_start(ANSI_RED);
2528 		switch (vs->vs_aux) {
2529 		case VDEV_AUX_OPEN_FAILED:
2530 			(void) printf(gettext("cannot open"));
2531 			break;
2532 
2533 		case VDEV_AUX_BAD_GUID_SUM:
2534 			(void) printf(gettext("missing device"));
2535 			break;
2536 
2537 		case VDEV_AUX_NO_REPLICAS:
2538 			(void) printf(gettext("insufficient replicas"));
2539 			break;
2540 
2541 		case VDEV_AUX_VERSION_NEWER:
2542 			(void) printf(gettext("newer version"));
2543 			break;
2544 
2545 		case VDEV_AUX_UNSUP_FEAT:
2546 			(void) printf(gettext("unsupported feature(s)"));
2547 			break;
2548 
2549 		case VDEV_AUX_ASHIFT_TOO_BIG:
2550 			(void) printf(gettext("unsupported minimum blocksize"));
2551 			break;
2552 
2553 		case VDEV_AUX_SPARED:
2554 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2555 			    &spare_cb.cb_guid) == 0);
2556 			if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
2557 				if (strcmp(zpool_get_name(spare_cb.cb_zhp),
2558 				    zpool_get_name(zhp)) == 0)
2559 					(void) printf(gettext("currently in "
2560 					    "use"));
2561 				else
2562 					(void) printf(gettext("in use by "
2563 					    "pool '%s'"),
2564 					    zpool_get_name(spare_cb.cb_zhp));
2565 				zpool_close(spare_cb.cb_zhp);
2566 			} else {
2567 				(void) printf(gettext("currently in use"));
2568 			}
2569 			break;
2570 
2571 		case VDEV_AUX_ERR_EXCEEDED:
2572 			if (vs->vs_read_errors + vs->vs_write_errors +
2573 			    vs->vs_checksum_errors == 0 && children == 0 &&
2574 			    vs->vs_slow_ios > 0) {
2575 				(void) printf(gettext("too many slow I/Os"));
2576 			} else {
2577 				(void) printf(gettext("too many errors"));
2578 			}
2579 			break;
2580 
2581 		case VDEV_AUX_IO_FAILURE:
2582 			(void) printf(gettext("experienced I/O failures"));
2583 			break;
2584 
2585 		case VDEV_AUX_BAD_LOG:
2586 			(void) printf(gettext("bad intent log"));
2587 			break;
2588 
2589 		case VDEV_AUX_EXTERNAL:
2590 			(void) printf(gettext("external device fault"));
2591 			break;
2592 
2593 		case VDEV_AUX_SPLIT_POOL:
2594 			(void) printf(gettext("split into new pool"));
2595 			break;
2596 
2597 		case VDEV_AUX_ACTIVE:
2598 			(void) printf(gettext("currently in use"));
2599 			break;
2600 
2601 		case VDEV_AUX_CHILDREN_OFFLINE:
2602 			(void) printf(gettext("all children offline"));
2603 			break;
2604 
2605 		case VDEV_AUX_BAD_LABEL:
2606 			(void) printf(gettext("invalid label"));
2607 			break;
2608 
2609 		default:
2610 			(void) printf(gettext("corrupted data"));
2611 			break;
2612 		}
2613 		color_end();
2614 	} else if (children == 0 && !isspare &&
2615 	    getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
2616 	    VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
2617 	    vs->vs_configured_ashift < vs->vs_physical_ashift) {
2618 		(void) printf(
2619 		    gettext("  block size: %dB configured, %dB native"),
2620 		    1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
2621 	}
2622 
2623 	if (vs->vs_scan_removing != 0) {
2624 		(void) printf(gettext("  (removing)"));
2625 	} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
2626 		(void) printf(gettext("  (non-allocating)"));
2627 	}
2628 
2629 	/* The root vdev has the scrub/resilver stats */
2630 	root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2631 	    ZPOOL_CONFIG_VDEV_TREE);
2632 	(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
2633 	    (uint64_t **)&ps, &c);
2634 
2635 	/*
2636 	 * If you force fault a drive that's resilvering, its scan stats can
2637 	 * get frozen in time, giving the false impression that it's
2638 	 * being resilvered.  That's why we check the state to see if the vdev
2639 	 * is healthy before reporting "resilvering" or "repairing".
2640 	 */
2641 	if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
2642 	    vs->vs_state == VDEV_STATE_HEALTHY) {
2643 		if (vs->vs_scan_processed != 0) {
2644 			(void) printf(gettext("  (%s)"),
2645 			    (ps->pss_func == POOL_SCAN_RESILVER) ?
2646 			    "resilvering" : "repairing");
2647 		} else if (vs->vs_resilver_deferred) {
2648 			(void) printf(gettext("  (awaiting resilver)"));
2649 		}
2650 	}
2651 
2652 	/* The top-level vdevs have the rebuild stats */
2653 	if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
2654 	    children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
2655 		if (vs->vs_rebuild_processed != 0) {
2656 			(void) printf(gettext("  (resilvering)"));
2657 		}
2658 	}
2659 
2660 	if (cb->vcdl != NULL) {
2661 		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2662 			printf("  ");
2663 			zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
2664 		}
2665 	}
2666 
2667 	/* Display vdev initialization and trim status for leaves. */
2668 	if (children == 0) {
2669 		print_status_initialize(vs, cb->cb_print_vdev_init);
2670 		print_status_trim(vs, cb->cb_print_vdev_trim);
2671 	}
2672 
2673 	(void) printf("\n");
2674 
2675 	for (c = 0; c < children; c++) {
2676 		uint64_t islog = B_FALSE, ishole = B_FALSE;
2677 
2678 		/* Don't print logs or holes here */
2679 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2680 		    &islog);
2681 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2682 		    &ishole);
2683 		if (islog || ishole)
2684 			continue;
2685 		/* Only print normal classes here */
2686 		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2687 			continue;
2688 
2689 		/* Provide vdev_rebuild_stats to children if available */
2690 		if (vrs == NULL) {
2691 			(void) nvlist_lookup_uint64_array(nv,
2692 			    ZPOOL_CONFIG_REBUILD_STATS,
2693 			    (uint64_t **)&vrs, &i);
2694 		}
2695 
2696 		vname = zpool_vdev_name(g_zfs, zhp, child[c],
2697 		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2698 		print_status_config(zhp, cb, vname, child[c], depth + 2,
2699 		    isspare, vrs);
2700 		free(vname);
2701 	}
2702 }
2703 
2704 /*
2705  * Print the configuration of an exported pool.  Iterate over all vdevs in the
2706  * pool, printing out the name and status for each one.
2707  */
2708 static void
2709 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
2710     int depth)
2711 {
2712 	nvlist_t **child;
2713 	uint_t c, children;
2714 	vdev_stat_t *vs;
2715 	const char *type;
2716 	char *vname;
2717 
2718 	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2719 	if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
2720 	    strcmp(type, VDEV_TYPE_HOLE) == 0)
2721 		return;
2722 
2723 	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2724 	    (uint64_t **)&vs, &c) == 0);
2725 
2726 	(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
2727 	(void) printf("  %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
2728 
2729 	if (vs->vs_aux != 0) {
2730 		(void) printf("  ");
2731 
2732 		switch (vs->vs_aux) {
2733 		case VDEV_AUX_OPEN_FAILED:
2734 			(void) printf(gettext("cannot open"));
2735 			break;
2736 
2737 		case VDEV_AUX_BAD_GUID_SUM:
2738 			(void) printf(gettext("missing device"));
2739 			break;
2740 
2741 		case VDEV_AUX_NO_REPLICAS:
2742 			(void) printf(gettext("insufficient replicas"));
2743 			break;
2744 
2745 		case VDEV_AUX_VERSION_NEWER:
2746 			(void) printf(gettext("newer version"));
2747 			break;
2748 
2749 		case VDEV_AUX_UNSUP_FEAT:
2750 			(void) printf(gettext("unsupported feature(s)"));
2751 			break;
2752 
2753 		case VDEV_AUX_ERR_EXCEEDED:
2754 			(void) printf(gettext("too many errors"));
2755 			break;
2756 
2757 		case VDEV_AUX_ACTIVE:
2758 			(void) printf(gettext("currently in use"));
2759 			break;
2760 
2761 		case VDEV_AUX_CHILDREN_OFFLINE:
2762 			(void) printf(gettext("all children offline"));
2763 			break;
2764 
2765 		case VDEV_AUX_BAD_LABEL:
2766 			(void) printf(gettext("invalid label"));
2767 			break;
2768 
2769 		default:
2770 			(void) printf(gettext("corrupted data"));
2771 			break;
2772 		}
2773 	}
2774 	(void) printf("\n");
2775 
2776 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2777 	    &child, &children) != 0)
2778 		return;
2779 
2780 	for (c = 0; c < children; c++) {
2781 		uint64_t is_log = B_FALSE;
2782 
2783 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2784 		    &is_log);
2785 		if (is_log)
2786 			continue;
2787 		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2788 			continue;
2789 
2790 		vname = zpool_vdev_name(g_zfs, NULL, child[c],
2791 		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2792 		print_import_config(cb, vname, child[c], depth + 2);
2793 		free(vname);
2794 	}
2795 
2796 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2797 	    &child, &children) == 0) {
2798 		(void) printf(gettext("\tcache\n"));
2799 		for (c = 0; c < children; c++) {
2800 			vname = zpool_vdev_name(g_zfs, NULL, child[c],
2801 			    cb->cb_name_flags);
2802 			(void) printf("\t  %s\n", vname);
2803 			free(vname);
2804 		}
2805 	}
2806 
2807 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2808 	    &child, &children) == 0) {
2809 		(void) printf(gettext("\tspares\n"));
2810 		for (c = 0; c < children; c++) {
2811 			vname = zpool_vdev_name(g_zfs, NULL, child[c],
2812 			    cb->cb_name_flags);
2813 			(void) printf("\t  %s\n", vname);
2814 			free(vname);
2815 		}
2816 	}
2817 }
2818 
2819 /*
2820  * Print specialized class vdevs.
2821  *
2822  * These are recorded as top level vdevs in the main pool child array
2823  * but with "is_log" set to 1 or an "alloc_bias" string. We use either
2824  * print_status_config() or print_import_config() to print the top level
2825  * class vdevs then any of their children (eg mirrored slogs) are printed
2826  * recursively - which works because only the top level vdev is marked.
2827  */
2828 static void
2829 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
2830     const char *class)
2831 {
2832 	uint_t c, children;
2833 	nvlist_t **child;
2834 	boolean_t printed = B_FALSE;
2835 
2836 	assert(zhp != NULL || !cb->cb_verbose);
2837 
2838 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
2839 	    &children) != 0)
2840 		return;
2841 
2842 	for (c = 0; c < children; c++) {
2843 		uint64_t is_log = B_FALSE;
2844 		const char *bias = NULL;
2845 		const char *type = NULL;
2846 
2847 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2848 		    &is_log);
2849 
2850 		if (is_log) {
2851 			bias = (char *)VDEV_ALLOC_CLASS_LOGS;
2852 		} else {
2853 			(void) nvlist_lookup_string(child[c],
2854 			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
2855 			(void) nvlist_lookup_string(child[c],
2856 			    ZPOOL_CONFIG_TYPE, &type);
2857 		}
2858 
2859 		if (bias == NULL || strcmp(bias, class) != 0)
2860 			continue;
2861 		if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2862 			continue;
2863 
2864 		if (!printed) {
2865 			(void) printf("\t%s\t\n", gettext(class));
2866 			printed = B_TRUE;
2867 		}
2868 
2869 		char *name = zpool_vdev_name(g_zfs, zhp, child[c],
2870 		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2871 		if (cb->cb_print_status)
2872 			print_status_config(zhp, cb, name, child[c], 2,
2873 			    B_FALSE, NULL);
2874 		else
2875 			print_import_config(cb, name, child[c], 2);
2876 		free(name);
2877 	}
2878 }
2879 
2880 /*
2881  * Display the status for the given pool.
2882  */
2883 static int
2884 show_import(nvlist_t *config, boolean_t report_error)
2885 {
2886 	uint64_t pool_state;
2887 	vdev_stat_t *vs;
2888 	const char *name;
2889 	uint64_t guid;
2890 	uint64_t hostid = 0;
2891 	const char *msgid;
2892 	const char *hostname = "unknown";
2893 	nvlist_t *nvroot, *nvinfo;
2894 	zpool_status_t reason;
2895 	zpool_errata_t errata;
2896 	const char *health;
2897 	uint_t vsc;
2898 	const char *comment;
2899 	status_cbdata_t cb = { 0 };
2900 
2901 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
2902 	    &name) == 0);
2903 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
2904 	    &guid) == 0);
2905 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2906 	    &pool_state) == 0);
2907 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2908 	    &nvroot) == 0);
2909 
2910 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
2911 	    (uint64_t **)&vs, &vsc) == 0);
2912 	health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2913 
2914 	reason = zpool_import_status(config, &msgid, &errata);
2915 
2916 	/*
2917 	 * If we're importing using a cachefile, then we won't report any
2918 	 * errors unless we are in the scan phase of the import.
2919 	 */
2920 	if (reason != ZPOOL_STATUS_OK && !report_error)
2921 		return (reason);
2922 
2923 	(void) printf(gettext("   pool: %s\n"), name);
2924 	(void) printf(gettext("     id: %llu\n"), (u_longlong_t)guid);
2925 	(void) printf(gettext("  state: %s"), health);
2926 	if (pool_state == POOL_STATE_DESTROYED)
2927 		(void) printf(gettext(" (DESTROYED)"));
2928 	(void) printf("\n");
2929 
2930 	switch (reason) {
2931 	case ZPOOL_STATUS_MISSING_DEV_R:
2932 	case ZPOOL_STATUS_MISSING_DEV_NR:
2933 	case ZPOOL_STATUS_BAD_GUID_SUM:
2934 		printf_color(ANSI_BOLD, gettext("status: "));
2935 		printf_color(ANSI_YELLOW, gettext("One or more devices are "
2936 		    "missing from the system.\n"));
2937 		break;
2938 
2939 	case ZPOOL_STATUS_CORRUPT_LABEL_R:
2940 	case ZPOOL_STATUS_CORRUPT_LABEL_NR:
2941 		printf_color(ANSI_BOLD, gettext("status: "));
2942 		printf_color(ANSI_YELLOW, gettext("One or more devices contains"
2943 		    " corrupted data.\n"));
2944 		break;
2945 
2946 	case ZPOOL_STATUS_CORRUPT_DATA:
2947 		(void) printf(
2948 		    gettext(" status: The pool data is corrupted.\n"));
2949 		break;
2950 
2951 	case ZPOOL_STATUS_OFFLINE_DEV:
2952 		printf_color(ANSI_BOLD, gettext("status: "));
2953 		printf_color(ANSI_YELLOW, gettext("One or more devices "
2954 		    "are offlined.\n"));
2955 		break;
2956 
2957 	case ZPOOL_STATUS_CORRUPT_POOL:
2958 		printf_color(ANSI_BOLD, gettext("status: "));
2959 		printf_color(ANSI_YELLOW, gettext("The pool metadata is "
2960 		    "corrupted.\n"));
2961 		break;
2962 
2963 	case ZPOOL_STATUS_VERSION_OLDER:
2964 		printf_color(ANSI_BOLD, gettext("status: "));
2965 		printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
2966 		    "a legacy on-disk version.\n"));
2967 		break;
2968 
2969 	case ZPOOL_STATUS_VERSION_NEWER:
2970 		printf_color(ANSI_BOLD, gettext("status: "));
2971 		printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
2972 		    "an incompatible version.\n"));
2973 		break;
2974 
2975 	case ZPOOL_STATUS_FEAT_DISABLED:
2976 		printf_color(ANSI_BOLD, gettext("status: "));
2977 		printf_color(ANSI_YELLOW, gettext("Some supported "
2978 		    "features are not enabled on the pool.\n\t"
2979 		    "(Note that they may be intentionally disabled "
2980 		    "if the\n\t'compatibility' property is set.)\n"));
2981 		break;
2982 
2983 	case ZPOOL_STATUS_COMPATIBILITY_ERR:
2984 		printf_color(ANSI_BOLD, gettext("status: "));
2985 		printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
2986 		    "the file(s) indicated by the 'compatibility'\n"
2987 		    "property.\n"));
2988 		break;
2989 
2990 	case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
2991 		printf_color(ANSI_BOLD, gettext("status: "));
2992 		printf_color(ANSI_YELLOW, gettext("One or more features "
2993 		    "are enabled on the pool despite not being\n"
2994 		    "requested by the 'compatibility' property.\n"));
2995 		break;
2996 
2997 	case ZPOOL_STATUS_UNSUP_FEAT_READ:
2998 		printf_color(ANSI_BOLD, gettext("status: "));
2999 		printf_color(ANSI_YELLOW, gettext("The pool uses the following "
3000 		    "feature(s) not supported on this system:\n"));
3001 		color_start(ANSI_YELLOW);
3002 		zpool_print_unsup_feat(config);
3003 		color_end();
3004 		break;
3005 
3006 	case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3007 		printf_color(ANSI_BOLD, gettext("status: "));
3008 		printf_color(ANSI_YELLOW, gettext("The pool can only be "
3009 		    "accessed in read-only mode on this system. It\n\tcannot be"
3010 		    " accessed in read-write mode because it uses the "
3011 		    "following\n\tfeature(s) not supported on this system:\n"));
3012 		color_start(ANSI_YELLOW);
3013 		zpool_print_unsup_feat(config);
3014 		color_end();
3015 		break;
3016 
3017 	case ZPOOL_STATUS_HOSTID_ACTIVE:
3018 		printf_color(ANSI_BOLD, gettext("status: "));
3019 		printf_color(ANSI_YELLOW, gettext("The pool is currently "
3020 		    "imported by another system.\n"));
3021 		break;
3022 
3023 	case ZPOOL_STATUS_HOSTID_REQUIRED:
3024 		printf_color(ANSI_BOLD, gettext("status: "));
3025 		printf_color(ANSI_YELLOW, gettext("The pool has the "
3026 		    "multihost property on.  It cannot\n\tbe safely imported "
3027 		    "when the system hostid is not set.\n"));
3028 		break;
3029 
3030 	case ZPOOL_STATUS_HOSTID_MISMATCH:
3031 		printf_color(ANSI_BOLD, gettext("status: "));
3032 		printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
3033 		    "by another system.\n"));
3034 		break;
3035 
3036 	case ZPOOL_STATUS_FAULTED_DEV_R:
3037 	case ZPOOL_STATUS_FAULTED_DEV_NR:
3038 		printf_color(ANSI_BOLD, gettext("status: "));
3039 		printf_color(ANSI_YELLOW, gettext("One or more devices are "
3040 		    "faulted.\n"));
3041 		break;
3042 
3043 	case ZPOOL_STATUS_BAD_LOG:
3044 		printf_color(ANSI_BOLD, gettext("status: "));
3045 		printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
3046 		    "be read.\n"));
3047 		break;
3048 
3049 	case ZPOOL_STATUS_RESILVERING:
3050 	case ZPOOL_STATUS_REBUILDING:
3051 		printf_color(ANSI_BOLD, gettext("status: "));
3052 		printf_color(ANSI_YELLOW, gettext("One or more devices were "
3053 		    "being resilvered.\n"));
3054 		break;
3055 
3056 	case ZPOOL_STATUS_ERRATA:
3057 		printf_color(ANSI_BOLD, gettext("status: "));
3058 		printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
3059 		    errata);
3060 		break;
3061 
3062 	case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
3063 		printf_color(ANSI_BOLD, gettext("status: "));
3064 		printf_color(ANSI_YELLOW, gettext("One or more devices are "
3065 		    "configured to use a non-native block size.\n"
3066 		    "\tExpect reduced performance.\n"));
3067 		break;
3068 
3069 	default:
3070 		/*
3071 		 * No other status can be seen when importing pools.
3072 		 */
3073 		assert(reason == ZPOOL_STATUS_OK);
3074 	}
3075 
3076 	/*
3077 	 * Print out an action according to the overall state of the pool.
3078 	 */
3079 	if (vs->vs_state == VDEV_STATE_HEALTHY) {
3080 		if (reason == ZPOOL_STATUS_VERSION_OLDER ||
3081 		    reason == ZPOOL_STATUS_FEAT_DISABLED) {
3082 			(void) printf(gettext(" action: The pool can be "
3083 			    "imported using its name or numeric identifier, "
3084 			    "though\n\tsome features will not be available "
3085 			    "without an explicit 'zpool upgrade'.\n"));
3086 		} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
3087 			(void) printf(gettext(" action: The pool can be "
3088 			    "imported using its name or numeric\n\tidentifier, "
3089 			    "though the file(s) indicated by its "
3090 			    "'compatibility'\n\tproperty cannot be parsed at "
3091 			    "this time.\n"));
3092 		} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
3093 			(void) printf(gettext(" action: The pool can be "
3094 			    "imported using its name or numeric "
3095 			    "identifier and\n\tthe '-f' flag.\n"));
3096 		} else if (reason == ZPOOL_STATUS_ERRATA) {
3097 			switch (errata) {
3098 			case ZPOOL_ERRATA_NONE:
3099 				break;
3100 
3101 			case ZPOOL_ERRATA_ZOL_2094_SCRUB:
3102 				(void) printf(gettext(" action: The pool can "
3103 				    "be imported using its name or numeric "
3104 				    "identifier,\n\thowever there is a compat"
3105 				    "ibility issue which should be corrected"
3106 				    "\n\tby running 'zpool scrub'\n"));
3107 				break;
3108 
3109 			case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
3110 				(void) printf(gettext(" action: The pool can"
3111 				    "not be imported with this version of ZFS "
3112 				    "due to\n\tan active asynchronous destroy. "
3113 				    "Revert to an earlier version\n\tand "
3114 				    "allow the destroy to complete before "
3115 				    "updating.\n"));
3116 				break;
3117 
3118 			case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
3119 				(void) printf(gettext(" action: Existing "
3120 				    "encrypted datasets contain an on-disk "
3121 				    "incompatibility, which\n\tneeds to be "
3122 				    "corrected. Backup these datasets to new "
3123 				    "encrypted datasets\n\tand destroy the "
3124 				    "old ones.\n"));
3125 				break;
3126 
3127 			case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
3128 				(void) printf(gettext(" action: Existing "
3129 				    "encrypted snapshots and bookmarks contain "
3130 				    "an on-disk\n\tincompatibility. This may "
3131 				    "cause on-disk corruption if they are used"
3132 				    "\n\twith 'zfs recv'. To correct the "
3133 				    "issue, enable the bookmark_v2 feature.\n\t"
3134 				    "No additional action is needed if there "
3135 				    "are no encrypted snapshots or\n\t"
3136 				    "bookmarks. If preserving the encrypted "
3137 				    "snapshots and bookmarks is\n\trequired, "
3138 				    "use a non-raw send to backup and restore "
3139 				    "them. Alternately,\n\tthey may be removed"
3140 				    " to resolve the incompatibility.\n"));
3141 				break;
3142 			default:
3143 				/*
3144 				 * All errata must contain an action message.
3145 				 */
3146 				assert(0);
3147 			}
3148 		} else {
3149 			(void) printf(gettext(" action: The pool can be "
3150 			    "imported using its name or numeric "
3151 			    "identifier.\n"));
3152 		}
3153 	} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
3154 		(void) printf(gettext(" action: The pool can be imported "
3155 		    "despite missing or damaged devices.  The\n\tfault "
3156 		    "tolerance of the pool may be compromised if imported.\n"));
3157 	} else {
3158 		switch (reason) {
3159 		case ZPOOL_STATUS_VERSION_NEWER:
3160 			(void) printf(gettext(" action: The pool cannot be "
3161 			    "imported.  Access the pool on a system running "
3162 			    "newer\n\tsoftware, or recreate the pool from "
3163 			    "backup.\n"));
3164 			break;
3165 		case ZPOOL_STATUS_UNSUP_FEAT_READ:
3166 			printf_color(ANSI_BOLD, gettext("action: "));
3167 			printf_color(ANSI_YELLOW, gettext("The pool cannot be "
3168 			    "imported. Access the pool on a system that "
3169 			    "supports\n\tthe required feature(s), or recreate "
3170 			    "the pool from backup.\n"));
3171 			break;
3172 		case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3173 			printf_color(ANSI_BOLD, gettext("action: "));
3174 			printf_color(ANSI_YELLOW, gettext("The pool cannot be "
3175 			    "imported in read-write mode. Import the pool "
3176 			    "with\n"
3177 			    "\t\"-o readonly=on\", access the pool on a system "
3178 			    "that supports the\n\trequired feature(s), or "
3179 			    "recreate the pool from backup.\n"));
3180 			break;
3181 		case ZPOOL_STATUS_MISSING_DEV_R:
3182 		case ZPOOL_STATUS_MISSING_DEV_NR:
3183 		case ZPOOL_STATUS_BAD_GUID_SUM:
3184 			(void) printf(gettext(" action: The pool cannot be "
3185 			    "imported. Attach the missing\n\tdevices and try "
3186 			    "again.\n"));
3187 			break;
3188 		case ZPOOL_STATUS_HOSTID_ACTIVE:
3189 			VERIFY0(nvlist_lookup_nvlist(config,
3190 			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
3191 
3192 			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3193 				hostname = fnvlist_lookup_string(nvinfo,
3194 				    ZPOOL_CONFIG_MMP_HOSTNAME);
3195 
3196 			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3197 				hostid = fnvlist_lookup_uint64(nvinfo,
3198 				    ZPOOL_CONFIG_MMP_HOSTID);
3199 
3200 			(void) printf(gettext(" action: The pool must be "
3201 			    "exported from %s (hostid=%"PRIx64")\n\tbefore it "
3202 			    "can be safely imported.\n"), hostname, hostid);
3203 			break;
3204 		case ZPOOL_STATUS_HOSTID_REQUIRED:
3205 			(void) printf(gettext(" action: Set a unique system "
3206 			    "hostid with the zgenhostid(8) command.\n"));
3207 			break;
3208 		default:
3209 			(void) printf(gettext(" action: The pool cannot be "
3210 			    "imported due to damaged devices or data.\n"));
3211 		}
3212 	}
3213 
3214 	/* Print the comment attached to the pool. */
3215 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3216 		(void) printf(gettext("comment: %s\n"), comment);
3217 
3218 	/*
3219 	 * If the state is "closed" or "can't open", and the aux state
3220 	 * is "corrupt data":
3221 	 */
3222 	if (((vs->vs_state == VDEV_STATE_CLOSED) ||
3223 	    (vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
3224 	    (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
3225 		if (pool_state == POOL_STATE_DESTROYED)
3226 			(void) printf(gettext("\tThe pool was destroyed, "
3227 			    "but can be imported using the '-Df' flags.\n"));
3228 		else if (pool_state != POOL_STATE_EXPORTED)
3229 			(void) printf(gettext("\tThe pool may be active on "
3230 			    "another system, but can be imported using\n\t"
3231 			    "the '-f' flag.\n"));
3232 	}
3233 
3234 	if (msgid != NULL) {
3235 		(void) printf(gettext(
3236 		    "   see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
3237 		    msgid);
3238 	}
3239 
3240 	(void) printf(gettext(" config:\n\n"));
3241 
3242 	cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
3243 	    VDEV_NAME_TYPE_ID);
3244 	if (cb.cb_namewidth < 10)
3245 		cb.cb_namewidth = 10;
3246 
3247 	print_import_config(&cb, name, nvroot, 0);
3248 
3249 	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
3250 	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
3251 	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
3252 
3253 	if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
3254 		(void) printf(gettext("\n\tAdditional devices are known to "
3255 		    "be part of this pool, though their\n\texact "
3256 		    "configuration cannot be determined.\n"));
3257 	}
3258 	return (0);
3259 }
3260 
3261 static boolean_t
3262 zfs_force_import_required(nvlist_t *config)
3263 {
3264 	uint64_t state;
3265 	uint64_t hostid = 0;
3266 	nvlist_t *nvinfo;
3267 
3268 	state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3269 	nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3270 
3271 	/*
3272 	 * The hostid on LOAD_INFO comes from the MOS label via
3273 	 * spa_tryimport(). If its not there then we're likely talking to an
3274 	 * older kernel, so use the top one, which will be from the label
3275 	 * discovered in zpool_find_import(), or if a cachefile is in use, the
3276 	 * local hostid.
3277 	 */
3278 	if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
3279 		(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
3280 		    &hostid);
3281 
3282 	if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3283 		return (B_TRUE);
3284 
3285 	if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3286 		mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3287 		    ZPOOL_CONFIG_MMP_STATE);
3288 
3289 		if (mmp_state != MMP_STATE_INACTIVE)
3290 			return (B_TRUE);
3291 	}
3292 
3293 	return (B_FALSE);
3294 }
3295 
3296 /*
3297  * Perform the import for the given configuration.  This passes the heavy
3298  * lifting off to zpool_import_props(), and then mounts the datasets contained
3299  * within the pool.
3300  */
3301 static int
3302 do_import(nvlist_t *config, const char *newname, const char *mntopts,
3303     nvlist_t *props, int flags)
3304 {
3305 	int ret = 0;
3306 	int ms_status = 0;
3307 	zpool_handle_t *zhp;
3308 	const char *name;
3309 	uint64_t version;
3310 
3311 	name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
3312 	version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3313 
3314 	if (!SPA_VERSION_IS_SUPPORTED(version)) {
3315 		(void) fprintf(stderr, gettext("cannot import '%s': pool "
3316 		    "is formatted using an unsupported ZFS version\n"), name);
3317 		return (1);
3318 	} else if (zfs_force_import_required(config) &&
3319 	    !(flags & ZFS_IMPORT_ANY_HOST)) {
3320 		mmp_state_t mmp_state = MMP_STATE_INACTIVE;
3321 		nvlist_t *nvinfo;
3322 
3323 		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3324 		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
3325 			mmp_state = fnvlist_lookup_uint64(nvinfo,
3326 			    ZPOOL_CONFIG_MMP_STATE);
3327 
3328 		if (mmp_state == MMP_STATE_ACTIVE) {
3329 			const char *hostname = "<unknown>";
3330 			uint64_t hostid = 0;
3331 
3332 			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3333 				hostname = fnvlist_lookup_string(nvinfo,
3334 				    ZPOOL_CONFIG_MMP_HOSTNAME);
3335 
3336 			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3337 				hostid = fnvlist_lookup_uint64(nvinfo,
3338 				    ZPOOL_CONFIG_MMP_HOSTID);
3339 
3340 			(void) fprintf(stderr, gettext("cannot import '%s': "
3341 			    "pool is imported on %s (hostid: "
3342 			    "0x%"PRIx64")\nExport the pool on the other "
3343 			    "system, then run 'zpool import'.\n"),
3344 			    name, hostname, hostid);
3345 		} else if (mmp_state == MMP_STATE_NO_HOSTID) {
3346 			(void) fprintf(stderr, gettext("Cannot import '%s': "
3347 			    "pool has the multihost property on and the\n"
3348 			    "system's hostid is not set. Set a unique hostid "
3349 			    "with the zgenhostid(8) command.\n"), name);
3350 		} else {
3351 			const char *hostname = "<unknown>";
3352 			time_t timestamp = 0;
3353 			uint64_t hostid = 0;
3354 
3355 			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
3356 				hostname = fnvlist_lookup_string(nvinfo,
3357 				    ZPOOL_CONFIG_HOSTNAME);
3358 			else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
3359 				hostname = fnvlist_lookup_string(config,
3360 				    ZPOOL_CONFIG_HOSTNAME);
3361 
3362 			if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
3363 				timestamp = fnvlist_lookup_uint64(config,
3364 				    ZPOOL_CONFIG_TIMESTAMP);
3365 
3366 			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
3367 				hostid = fnvlist_lookup_uint64(nvinfo,
3368 				    ZPOOL_CONFIG_HOSTID);
3369 			else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
3370 				hostid = fnvlist_lookup_uint64(config,
3371 				    ZPOOL_CONFIG_HOSTID);
3372 
3373 			(void) fprintf(stderr, gettext("cannot import '%s': "
3374 			    "pool was previously in use from another system.\n"
3375 			    "Last accessed by %s (hostid=%"PRIx64") at %s"
3376 			    "The pool can be imported, use 'zpool import -f' "
3377 			    "to import the pool.\n"), name, hostname,
3378 			    hostid, ctime(&timestamp));
3379 		}
3380 
3381 		return (1);
3382 	}
3383 
3384 	if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
3385 		return (1);
3386 
3387 	if (newname != NULL)
3388 		name = newname;
3389 
3390 	if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
3391 		return (1);
3392 
3393 	/*
3394 	 * Loading keys is best effort. We don't want to return immediately
3395 	 * if it fails but we do want to give the error to the caller.
3396 	 */
3397 	if (flags & ZFS_IMPORT_LOAD_KEYS &&
3398 	    zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
3399 			ret = 1;
3400 
3401 	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
3402 	    !(flags & ZFS_IMPORT_ONLY)) {
3403 		ms_status = zpool_enable_datasets(zhp, mntopts, 0);
3404 		if (ms_status == EZFS_SHAREFAILED) {
3405 			(void) fprintf(stderr, gettext("Import was "
3406 			    "successful, but unable to share some datasets"));
3407 		} else if (ms_status == EZFS_MOUNTFAILED) {
3408 			(void) fprintf(stderr, gettext("Import was "
3409 			    "successful, but unable to mount some datasets"));
3410 		}
3411 	}
3412 
3413 	zpool_close(zhp);
3414 	return (ret);
3415 }
3416 
3417 static int
3418 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
3419     char *orig_name, char *new_name,
3420     boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
3421     importargs_t *import)
3422 {
3423 	nvlist_t *config = NULL;
3424 	nvlist_t *found_config = NULL;
3425 	uint64_t pool_state;
3426 
3427 	/*
3428 	 * At this point we have a list of import candidate configs. Even if
3429 	 * we were searching by pool name or guid, we still need to
3430 	 * post-process the list to deal with pool state and possible
3431 	 * duplicate names.
3432 	 */
3433 	int err = 0;
3434 	nvpair_t *elem = NULL;
3435 	boolean_t first = B_TRUE;
3436 	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3437 
3438 		verify(nvpair_value_nvlist(elem, &config) == 0);
3439 
3440 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3441 		    &pool_state) == 0);
3442 		if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
3443 			continue;
3444 		if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
3445 			continue;
3446 
3447 		verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
3448 		    import->policy) == 0);
3449 
3450 		if (!pool_specified) {
3451 			if (first)
3452 				first = B_FALSE;
3453 			else if (!do_all)
3454 				(void) fputc('\n', stdout);
3455 
3456 			if (do_all) {
3457 				err |= do_import(config, NULL, mntopts,
3458 				    props, flags);
3459 			} else {
3460 				/*
3461 				 * If we're importing from cachefile, then
3462 				 * we don't want to report errors until we
3463 				 * are in the scan phase of the import. If
3464 				 * we get an error, then we return that error
3465 				 * to invoke the scan phase.
3466 				 */
3467 				if (import->cachefile && !import->scan)
3468 					err = show_import(config, B_FALSE);
3469 				else
3470 					(void) show_import(config, B_TRUE);
3471 			}
3472 		} else if (import->poolname != NULL) {
3473 			const char *name;
3474 
3475 			/*
3476 			 * We are searching for a pool based on name.
3477 			 */
3478 			verify(nvlist_lookup_string(config,
3479 			    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
3480 
3481 			if (strcmp(name, import->poolname) == 0) {
3482 				if (found_config != NULL) {
3483 					(void) fprintf(stderr, gettext(
3484 					    "cannot import '%s': more than "
3485 					    "one matching pool\n"),
3486 					    import->poolname);
3487 					(void) fprintf(stderr, gettext(
3488 					    "import by numeric ID instead\n"));
3489 					err = B_TRUE;
3490 				}
3491 				found_config = config;
3492 			}
3493 		} else {
3494 			uint64_t guid;
3495 
3496 			/*
3497 			 * Search for a pool by guid.
3498 			 */
3499 			verify(nvlist_lookup_uint64(config,
3500 			    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
3501 
3502 			if (guid == import->guid)
3503 				found_config = config;
3504 		}
3505 	}
3506 
3507 	/*
3508 	 * If we were searching for a specific pool, verify that we found a
3509 	 * pool, and then do the import.
3510 	 */
3511 	if (pool_specified && err == 0) {
3512 		if (found_config == NULL) {
3513 			(void) fprintf(stderr, gettext("cannot import '%s': "
3514 			    "no such pool available\n"), orig_name);
3515 			err = B_TRUE;
3516 		} else {
3517 			err |= do_import(found_config, new_name,
3518 			    mntopts, props, flags);
3519 		}
3520 	}
3521 
3522 	/*
3523 	 * If we were just looking for pools, report an error if none were
3524 	 * found.
3525 	 */
3526 	if (!pool_specified && first)
3527 		(void) fprintf(stderr,
3528 		    gettext("no pools available to import\n"));
3529 	return (err);
3530 }
3531 
3532 typedef struct target_exists_args {
3533 	const char	*poolname;
3534 	uint64_t	poolguid;
3535 } target_exists_args_t;
3536 
3537 static int
3538 name_or_guid_exists(zpool_handle_t *zhp, void *data)
3539 {
3540 	target_exists_args_t *args = data;
3541 	nvlist_t *config = zpool_get_config(zhp, NULL);
3542 	int found = 0;
3543 
3544 	if (config == NULL)
3545 		return (0);
3546 
3547 	if (args->poolname != NULL) {
3548 		const char *pool_name;
3549 
3550 		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3551 		    &pool_name) == 0);
3552 		if (strcmp(pool_name, args->poolname) == 0)
3553 			found = 1;
3554 	} else {
3555 		uint64_t pool_guid;
3556 
3557 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3558 		    &pool_guid) == 0);
3559 		if (pool_guid == args->poolguid)
3560 			found = 1;
3561 	}
3562 	zpool_close(zhp);
3563 
3564 	return (found);
3565 }
3566 /*
3567  * zpool checkpoint <pool>
3568  *       checkpoint --discard <pool>
3569  *
3570  *       -d         Discard the checkpoint from a checkpointed
3571  *       --discard  pool.
3572  *
3573  *       -w         Wait for discarding a checkpoint to complete.
3574  *       --wait
3575  *
3576  * Checkpoints the specified pool, by taking a "snapshot" of its
3577  * current state. A pool can only have one checkpoint at a time.
3578  */
3579 int
3580 zpool_do_checkpoint(int argc, char **argv)
3581 {
3582 	boolean_t discard, wait;
3583 	char *pool;
3584 	zpool_handle_t *zhp;
3585 	int c, err;
3586 
3587 	struct option long_options[] = {
3588 		{"discard", no_argument, NULL, 'd'},
3589 		{"wait", no_argument, NULL, 'w'},
3590 		{0, 0, 0, 0}
3591 	};
3592 
3593 	discard = B_FALSE;
3594 	wait = B_FALSE;
3595 	while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
3596 		switch (c) {
3597 		case 'd':
3598 			discard = B_TRUE;
3599 			break;
3600 		case 'w':
3601 			wait = B_TRUE;
3602 			break;
3603 		case '?':
3604 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
3605 			    optopt);
3606 			usage(B_FALSE);
3607 		}
3608 	}
3609 
3610 	if (wait && !discard) {
3611 		(void) fprintf(stderr, gettext("--wait only valid when "
3612 		    "--discard also specified\n"));
3613 		usage(B_FALSE);
3614 	}
3615 
3616 	argc -= optind;
3617 	argv += optind;
3618 
3619 	if (argc < 1) {
3620 		(void) fprintf(stderr, gettext("missing pool argument\n"));
3621 		usage(B_FALSE);
3622 	}
3623 
3624 	if (argc > 1) {
3625 		(void) fprintf(stderr, gettext("too many arguments\n"));
3626 		usage(B_FALSE);
3627 	}
3628 
3629 	pool = argv[0];
3630 
3631 	if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
3632 		/* As a special case, check for use of '/' in the name */
3633 		if (strchr(pool, '/') != NULL)
3634 			(void) fprintf(stderr, gettext("'zpool checkpoint' "
3635 			    "doesn't work on datasets. To save the state "
3636 			    "of a dataset from a specific point in time "
3637 			    "please use 'zfs snapshot'\n"));
3638 		return (1);
3639 	}
3640 
3641 	if (discard) {
3642 		err = (zpool_discard_checkpoint(zhp) != 0);
3643 		if (err == 0 && wait)
3644 			err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
3645 	} else {
3646 		err = (zpool_checkpoint(zhp) != 0);
3647 	}
3648 
3649 	zpool_close(zhp);
3650 
3651 	return (err);
3652 }
3653 
3654 #define	CHECKPOINT_OPT	1024
3655 
3656 /*
3657  * zpool import [-d dir] [-D]
3658  *       import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3659  *              [-d dir | -c cachefile | -s] [-f] -a
3660  *       import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3661  *              [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
3662  *              [newpool]
3663  *
3664  *	-c	Read pool information from a cachefile instead of searching
3665  *		devices. If importing from a cachefile config fails, then
3666  *		fallback to searching for devices only in the directories that
3667  *		exist in the cachefile.
3668  *
3669  *	-d	Scan in a specific directory, other than /dev/.  More than
3670  *		one directory can be specified using multiple '-d' options.
3671  *
3672  *	-D	Scan for previously destroyed pools or import all or only
3673  *		specified destroyed pools.
3674  *
3675  *	-R	Temporarily import the pool, with all mountpoints relative to
3676  *		the given root.  The pool will remain exported when the machine
3677  *		is rebooted.
3678  *
3679  *	-V	Import even in the presence of faulted vdevs.  This is an
3680  *		intentionally undocumented option for testing purposes, and
3681  *		treats the pool configuration as complete, leaving any bad
3682  *		vdevs in the FAULTED state. In other words, it does verbatim
3683  *		import.
3684  *
3685  *	-f	Force import, even if it appears that the pool is active.
3686  *
3687  *	-F	Attempt rewind if necessary.
3688  *
3689  *	-n	See if rewind would work, but don't actually rewind.
3690  *
3691  *	-N	Import the pool but don't mount datasets.
3692  *
3693  *	-T	Specify a starting txg to use for import. This option is
3694  *		intentionally undocumented option for testing purposes.
3695  *
3696  *	-a	Import all pools found.
3697  *
3698  *	-l	Load encryption keys while importing.
3699  *
3700  *	-o	Set property=value and/or temporary mount options (without '=').
3701  *
3702  *	-s	Scan using the default search path, the libblkid cache will
3703  *		not be consulted.
3704  *
3705  *	--rewind-to-checkpoint
3706  *		Import the pool and revert back to the checkpoint.
3707  *
3708  * The import command scans for pools to import, and import pools based on pool
3709  * name and GUID.  The pool can also be renamed as part of the import process.
3710  */
3711 int
3712 zpool_do_import(int argc, char **argv)
3713 {
3714 	char **searchdirs = NULL;
3715 	char *env, *envdup = NULL;
3716 	int nsearch = 0;
3717 	int c;
3718 	int err = 0;
3719 	nvlist_t *pools = NULL;
3720 	boolean_t do_all = B_FALSE;
3721 	boolean_t do_destroyed = B_FALSE;
3722 	char *mntopts = NULL;
3723 	uint64_t searchguid = 0;
3724 	char *searchname = NULL;
3725 	char *propval;
3726 	nvlist_t *policy = NULL;
3727 	nvlist_t *props = NULL;
3728 	int flags = ZFS_IMPORT_NORMAL;
3729 	uint32_t rewind_policy = ZPOOL_NO_REWIND;
3730 	boolean_t dryrun = B_FALSE;
3731 	boolean_t do_rewind = B_FALSE;
3732 	boolean_t xtreme_rewind = B_FALSE;
3733 	boolean_t do_scan = B_FALSE;
3734 	boolean_t pool_exists = B_FALSE;
3735 	boolean_t pool_specified = B_FALSE;
3736 	uint64_t txg = -1ULL;
3737 	char *cachefile = NULL;
3738 	importargs_t idata = { 0 };
3739 	char *endptr;
3740 
3741 	struct option long_options[] = {
3742 		{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
3743 		{0, 0, 0, 0}
3744 	};
3745 
3746 	/* check options */
3747 	while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
3748 	    long_options, NULL)) != -1) {
3749 		switch (c) {
3750 		case 'a':
3751 			do_all = B_TRUE;
3752 			break;
3753 		case 'c':
3754 			cachefile = optarg;
3755 			break;
3756 		case 'd':
3757 			searchdirs = safe_realloc(searchdirs,
3758 			    (nsearch + 1) * sizeof (char *));
3759 			searchdirs[nsearch++] = optarg;
3760 			break;
3761 		case 'D':
3762 			do_destroyed = B_TRUE;
3763 			break;
3764 		case 'f':
3765 			flags |= ZFS_IMPORT_ANY_HOST;
3766 			break;
3767 		case 'F':
3768 			do_rewind = B_TRUE;
3769 			break;
3770 		case 'l':
3771 			flags |= ZFS_IMPORT_LOAD_KEYS;
3772 			break;
3773 		case 'm':
3774 			flags |= ZFS_IMPORT_MISSING_LOG;
3775 			break;
3776 		case 'n':
3777 			dryrun = B_TRUE;
3778 			break;
3779 		case 'N':
3780 			flags |= ZFS_IMPORT_ONLY;
3781 			break;
3782 		case 'o':
3783 			if ((propval = strchr(optarg, '=')) != NULL) {
3784 				*propval = '\0';
3785 				propval++;
3786 				if (add_prop_list(optarg, propval,
3787 				    &props, B_TRUE))
3788 					goto error;
3789 			} else {
3790 				mntopts = optarg;
3791 			}
3792 			break;
3793 		case 'R':
3794 			if (add_prop_list(zpool_prop_to_name(
3795 			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
3796 				goto error;
3797 			if (add_prop_list_default(zpool_prop_to_name(
3798 			    ZPOOL_PROP_CACHEFILE), "none", &props))
3799 				goto error;
3800 			break;
3801 		case 's':
3802 			do_scan = B_TRUE;
3803 			break;
3804 		case 't':
3805 			flags |= ZFS_IMPORT_TEMP_NAME;
3806 			if (add_prop_list_default(zpool_prop_to_name(
3807 			    ZPOOL_PROP_CACHEFILE), "none", &props))
3808 				goto error;
3809 			break;
3810 
3811 		case 'T':
3812 			errno = 0;
3813 			txg = strtoull(optarg, &endptr, 0);
3814 			if (errno != 0 || *endptr != '\0') {
3815 				(void) fprintf(stderr,
3816 				    gettext("invalid txg value\n"));
3817 				usage(B_FALSE);
3818 			}
3819 			rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
3820 			break;
3821 		case 'V':
3822 			flags |= ZFS_IMPORT_VERBATIM;
3823 			break;
3824 		case 'X':
3825 			xtreme_rewind = B_TRUE;
3826 			break;
3827 		case CHECKPOINT_OPT:
3828 			flags |= ZFS_IMPORT_CHECKPOINT;
3829 			break;
3830 		case ':':
3831 			(void) fprintf(stderr, gettext("missing argument for "
3832 			    "'%c' option\n"), optopt);
3833 			usage(B_FALSE);
3834 			break;
3835 		case '?':
3836 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
3837 			    optopt);
3838 			usage(B_FALSE);
3839 		}
3840 	}
3841 
3842 	argc -= optind;
3843 	argv += optind;
3844 
3845 	if (cachefile && nsearch != 0) {
3846 		(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
3847 		usage(B_FALSE);
3848 	}
3849 
3850 	if (cachefile && do_scan) {
3851 		(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
3852 		usage(B_FALSE);
3853 	}
3854 
3855 	if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
3856 		(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
3857 		usage(B_FALSE);
3858 	}
3859 
3860 	if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
3861 		(void) fprintf(stderr, gettext("-l is only meaningful during "
3862 		    "an import\n"));
3863 		usage(B_FALSE);
3864 	}
3865 
3866 	if ((dryrun || xtreme_rewind) && !do_rewind) {
3867 		(void) fprintf(stderr,
3868 		    gettext("-n or -X only meaningful with -F\n"));
3869 		usage(B_FALSE);
3870 	}
3871 	if (dryrun)
3872 		rewind_policy = ZPOOL_TRY_REWIND;
3873 	else if (do_rewind)
3874 		rewind_policy = ZPOOL_DO_REWIND;
3875 	if (xtreme_rewind)
3876 		rewind_policy |= ZPOOL_EXTREME_REWIND;
3877 
3878 	/* In the future, we can capture further policy and include it here */
3879 	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
3880 	    nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
3881 	    nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
3882 	    rewind_policy) != 0)
3883 		goto error;
3884 
3885 	/* check argument count */
3886 	if (do_all) {
3887 		if (argc != 0) {
3888 			(void) fprintf(stderr, gettext("too many arguments\n"));
3889 			usage(B_FALSE);
3890 		}
3891 	} else {
3892 		if (argc > 2) {
3893 			(void) fprintf(stderr, gettext("too many arguments\n"));
3894 			usage(B_FALSE);
3895 		}
3896 	}
3897 
3898 	/*
3899 	 * Check for the effective uid.  We do this explicitly here because
3900 	 * otherwise any attempt to discover pools will silently fail.
3901 	 */
3902 	if (argc == 0 && geteuid() != 0) {
3903 		(void) fprintf(stderr, gettext("cannot "
3904 		    "discover pools: permission denied\n"));
3905 
3906 		free(searchdirs);
3907 		nvlist_free(props);
3908 		nvlist_free(policy);
3909 		return (1);
3910 	}
3911 
3912 	/*
3913 	 * Depending on the arguments given, we do one of the following:
3914 	 *
3915 	 *	<none>	Iterate through all pools and display information about
3916 	 *		each one.
3917 	 *
3918 	 *	-a	Iterate through all pools and try to import each one.
3919 	 *
3920 	 *	<id>	Find the pool that corresponds to the given GUID/pool
3921 	 *		name and import that one.
3922 	 *
3923 	 *	-D	Above options applies only to destroyed pools.
3924 	 */
3925 	if (argc != 0) {
3926 		char *endptr;
3927 
3928 		errno = 0;
3929 		searchguid = strtoull(argv[0], &endptr, 10);
3930 		if (errno != 0 || *endptr != '\0') {
3931 			searchname = argv[0];
3932 			searchguid = 0;
3933 		}
3934 		pool_specified = B_TRUE;
3935 
3936 		/*
3937 		 * User specified a name or guid.  Ensure it's unique.
3938 		 */
3939 		target_exists_args_t search = {searchname, searchguid};
3940 		pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
3941 	}
3942 
3943 	/*
3944 	 * Check the environment for the preferred search path.
3945 	 */
3946 	if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
3947 		char *dir, *tmp = NULL;
3948 
3949 		envdup = strdup(env);
3950 
3951 		for (dir = strtok_r(envdup, ":", &tmp);
3952 		    dir != NULL;
3953 		    dir = strtok_r(NULL, ":", &tmp)) {
3954 			searchdirs = safe_realloc(searchdirs,
3955 			    (nsearch + 1) * sizeof (char *));
3956 			searchdirs[nsearch++] = dir;
3957 		}
3958 	}
3959 
3960 	idata.path = searchdirs;
3961 	idata.paths = nsearch;
3962 	idata.poolname = searchname;
3963 	idata.guid = searchguid;
3964 	idata.cachefile = cachefile;
3965 	idata.scan = do_scan;
3966 	idata.policy = policy;
3967 
3968 	libpc_handle_t lpch = {
3969 		.lpc_lib_handle = g_zfs,
3970 		.lpc_ops = &libzfs_config_ops,
3971 		.lpc_printerr = B_TRUE
3972 	};
3973 	pools = zpool_search_import(&lpch, &idata);
3974 
3975 	if (pools != NULL && pool_exists &&
3976 	    (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
3977 		(void) fprintf(stderr, gettext("cannot import '%s': "
3978 		    "a pool with that name already exists\n"),
3979 		    argv[0]);
3980 		(void) fprintf(stderr, gettext("use the form '%s "
3981 		    "<pool | id> <newpool>' to give it a new name\n"),
3982 		    "zpool import");
3983 		err = 1;
3984 	} else if (pools == NULL && pool_exists) {
3985 		(void) fprintf(stderr, gettext("cannot import '%s': "
3986 		    "a pool with that name is already created/imported,\n"),
3987 		    argv[0]);
3988 		(void) fprintf(stderr, gettext("and no additional pools "
3989 		    "with that name were found\n"));
3990 		err = 1;
3991 	} else if (pools == NULL) {
3992 		if (argc != 0) {
3993 			(void) fprintf(stderr, gettext("cannot import '%s': "
3994 			    "no such pool available\n"), argv[0]);
3995 		}
3996 		err = 1;
3997 	}
3998 
3999 	if (err == 1) {
4000 		free(searchdirs);
4001 		free(envdup);
4002 		nvlist_free(policy);
4003 		nvlist_free(pools);
4004 		nvlist_free(props);
4005 		return (1);
4006 	}
4007 
4008 	err = import_pools(pools, props, mntopts, flags,
4009 	    argc >= 1 ? argv[0] : NULL,
4010 	    argc >= 2 ? argv[1] : NULL,
4011 	    do_destroyed, pool_specified, do_all, &idata);
4012 
4013 	/*
4014 	 * If we're using the cachefile and we failed to import, then
4015 	 * fallback to scanning the directory for pools that match
4016 	 * those in the cachefile.
4017 	 */
4018 	if (err != 0 && cachefile != NULL) {
4019 		(void) printf(gettext("cachefile import failed, retrying\n"));
4020 
4021 		/*
4022 		 * We use the scan flag to gather the directories that exist
4023 		 * in the cachefile. If we need to fallback to searching for
4024 		 * the pool config, we will only search devices in these
4025 		 * directories.
4026 		 */
4027 		idata.scan = B_TRUE;
4028 		nvlist_free(pools);
4029 		pools = zpool_search_import(&lpch, &idata);
4030 
4031 		err = import_pools(pools, props, mntopts, flags,
4032 		    argc >= 1 ? argv[0] : NULL,
4033 		    argc >= 2 ? argv[1] : NULL,
4034 		    do_destroyed, pool_specified, do_all, &idata);
4035 	}
4036 
4037 error:
4038 	nvlist_free(props);
4039 	nvlist_free(pools);
4040 	nvlist_free(policy);
4041 	free(searchdirs);
4042 	free(envdup);
4043 
4044 	return (err ? 1 : 0);
4045 }
4046 
4047 /*
4048  * zpool sync [-f] [pool] ...
4049  *
4050  * -f (undocumented) force uberblock (and config including zpool cache file)
4051  *    update.
4052  *
4053  * Sync the specified pool(s).
4054  * Without arguments "zpool sync" will sync all pools.
4055  * This command initiates TXG sync(s) and will return after the TXG(s) commit.
4056  *
4057  */
4058 static int
4059 zpool_do_sync(int argc, char **argv)
4060 {
4061 	int ret;
4062 	boolean_t force = B_FALSE;
4063 
4064 	/* check options */
4065 	while ((ret  = getopt(argc, argv, "f")) != -1) {
4066 		switch (ret) {
4067 		case 'f':
4068 			force = B_TRUE;
4069 			break;
4070 		case '?':
4071 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
4072 			    optopt);
4073 			usage(B_FALSE);
4074 		}
4075 	}
4076 
4077 	argc -= optind;
4078 	argv += optind;
4079 
4080 	/* if argc == 0 we will execute zpool_sync_one on all pools */
4081 	ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
4082 	    B_FALSE, zpool_sync_one, &force);
4083 
4084 	return (ret);
4085 }
4086 
4087 typedef struct iostat_cbdata {
4088 	uint64_t cb_flags;
4089 	int cb_namewidth;
4090 	int cb_iteration;
4091 	boolean_t cb_verbose;
4092 	boolean_t cb_literal;
4093 	boolean_t cb_scripted;
4094 	zpool_list_t *cb_list;
4095 	vdev_cmd_data_list_t *vcdl;
4096 	vdev_cbdata_t cb_vdevs;
4097 } iostat_cbdata_t;
4098 
4099 /*  iostat labels */
4100 typedef struct name_and_columns {
4101 	const char *name;	/* Column name */
4102 	unsigned int columns;	/* Center name to this number of columns */
4103 } name_and_columns_t;
4104 
4105 #define	IOSTAT_MAX_LABELS	15	/* Max number of labels on one line */
4106 
4107 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
4108 {
4109 	[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
4110 	    {NULL}},
4111 	[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4112 	    {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
4113 	    {NULL}},
4114 	[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
4115 	    {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
4116 	    {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
4117 	[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4118 	    {"asyncq_wait", 2}, {NULL}},
4119 	[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
4120 	    {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
4121 	    {"trim", 2}, {"rebuild", 2}, {NULL}},
4122 };
4123 
4124 /* Shorthand - if "columns" field not set, default to 1 column */
4125 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
4126 {
4127 	[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
4128 	    {"write"}, {NULL}},
4129 	[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4130 	    {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
4131 	    {NULL}},
4132 	[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
4133 	    {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
4134 	    {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
4135 	[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4136 	    {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
4137 	    {NULL}},
4138 	[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4139 	    {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4140 	    {"ind"}, {"agg"}, {NULL}},
4141 };
4142 
4143 static const char *histo_to_title[] = {
4144 	[IOS_L_HISTO] = "latency",
4145 	[IOS_RQ_HISTO] = "req_size",
4146 };
4147 
4148 /*
4149  * Return the number of labels in a null-terminated name_and_columns_t
4150  * array.
4151  *
4152  */
4153 static unsigned int
4154 label_array_len(const name_and_columns_t *labels)
4155 {
4156 	int i = 0;
4157 
4158 	while (labels[i].name)
4159 		i++;
4160 
4161 	return (i);
4162 }
4163 
4164 /*
4165  * Return the number of strings in a null-terminated string array.
4166  * For example:
4167  *
4168  *     const char foo[] = {"bar", "baz", NULL}
4169  *
4170  * returns 2
4171  */
4172 static uint64_t
4173 str_array_len(const char *array[])
4174 {
4175 	uint64_t i = 0;
4176 	while (array[i])
4177 		i++;
4178 
4179 	return (i);
4180 }
4181 
4182 
4183 /*
4184  * Return a default column width for default/latency/queue columns. This does
4185  * not include histograms, which have their columns autosized.
4186  */
4187 static unsigned int
4188 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
4189 {
4190 	unsigned long column_width = 5; /* Normal niceprint */
4191 	static unsigned long widths[] = {
4192 		/*
4193 		 * Choose some sane default column sizes for printing the
4194 		 * raw numbers.
4195 		 */
4196 		[IOS_DEFAULT] = 15, /* 1PB capacity */
4197 		[IOS_LATENCY] = 10, /* 1B ns = 10sec */
4198 		[IOS_QUEUES] = 6,   /* 1M queue entries */
4199 		[IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4200 		[IOS_RQ_HISTO] = 6, /* 1M queue entries */
4201 	};
4202 
4203 	if (cb->cb_literal)
4204 		column_width = widths[type];
4205 
4206 	return (column_width);
4207 }
4208 
4209 /*
4210  * Print the column labels, i.e:
4211  *
4212  *   capacity     operations     bandwidth
4213  * alloc   free   read  write   read  write  ...
4214  *
4215  * If force_column_width is set, use it for the column width.  If not set, use
4216  * the default column width.
4217  */
4218 static void
4219 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4220     const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4221 {
4222 	int i, idx, s;
4223 	int text_start, rw_column_width, spaces_to_end;
4224 	uint64_t flags = cb->cb_flags;
4225 	uint64_t f;
4226 	unsigned int column_width = force_column_width;
4227 
4228 	/* For each bit set in flags */
4229 	for (f = flags; f; f &= ~(1ULL << idx)) {
4230 		idx = lowbit64(f) - 1;
4231 		if (!force_column_width)
4232 			column_width = default_column_width(cb, idx);
4233 		/* Print our top labels centered over "read  write" label. */
4234 		for (i = 0; i < label_array_len(labels[idx]); i++) {
4235 			const char *name = labels[idx][i].name;
4236 			/*
4237 			 * We treat labels[][].columns == 0 as shorthand
4238 			 * for one column.  It makes writing out the label
4239 			 * tables more concise.
4240 			 */
4241 			unsigned int columns = MAX(1, labels[idx][i].columns);
4242 			unsigned int slen = strlen(name);
4243 
4244 			rw_column_width = (column_width * columns) +
4245 			    (2 * (columns - 1));
4246 
4247 			text_start = (int)((rw_column_width) / columns -
4248 			    slen / columns);
4249 			if (text_start < 0)
4250 				text_start = 0;
4251 
4252 			printf("  ");	/* Two spaces between columns */
4253 
4254 			/* Space from beginning of column to label */
4255 			for (s = 0; s < text_start; s++)
4256 				printf(" ");
4257 
4258 			printf("%s", name);
4259 
4260 			/* Print space after label to end of column */
4261 			spaces_to_end = rw_column_width - text_start - slen;
4262 			if (spaces_to_end < 0)
4263 				spaces_to_end = 0;
4264 
4265 			for (s = 0; s < spaces_to_end; s++)
4266 				printf(" ");
4267 		}
4268 	}
4269 }
4270 
4271 
4272 /*
4273  * print_cmd_columns - Print custom column titles from -c
4274  *
4275  * If the user specified the "zpool status|iostat -c" then print their custom
4276  * column titles in the header.  For example, print_cmd_columns() would print
4277  * the "  col1  col2" part of this:
4278  *
4279  * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4280  * ...
4281  *	      capacity     operations     bandwidth
4282  * pool        alloc   free   read  write   read  write  col1  col2
4283  * ----------  -----  -----  -----  -----  -----  -----  ----  ----
4284  * mypool       269K  1008M      0      0    107    946
4285  *   mirror     269K  1008M      0      0    107    946
4286  *     sdb         -      -      0      0    102    473  val1  val2
4287  *     sdc         -      -      0      0      5    473  val1  val2
4288  * ----------  -----  -----  -----  -----  -----  -----  ----  ----
4289  */
4290 static void
4291 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4292 {
4293 	int i, j;
4294 	vdev_cmd_data_t *data = &vcdl->data[0];
4295 
4296 	if (vcdl->count == 0 || data == NULL)
4297 		return;
4298 
4299 	/*
4300 	 * Each vdev cmd should have the same column names unless the user did
4301 	 * something weird with their cmd.  Just take the column names from the
4302 	 * first vdev and assume it works for all of them.
4303 	 */
4304 	for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4305 		printf("  ");
4306 		if (use_dashes) {
4307 			for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4308 				printf("-");
4309 		} else {
4310 			printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
4311 			    vcdl->uniq_cols[i]);
4312 		}
4313 	}
4314 }
4315 
4316 
4317 /*
4318  * Utility function to print out a line of dashes like:
4319  *
4320  * 	--------------------------------  -----  -----  -----  -----  -----
4321  *
4322  * ...or a dashed named-row line like:
4323  *
4324  * 	logs                                  -      -      -      -      -
4325  *
4326  * @cb:				iostat data
4327  *
4328  * @force_column_width		If non-zero, use the value as the column width.
4329  * 				Otherwise use the default column widths.
4330  *
4331  * @name:			Print a dashed named-row line starting
4332  * 				with @name.  Otherwise, print a regular
4333  * 				dashed line.
4334  */
4335 static void
4336 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
4337     const char *name)
4338 {
4339 	int i;
4340 	unsigned int namewidth;
4341 	uint64_t flags = cb->cb_flags;
4342 	uint64_t f;
4343 	int idx;
4344 	const name_and_columns_t *labels;
4345 	const char *title;
4346 
4347 
4348 	if (cb->cb_flags & IOS_ANYHISTO_M) {
4349 		title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4350 	} else if (cb->cb_vdevs.cb_names_count) {
4351 		title = "vdev";
4352 	} else  {
4353 		title = "pool";
4354 	}
4355 
4356 	namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4357 	    name ? strlen(name) : 0);
4358 
4359 
4360 	if (name) {
4361 		printf("%-*s", namewidth, name);
4362 	} else {
4363 		for (i = 0; i < namewidth; i++)
4364 			(void) printf("-");
4365 	}
4366 
4367 	/* For each bit in flags */
4368 	for (f = flags; f; f &= ~(1ULL << idx)) {
4369 		unsigned int column_width;
4370 		idx = lowbit64(f) - 1;
4371 		if (force_column_width)
4372 			column_width = force_column_width;
4373 		else
4374 			column_width = default_column_width(cb, idx);
4375 
4376 		labels = iostat_bottom_labels[idx];
4377 		for (i = 0; i < label_array_len(labels); i++) {
4378 			if (name)
4379 				printf("  %*s-", column_width - 1, " ");
4380 			else
4381 				printf("  %.*s", column_width,
4382 				    "--------------------");
4383 		}
4384 	}
4385 }
4386 
4387 
4388 static void
4389 print_iostat_separator_impl(iostat_cbdata_t *cb,
4390     unsigned int force_column_width)
4391 {
4392 	print_iostat_dashes(cb, force_column_width, NULL);
4393 }
4394 
4395 static void
4396 print_iostat_separator(iostat_cbdata_t *cb)
4397 {
4398 	print_iostat_separator_impl(cb, 0);
4399 }
4400 
4401 static void
4402 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
4403     const char *histo_vdev_name)
4404 {
4405 	unsigned int namewidth;
4406 	const char *title;
4407 
4408 	color_start(ANSI_BOLD);
4409 
4410 	if (cb->cb_flags & IOS_ANYHISTO_M) {
4411 		title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4412 	} else if (cb->cb_vdevs.cb_names_count) {
4413 		title = "vdev";
4414 	} else  {
4415 		title = "pool";
4416 	}
4417 
4418 	namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4419 	    histo_vdev_name ? strlen(histo_vdev_name) : 0);
4420 
4421 	if (histo_vdev_name)
4422 		printf("%-*s", namewidth, histo_vdev_name);
4423 	else
4424 		printf("%*s", namewidth, "");
4425 
4426 
4427 	print_iostat_labels(cb, force_column_width, iostat_top_labels);
4428 	printf("\n");
4429 
4430 	printf("%-*s", namewidth, title);
4431 
4432 	print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
4433 	if (cb->vcdl != NULL)
4434 		print_cmd_columns(cb->vcdl, 0);
4435 
4436 	printf("\n");
4437 
4438 	print_iostat_separator_impl(cb, force_column_width);
4439 
4440 	if (cb->vcdl != NULL)
4441 		print_cmd_columns(cb->vcdl, 1);
4442 
4443 	color_end();
4444 
4445 	printf("\n");
4446 }
4447 
4448 static void
4449 print_iostat_header(iostat_cbdata_t *cb)
4450 {
4451 	print_iostat_header_impl(cb, 0, NULL);
4452 }
4453 
4454 /*
4455  * Prints a size string (i.e. 120M) with the suffix ("M") colored
4456  * by order of magnitude. Uses column_size to add padding.
4457  */
4458 static void
4459 print_stat_color(const char *statbuf, unsigned int column_size)
4460 {
4461 	fputs("  ", stdout);
4462 	size_t len = strlen(statbuf);
4463 	while (len < column_size) {
4464 		fputc(' ', stdout);
4465 		column_size--;
4466 	}
4467 	if (*statbuf == '0') {
4468 		color_start(ANSI_GRAY);
4469 		fputc('0', stdout);
4470 	} else {
4471 		for (; *statbuf; statbuf++) {
4472 			if (*statbuf == 'K') color_start(ANSI_GREEN);
4473 			else if (*statbuf == 'M') color_start(ANSI_YELLOW);
4474 			else if (*statbuf == 'G') color_start(ANSI_RED);
4475 			else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
4476 			else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
4477 			else if (*statbuf == 'E') color_start(ANSI_CYAN);
4478 			fputc(*statbuf, stdout);
4479 			if (--column_size <= 0)
4480 				break;
4481 		}
4482 	}
4483 	color_end();
4484 }
4485 
4486 /*
4487  * Display a single statistic.
4488  */
4489 static void
4490 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
4491     unsigned int column_size, boolean_t scripted)
4492 {
4493 	char buf[64];
4494 
4495 	zfs_nicenum_format(value, buf, sizeof (buf), format);
4496 
4497 	if (scripted)
4498 		printf("\t%s", buf);
4499 	else
4500 		print_stat_color(buf, column_size);
4501 }
4502 
4503 /*
4504  * Calculate the default vdev stats
4505  *
4506  * Subtract oldvs from newvs, apply a scaling factor, and save the resulting
4507  * stats into calcvs.
4508  */
4509 static void
4510 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
4511     vdev_stat_t *calcvs)
4512 {
4513 	int i;
4514 
4515 	memcpy(calcvs, newvs, sizeof (*calcvs));
4516 	for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
4517 		calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
4518 
4519 	for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
4520 		calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
4521 }
4522 
4523 /*
4524  * Internal representation of the extended iostats data.
4525  *
4526  * The extended iostat stats are exported in nvlists as either uint64_t arrays
4527  * or single uint64_t's.  We make both look like arrays to make them easier
4528  * to process.  In order to make single uint64_t's look like arrays, we set
4529  * __data to the stat data, and then set *data = &__data with count = 1.  Then,
4530  * we can just use *data and count.
4531  */
4532 struct stat_array {
4533 	uint64_t *data;
4534 	uint_t count;	/* Number of entries in data[] */
4535 	uint64_t __data; /* Only used when data is a single uint64_t */
4536 };
4537 
4538 static uint64_t
4539 stat_histo_max(struct stat_array *nva, unsigned int len)
4540 {
4541 	uint64_t max = 0;
4542 	int i;
4543 	for (i = 0; i < len; i++)
4544 		max = MAX(max, array64_max(nva[i].data, nva[i].count));
4545 
4546 	return (max);
4547 }
4548 
4549 /*
4550  * Helper function to lookup a uint64_t array or uint64_t value and store its
4551  * data as a stat_array.  If the nvpair is a single uint64_t value, then we make
4552  * it look like a one element array to make it easier to process.
4553  */
4554 static int
4555 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
4556     struct stat_array *nva)
4557 {
4558 	nvpair_t *tmp;
4559 	int ret;
4560 
4561 	verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
4562 	switch (nvpair_type(tmp)) {
4563 	case DATA_TYPE_UINT64_ARRAY:
4564 		ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
4565 		break;
4566 	case DATA_TYPE_UINT64:
4567 		ret = nvpair_value_uint64(tmp, &nva->__data);
4568 		nva->data = &nva->__data;
4569 		nva->count = 1;
4570 		break;
4571 	default:
4572 		/* Not a uint64_t */
4573 		ret = EINVAL;
4574 		break;
4575 	}
4576 
4577 	return (ret);
4578 }
4579 
4580 /*
4581  * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
4582  * subtract them, and return the results in a newly allocated stat_array.
4583  * You must free the returned array after you are done with it with
4584  * free_calc_stats().
4585  *
4586  * Additionally, you can set "oldnv" to NULL if you simply want the newnv
4587  * values.
4588  */
4589 static struct stat_array *
4590 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
4591     nvlist_t *newnv)
4592 {
4593 	nvlist_t *oldnvx = NULL, *newnvx;
4594 	struct stat_array *oldnva, *newnva, *calcnva;
4595 	int i, j;
4596 	unsigned int alloc_size = (sizeof (struct stat_array)) * len;
4597 
4598 	/* Extract our extended stats nvlist from the main list */
4599 	verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4600 	    &newnvx) == 0);
4601 	if (oldnv) {
4602 		verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4603 		    &oldnvx) == 0);
4604 	}
4605 
4606 	newnva = safe_malloc(alloc_size);
4607 	oldnva = safe_malloc(alloc_size);
4608 	calcnva = safe_malloc(alloc_size);
4609 
4610 	for (j = 0; j < len; j++) {
4611 		verify(nvpair64_to_stat_array(newnvx, names[j],
4612 		    &newnva[j]) == 0);
4613 		calcnva[j].count = newnva[j].count;
4614 		alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
4615 		calcnva[j].data = safe_malloc(alloc_size);
4616 		memcpy(calcnva[j].data, newnva[j].data, alloc_size);
4617 
4618 		if (oldnvx) {
4619 			verify(nvpair64_to_stat_array(oldnvx, names[j],
4620 			    &oldnva[j]) == 0);
4621 			for (i = 0; i < oldnva[j].count; i++)
4622 				calcnva[j].data[i] -= oldnva[j].data[i];
4623 		}
4624 	}
4625 	free(newnva);
4626 	free(oldnva);
4627 	return (calcnva);
4628 }
4629 
4630 static void
4631 free_calc_stats(struct stat_array *nva, unsigned int len)
4632 {
4633 	int i;
4634 	for (i = 0; i < len; i++)
4635 		free(nva[i].data);
4636 
4637 	free(nva);
4638 }
4639 
4640 static void
4641 print_iostat_histo(struct stat_array *nva, unsigned int len,
4642     iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
4643     double scale)
4644 {
4645 	int i, j;
4646 	char buf[6];
4647 	uint64_t val;
4648 	enum zfs_nicenum_format format;
4649 	unsigned int buckets;
4650 	unsigned int start_bucket;
4651 
4652 	if (cb->cb_literal)
4653 		format = ZFS_NICENUM_RAW;
4654 	else
4655 		format = ZFS_NICENUM_1024;
4656 
4657 	/* All these histos are the same size, so just use nva[0].count */
4658 	buckets = nva[0].count;
4659 
4660 	if (cb->cb_flags & IOS_RQ_HISTO_M) {
4661 		/* Start at 512 - req size should never be lower than this */
4662 		start_bucket = 9;
4663 	} else {
4664 		start_bucket = 0;
4665 	}
4666 
4667 	for (j = start_bucket; j < buckets; j++) {
4668 		/* Print histogram bucket label */
4669 		if (cb->cb_flags & IOS_L_HISTO_M) {
4670 			/* Ending range of this bucket */
4671 			val = (1UL << (j + 1)) - 1;
4672 			zfs_nicetime(val, buf, sizeof (buf));
4673 		} else {
4674 			/* Request size (starting range of bucket) */
4675 			val = (1UL << j);
4676 			zfs_nicenum(val, buf, sizeof (buf));
4677 		}
4678 
4679 		if (cb->cb_scripted)
4680 			printf("%llu", (u_longlong_t)val);
4681 		else
4682 			printf("%-*s", namewidth, buf);
4683 
4684 		/* Print the values on the line */
4685 		for (i = 0; i < len; i++) {
4686 			print_one_stat(nva[i].data[j] * scale, format,
4687 			    column_width, cb->cb_scripted);
4688 		}
4689 		printf("\n");
4690 	}
4691 }
4692 
4693 static void
4694 print_solid_separator(unsigned int length)
4695 {
4696 	while (length--)
4697 		printf("-");
4698 	printf("\n");
4699 }
4700 
4701 static void
4702 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
4703     nvlist_t *newnv, double scale, const char *name)
4704 {
4705 	unsigned int column_width;
4706 	unsigned int namewidth;
4707 	unsigned int entire_width;
4708 	enum iostat_type type;
4709 	struct stat_array *nva;
4710 	const char **names;
4711 	unsigned int names_len;
4712 
4713 	/* What type of histo are we? */
4714 	type = IOS_HISTO_IDX(cb->cb_flags);
4715 
4716 	/* Get NULL-terminated array of nvlist names for our histo */
4717 	names = vsx_type_to_nvlist[type];
4718 	names_len = str_array_len(names); /* num of names */
4719 
4720 	nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
4721 
4722 	if (cb->cb_literal) {
4723 		column_width = MAX(5,
4724 		    (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
4725 	} else {
4726 		column_width = 5;
4727 	}
4728 
4729 	namewidth = MAX(cb->cb_namewidth,
4730 	    strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
4731 
4732 	/*
4733 	 * Calculate the entire line width of what we're printing.  The
4734 	 * +2 is for the two spaces between columns:
4735 	 */
4736 	/*	 read  write				*/
4737 	/*	-----  -----				*/
4738 	/*	|___|  <---------- column_width		*/
4739 	/*						*/
4740 	/*	|__________|  <--- entire_width		*/
4741 	/*						*/
4742 	entire_width = namewidth + (column_width + 2) *
4743 	    label_array_len(iostat_bottom_labels[type]);
4744 
4745 	if (cb->cb_scripted)
4746 		printf("%s\n", name);
4747 	else
4748 		print_iostat_header_impl(cb, column_width, name);
4749 
4750 	print_iostat_histo(nva, names_len, cb, column_width,
4751 	    namewidth, scale);
4752 
4753 	free_calc_stats(nva, names_len);
4754 	if (!cb->cb_scripted)
4755 		print_solid_separator(entire_width);
4756 }
4757 
4758 /*
4759  * Calculate the average latency of a power-of-two latency histogram
4760  */
4761 static uint64_t
4762 single_histo_average(uint64_t *histo, unsigned int buckets)
4763 {
4764 	int i;
4765 	uint64_t count = 0, total = 0;
4766 
4767 	for (i = 0; i < buckets; i++) {
4768 		/*
4769 		 * Our buckets are power-of-two latency ranges.  Use the
4770 		 * midpoint latency of each bucket to calculate the average.
4771 		 * For example:
4772 		 *
4773 		 * Bucket          Midpoint
4774 		 * 8ns-15ns:       12ns
4775 		 * 16ns-31ns:      24ns
4776 		 * ...
4777 		 */
4778 		if (histo[i] != 0) {
4779 			total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
4780 			count += histo[i];
4781 		}
4782 	}
4783 
4784 	/* Prevent divide by zero */
4785 	return (count == 0 ? 0 : total / count);
4786 }
4787 
4788 static void
4789 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
4790 {
4791 	const char *names[] = {
4792 		ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
4793 		ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
4794 		ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
4795 		ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
4796 		ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
4797 		ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
4798 		ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
4799 		ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
4800 		ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
4801 		ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
4802 		ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
4803 		ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
4804 		ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
4805 		ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
4806 	};
4807 
4808 	struct stat_array *nva;
4809 
4810 	unsigned int column_width = default_column_width(cb, IOS_QUEUES);
4811 	enum zfs_nicenum_format format;
4812 
4813 	nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
4814 
4815 	if (cb->cb_literal)
4816 		format = ZFS_NICENUM_RAW;
4817 	else
4818 		format = ZFS_NICENUM_1024;
4819 
4820 	for (int i = 0; i < ARRAY_SIZE(names); i++) {
4821 		uint64_t val = nva[i].data[0];
4822 		print_one_stat(val, format, column_width, cb->cb_scripted);
4823 	}
4824 
4825 	free_calc_stats(nva, ARRAY_SIZE(names));
4826 }
4827 
4828 static void
4829 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
4830     nvlist_t *newnv)
4831 {
4832 	int i;
4833 	uint64_t val;
4834 	const char *names[] = {
4835 		ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
4836 		ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
4837 		ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
4838 		ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
4839 		ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
4840 		ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
4841 		ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
4842 		ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
4843 		ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
4844 		ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
4845 		ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
4846 	};
4847 	struct stat_array *nva;
4848 
4849 	unsigned int column_width = default_column_width(cb, IOS_LATENCY);
4850 	enum zfs_nicenum_format format;
4851 
4852 	nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
4853 
4854 	if (cb->cb_literal)
4855 		format = ZFS_NICENUM_RAWTIME;
4856 	else
4857 		format = ZFS_NICENUM_TIME;
4858 
4859 	/* Print our avg latencies on the line */
4860 	for (i = 0; i < ARRAY_SIZE(names); i++) {
4861 		/* Compute average latency for a latency histo */
4862 		val = single_histo_average(nva[i].data, nva[i].count);
4863 		print_one_stat(val, format, column_width, cb->cb_scripted);
4864 	}
4865 	free_calc_stats(nva, ARRAY_SIZE(names));
4866 }
4867 
4868 /*
4869  * Print default statistics (capacity/operations/bandwidth)
4870  */
4871 static void
4872 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
4873 {
4874 	unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
4875 	enum zfs_nicenum_format format;
4876 	char na;	/* char to print for "not applicable" values */
4877 
4878 	if (cb->cb_literal) {
4879 		format = ZFS_NICENUM_RAW;
4880 		na = '0';
4881 	} else {
4882 		format = ZFS_NICENUM_1024;
4883 		na = '-';
4884 	}
4885 
4886 	/* only toplevel vdevs have capacity stats */
4887 	if (vs->vs_space == 0) {
4888 		if (cb->cb_scripted)
4889 			printf("\t%c\t%c", na, na);
4890 		else
4891 			printf("  %*c  %*c", column_width, na, column_width,
4892 			    na);
4893 	} else {
4894 		print_one_stat(vs->vs_alloc, format, column_width,
4895 		    cb->cb_scripted);
4896 		print_one_stat(vs->vs_space - vs->vs_alloc, format,
4897 		    column_width, cb->cb_scripted);
4898 	}
4899 
4900 	print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
4901 	    format, column_width, cb->cb_scripted);
4902 	print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
4903 	    format, column_width, cb->cb_scripted);
4904 	print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
4905 	    format, column_width, cb->cb_scripted);
4906 	print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
4907 	    format, column_width, cb->cb_scripted);
4908 }
4909 
4910 static const char *const class_name[] = {
4911 	VDEV_ALLOC_BIAS_DEDUP,
4912 	VDEV_ALLOC_BIAS_SPECIAL,
4913 	VDEV_ALLOC_CLASS_LOGS
4914 };
4915 
4916 /*
4917  * Print out all the statistics for the given vdev.  This can either be the
4918  * toplevel configuration, or called recursively.  If 'name' is NULL, then this
4919  * is a verbose output, and we don't want to display the toplevel pool stats.
4920  *
4921  * Returns the number of stat lines printed.
4922  */
4923 static unsigned int
4924 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
4925     nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
4926 {
4927 	nvlist_t **oldchild, **newchild;
4928 	uint_t c, children, oldchildren;
4929 	vdev_stat_t *oldvs, *newvs, *calcvs;
4930 	vdev_stat_t zerovs = { 0 };
4931 	char *vname;
4932 	int i;
4933 	int ret = 0;
4934 	uint64_t tdelta;
4935 	double scale;
4936 
4937 	if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
4938 		return (ret);
4939 
4940 	calcvs = safe_malloc(sizeof (*calcvs));
4941 
4942 	if (oldnv != NULL) {
4943 		verify(nvlist_lookup_uint64_array(oldnv,
4944 		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
4945 	} else {
4946 		oldvs = &zerovs;
4947 	}
4948 
4949 	/* Do we only want to see a specific vdev? */
4950 	for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
4951 		/* Yes we do.  Is this the vdev? */
4952 		if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
4953 			/*
4954 			 * This is our vdev.  Since it is the only vdev we
4955 			 * will be displaying, make depth = 0 so that it
4956 			 * doesn't get indented.
4957 			 */
4958 			depth = 0;
4959 			break;
4960 		}
4961 	}
4962 
4963 	if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
4964 		/* Couldn't match the name */
4965 		goto children;
4966 	}
4967 
4968 
4969 	verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
4970 	    (uint64_t **)&newvs, &c) == 0);
4971 
4972 	/*
4973 	 * Print the vdev name unless it's is a histogram.  Histograms
4974 	 * display the vdev name in the header itself.
4975 	 */
4976 	if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
4977 		if (cb->cb_scripted) {
4978 			printf("%s", name);
4979 		} else {
4980 			if (strlen(name) + depth > cb->cb_namewidth)
4981 				(void) printf("%*s%s", depth, "", name);
4982 			else
4983 				(void) printf("%*s%s%*s", depth, "", name,
4984 				    (int)(cb->cb_namewidth - strlen(name) -
4985 				    depth), "");
4986 		}
4987 	}
4988 
4989 	/* Calculate our scaling factor */
4990 	tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
4991 	if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
4992 		/*
4993 		 * If we specify printing histograms with no time interval, then
4994 		 * print the histogram numbers over the entire lifetime of the
4995 		 * vdev.
4996 		 */
4997 		scale = 1;
4998 	} else {
4999 		if (tdelta == 0)
5000 			scale = 1.0;
5001 		else
5002 			scale = (double)NANOSEC / tdelta;
5003 	}
5004 
5005 	if (cb->cb_flags & IOS_DEFAULT_M) {
5006 		calc_default_iostats(oldvs, newvs, calcvs);
5007 		print_iostat_default(calcvs, cb, scale);
5008 	}
5009 	if (cb->cb_flags & IOS_LATENCY_M)
5010 		print_iostat_latency(cb, oldnv, newnv);
5011 	if (cb->cb_flags & IOS_QUEUES_M)
5012 		print_iostat_queues(cb, newnv);
5013 	if (cb->cb_flags & IOS_ANYHISTO_M) {
5014 		printf("\n");
5015 		print_iostat_histos(cb, oldnv, newnv, scale, name);
5016 	}
5017 
5018 	if (cb->vcdl != NULL) {
5019 		const char *path;
5020 		if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
5021 		    &path) == 0) {
5022 			printf("  ");
5023 			zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
5024 		}
5025 	}
5026 
5027 	if (!(cb->cb_flags & IOS_ANYHISTO_M))
5028 		printf("\n");
5029 
5030 	ret++;
5031 
5032 children:
5033 
5034 	free(calcvs);
5035 
5036 	if (!cb->cb_verbose)
5037 		return (ret);
5038 
5039 	if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
5040 	    &newchild, &children) != 0)
5041 		return (ret);
5042 
5043 	if (oldnv) {
5044 		if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
5045 		    &oldchild, &oldchildren) != 0)
5046 			return (ret);
5047 
5048 		children = MIN(oldchildren, children);
5049 	}
5050 
5051 	/*
5052 	 * print normal top-level devices
5053 	 */
5054 	for (c = 0; c < children; c++) {
5055 		uint64_t ishole = B_FALSE, islog = B_FALSE;
5056 
5057 		(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
5058 		    &ishole);
5059 
5060 		(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
5061 		    &islog);
5062 
5063 		if (ishole || islog)
5064 			continue;
5065 
5066 		if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
5067 			continue;
5068 
5069 		vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5070 		    cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5071 		ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
5072 		    newchild[c], cb, depth + 2);
5073 		free(vname);
5074 	}
5075 
5076 	/*
5077 	 * print all other top-level devices
5078 	 */
5079 	for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
5080 		boolean_t printed = B_FALSE;
5081 
5082 		for (c = 0; c < children; c++) {
5083 			uint64_t islog = B_FALSE;
5084 			const char *bias = NULL;
5085 			const char *type = NULL;
5086 
5087 			(void) nvlist_lookup_uint64(newchild[c],
5088 			    ZPOOL_CONFIG_IS_LOG, &islog);
5089 			if (islog) {
5090 				bias = VDEV_ALLOC_CLASS_LOGS;
5091 			} else {
5092 				(void) nvlist_lookup_string(newchild[c],
5093 				    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
5094 				(void) nvlist_lookup_string(newchild[c],
5095 				    ZPOOL_CONFIG_TYPE, &type);
5096 			}
5097 			if (bias == NULL || strcmp(bias, class_name[n]) != 0)
5098 				continue;
5099 			if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
5100 				continue;
5101 
5102 			if (!printed) {
5103 				if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
5104 				    !cb->cb_scripted &&
5105 				    !cb->cb_vdevs.cb_names) {
5106 					print_iostat_dashes(cb, 0,
5107 					    class_name[n]);
5108 				}
5109 				printf("\n");
5110 				printed = B_TRUE;
5111 			}
5112 
5113 			vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5114 			    cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5115 			ret += print_vdev_stats(zhp, vname, oldnv ?
5116 			    oldchild[c] : NULL, newchild[c], cb, depth + 2);
5117 			free(vname);
5118 		}
5119 	}
5120 
5121 	/*
5122 	 * Include level 2 ARC devices in iostat output
5123 	 */
5124 	if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
5125 	    &newchild, &children) != 0)
5126 		return (ret);
5127 
5128 	if (oldnv) {
5129 		if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
5130 		    &oldchild, &oldchildren) != 0)
5131 			return (ret);
5132 
5133 		children = MIN(oldchildren, children);
5134 	}
5135 
5136 	if (children > 0) {
5137 		if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
5138 		    !cb->cb_vdevs.cb_names) {
5139 			print_iostat_dashes(cb, 0, "cache");
5140 		}
5141 		printf("\n");
5142 
5143 		for (c = 0; c < children; c++) {
5144 			vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5145 			    cb->cb_vdevs.cb_name_flags);
5146 			ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
5147 			    : NULL, newchild[c], cb, depth + 2);
5148 			free(vname);
5149 		}
5150 	}
5151 
5152 	return (ret);
5153 }
5154 
5155 static int
5156 refresh_iostat(zpool_handle_t *zhp, void *data)
5157 {
5158 	iostat_cbdata_t *cb = data;
5159 	boolean_t missing;
5160 
5161 	/*
5162 	 * If the pool has disappeared, remove it from the list and continue.
5163 	 */
5164 	if (zpool_refresh_stats(zhp, &missing) != 0)
5165 		return (-1);
5166 
5167 	if (missing)
5168 		pool_list_remove(cb->cb_list, zhp);
5169 
5170 	return (0);
5171 }
5172 
5173 /*
5174  * Callback to print out the iostats for the given pool.
5175  */
5176 static int
5177 print_iostat(zpool_handle_t *zhp, void *data)
5178 {
5179 	iostat_cbdata_t *cb = data;
5180 	nvlist_t *oldconfig, *newconfig;
5181 	nvlist_t *oldnvroot, *newnvroot;
5182 	int ret;
5183 
5184 	newconfig = zpool_get_config(zhp, &oldconfig);
5185 
5186 	if (cb->cb_iteration == 1)
5187 		oldconfig = NULL;
5188 
5189 	verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
5190 	    &newnvroot) == 0);
5191 
5192 	if (oldconfig == NULL)
5193 		oldnvroot = NULL;
5194 	else
5195 		verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
5196 		    &oldnvroot) == 0);
5197 
5198 	ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
5199 	    cb, 0);
5200 	if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
5201 	    !cb->cb_scripted && cb->cb_verbose &&
5202 	    !cb->cb_vdevs.cb_names_count) {
5203 		print_iostat_separator(cb);
5204 		if (cb->vcdl != NULL) {
5205 			print_cmd_columns(cb->vcdl, 1);
5206 		}
5207 		printf("\n");
5208 	}
5209 
5210 	return (ret);
5211 }
5212 
5213 static int
5214 get_columns(void)
5215 {
5216 	struct winsize ws;
5217 	int columns = 80;
5218 	int error;
5219 
5220 	if (isatty(STDOUT_FILENO)) {
5221 		error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
5222 		if (error == 0)
5223 			columns = ws.ws_col;
5224 	} else {
5225 		columns = 999;
5226 	}
5227 
5228 	return (columns);
5229 }
5230 
5231 /*
5232  * Return the required length of the pool/vdev name column.  The minimum
5233  * allowed width and output formatting flags must be provided.
5234  */
5235 static int
5236 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5237 {
5238 	nvlist_t *config, *nvroot;
5239 	int width = min_width;
5240 
5241 	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5242 		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5243 		    &nvroot) == 0);
5244 		size_t poolname_len = strlen(zpool_get_name(zhp));
5245 		if (verbose == B_FALSE) {
5246 			width = MAX(poolname_len, min_width);
5247 		} else {
5248 			width = MAX(poolname_len,
5249 			    max_width(zhp, nvroot, 0, min_width, flags));
5250 		}
5251 	}
5252 
5253 	return (width);
5254 }
5255 
5256 /*
5257  * Parse the input string, get the 'interval' and 'count' value if there is one.
5258  */
5259 static void
5260 get_interval_count(int *argcp, char **argv, float *iv,
5261     unsigned long *cnt)
5262 {
5263 	float interval = 0;
5264 	unsigned long count = 0;
5265 	int argc = *argcp;
5266 
5267 	/*
5268 	 * Determine if the last argument is an integer or a pool name
5269 	 */
5270 	if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5271 		char *end;
5272 
5273 		errno = 0;
5274 		interval = strtof(argv[argc - 1], &end);
5275 
5276 		if (*end == '\0' && errno == 0) {
5277 			if (interval == 0) {
5278 				(void) fprintf(stderr, gettext(
5279 				    "interval cannot be zero\n"));
5280 				usage(B_FALSE);
5281 			}
5282 			/*
5283 			 * Ignore the last parameter
5284 			 */
5285 			argc--;
5286 		} else {
5287 			/*
5288 			 * If this is not a valid number, just plow on.  The
5289 			 * user will get a more informative error message later
5290 			 * on.
5291 			 */
5292 			interval = 0;
5293 		}
5294 	}
5295 
5296 	/*
5297 	 * If the last argument is also an integer, then we have both a count
5298 	 * and an interval.
5299 	 */
5300 	if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5301 		char *end;
5302 
5303 		errno = 0;
5304 		count = interval;
5305 		interval = strtof(argv[argc - 1], &end);
5306 
5307 		if (*end == '\0' && errno == 0) {
5308 			if (interval == 0) {
5309 				(void) fprintf(stderr, gettext(
5310 				    "interval cannot be zero\n"));
5311 				usage(B_FALSE);
5312 			}
5313 
5314 			/*
5315 			 * Ignore the last parameter
5316 			 */
5317 			argc--;
5318 		} else {
5319 			interval = 0;
5320 		}
5321 	}
5322 
5323 	*iv = interval;
5324 	*cnt = count;
5325 	*argcp = argc;
5326 }
5327 
5328 static void
5329 get_timestamp_arg(char c)
5330 {
5331 	if (c == 'u')
5332 		timestamp_fmt = UDATE;
5333 	else if (c == 'd')
5334 		timestamp_fmt = DDATE;
5335 	else
5336 		usage(B_FALSE);
5337 }
5338 
5339 /*
5340  * Return stat flags that are supported by all pools by both the module and
5341  * zpool iostat.  "*data" should be initialized to all 0xFFs before running.
5342  * It will get ANDed down until only the flags that are supported on all pools
5343  * remain.
5344  */
5345 static int
5346 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5347 {
5348 	uint64_t *mask = data;
5349 	nvlist_t *config, *nvroot, *nvx;
5350 	uint64_t flags = 0;
5351 	int i, j;
5352 
5353 	config = zpool_get_config(zhp, NULL);
5354 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5355 	    &nvroot) == 0);
5356 
5357 	/* Default stats are always supported, but for completeness.. */
5358 	if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5359 		flags |= IOS_DEFAULT_M;
5360 
5361 	/* Get our extended stats nvlist from the main list */
5362 	if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5363 	    &nvx) != 0) {
5364 		/*
5365 		 * No extended stats; they're probably running an older
5366 		 * module.  No big deal, we support that too.
5367 		 */
5368 		goto end;
5369 	}
5370 
5371 	/* For each extended stat, make sure all its nvpairs are supported */
5372 	for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5373 		if (!vsx_type_to_nvlist[j][0])
5374 			continue;
5375 
5376 		/* Start off by assuming the flag is supported, then check */
5377 		flags |= (1ULL << j);
5378 		for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5379 			if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5380 				/* flag isn't supported */
5381 				flags = flags & ~(1ULL  << j);
5382 				break;
5383 			}
5384 		}
5385 	}
5386 end:
5387 	*mask = *mask & flags;
5388 	return (0);
5389 }
5390 
5391 /*
5392  * Return a bitmask of stats that are supported on all pools by both the module
5393  * and zpool iostat.
5394  */
5395 static uint64_t
5396 get_stat_flags(zpool_list_t *list)
5397 {
5398 	uint64_t mask = -1;
5399 
5400 	/*
5401 	 * get_stat_flags_cb() will lop off bits from "mask" until only the
5402 	 * flags that are supported on all pools remain.
5403 	 */
5404 	pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
5405 	return (mask);
5406 }
5407 
5408 /*
5409  * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
5410  */
5411 static int
5412 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
5413 {
5414 	uint64_t guid;
5415 	vdev_cbdata_t *cb = cb_data;
5416 	zpool_handle_t *zhp = zhp_data;
5417 
5418 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
5419 		return (0);
5420 
5421 	return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
5422 }
5423 
5424 /*
5425  * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
5426  */
5427 static int
5428 is_vdev(zpool_handle_t *zhp, void *cb_data)
5429 {
5430 	return (for_each_vdev(zhp, is_vdev_cb, cb_data));
5431 }
5432 
5433 /*
5434  * Check if vdevs are in a pool
5435  *
5436  * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
5437  * return 0.  If pool_name is NULL, then search all pools.
5438  */
5439 static int
5440 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
5441     vdev_cbdata_t *cb)
5442 {
5443 	char **tmp_name;
5444 	int ret = 0;
5445 	int i;
5446 	int pool_count = 0;
5447 
5448 	if ((argc == 0) || !*argv)
5449 		return (0);
5450 
5451 	if (pool_name)
5452 		pool_count = 1;
5453 
5454 	/* Temporarily hijack cb_names for a second... */
5455 	tmp_name = cb->cb_names;
5456 
5457 	/* Go though our list of prospective vdev names */
5458 	for (i = 0; i < argc; i++) {
5459 		cb->cb_names = argv + i;
5460 
5461 		/* Is this name a vdev in our pools? */
5462 		ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
5463 		    ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
5464 		if (!ret) {
5465 			/* No match */
5466 			break;
5467 		}
5468 	}
5469 
5470 	cb->cb_names = tmp_name;
5471 
5472 	return (ret);
5473 }
5474 
5475 static int
5476 is_pool_cb(zpool_handle_t *zhp, void *data)
5477 {
5478 	char *name = data;
5479 	if (strcmp(name, zpool_get_name(zhp)) == 0)
5480 		return (1);
5481 
5482 	return (0);
5483 }
5484 
5485 /*
5486  * Do we have a pool named *name?  If so, return 1, otherwise 0.
5487  */
5488 static int
5489 is_pool(char *name)
5490 {
5491 	return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
5492 	    is_pool_cb, name));
5493 }
5494 
5495 /* Are all our argv[] strings pool names?  If so return 1, 0 otherwise. */
5496 static int
5497 are_all_pools(int argc, char **argv)
5498 {
5499 	if ((argc == 0) || !*argv)
5500 		return (0);
5501 
5502 	while (--argc >= 0)
5503 		if (!is_pool(argv[argc]))
5504 			return (0);
5505 
5506 	return (1);
5507 }
5508 
5509 /*
5510  * Helper function to print out vdev/pool names we can't resolve.  Used for an
5511  * error message.
5512  */
5513 static void
5514 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
5515     vdev_cbdata_t *cb)
5516 {
5517 	int i;
5518 	char *name;
5519 	char *str;
5520 	for (i = 0; i < argc; i++) {
5521 		name = argv[i];
5522 
5523 		if (is_pool(name))
5524 			str = gettext("pool");
5525 		else if (are_vdevs_in_pool(1, &name, pool_name, cb))
5526 			str = gettext("vdev in this pool");
5527 		else if (are_vdevs_in_pool(1, &name, NULL, cb))
5528 			str = gettext("vdev in another pool");
5529 		else
5530 			str = gettext("unknown");
5531 
5532 		fprintf(stderr, "\t%s (%s)\n", name, str);
5533 	}
5534 }
5535 
5536 /*
5537  * Same as get_interval_count(), but with additional checks to not misinterpret
5538  * guids as interval/count values.  Assumes VDEV_NAME_GUID is set in
5539  * cb.cb_vdevs.cb_name_flags.
5540  */
5541 static void
5542 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
5543     unsigned long *count, iostat_cbdata_t *cb)
5544 {
5545 	char **tmpargv = argv;
5546 	int argc_for_interval = 0;
5547 
5548 	/* Is the last arg an interval value?  Or a guid? */
5549 	if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
5550 	    &cb->cb_vdevs)) {
5551 		/*
5552 		 * The last arg is not a guid, so it's probably an
5553 		 * interval value.
5554 		 */
5555 		argc_for_interval++;
5556 
5557 		if (*argc >= 2 &&
5558 		    !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
5559 		    &cb->cb_vdevs)) {
5560 			/*
5561 			 * The 2nd to last arg is not a guid, so it's probably
5562 			 * an interval value.
5563 			 */
5564 			argc_for_interval++;
5565 		}
5566 	}
5567 
5568 	/* Point to our list of possible intervals */
5569 	tmpargv = &argv[*argc - argc_for_interval];
5570 
5571 	*argc = *argc - argc_for_interval;
5572 	get_interval_count(&argc_for_interval, tmpargv,
5573 	    interval, count);
5574 }
5575 
5576 /*
5577  * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
5578  * if we were unable to determine its size.
5579  */
5580 static int
5581 terminal_height(void)
5582 {
5583 	struct winsize win;
5584 
5585 	if (isatty(STDOUT_FILENO) == 0)
5586 		return (-1);
5587 
5588 	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
5589 		return (win.ws_row);
5590 
5591 	return (-1);
5592 }
5593 
5594 /*
5595  * Run one of the zpool status/iostat -c scripts with the help (-h) option and
5596  * print the result.
5597  *
5598  * name:	Short name of the script ('iostat').
5599  * path:	Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat');
5600  */
5601 static void
5602 print_zpool_script_help(char *name, char *path)
5603 {
5604 	char *argv[] = {path, (char *)"-h", NULL};
5605 	char **lines = NULL;
5606 	int lines_cnt = 0;
5607 	int rc;
5608 
5609 	rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
5610 	    &lines_cnt);
5611 	if (rc != 0 || lines == NULL || lines_cnt <= 0) {
5612 		if (lines != NULL)
5613 			libzfs_free_str_array(lines, lines_cnt);
5614 		return;
5615 	}
5616 
5617 	for (int i = 0; i < lines_cnt; i++)
5618 		if (!is_blank_str(lines[i]))
5619 			printf("  %-14s  %s\n", name, lines[i]);
5620 
5621 	libzfs_free_str_array(lines, lines_cnt);
5622 }
5623 
5624 /*
5625  * Go though the zpool status/iostat -c scripts in the user's path, run their
5626  * help option (-h), and print out the results.
5627  */
5628 static void
5629 print_zpool_dir_scripts(char *dirpath)
5630 {
5631 	DIR *dir;
5632 	struct dirent *ent;
5633 	char fullpath[MAXPATHLEN];
5634 	struct stat dir_stat;
5635 
5636 	if ((dir = opendir(dirpath)) != NULL) {
5637 		/* print all the files and directories within directory */
5638 		while ((ent = readdir(dir)) != NULL) {
5639 			if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
5640 			    dirpath, ent->d_name) >= sizeof (fullpath)) {
5641 				(void) fprintf(stderr,
5642 				    gettext("internal error: "
5643 				    "ZPOOL_SCRIPTS_PATH too large.\n"));
5644 				exit(1);
5645 			}
5646 
5647 			/* Print the scripts */
5648 			if (stat(fullpath, &dir_stat) == 0)
5649 				if (dir_stat.st_mode & S_IXUSR &&
5650 				    S_ISREG(dir_stat.st_mode))
5651 					print_zpool_script_help(ent->d_name,
5652 					    fullpath);
5653 		}
5654 		closedir(dir);
5655 	}
5656 }
5657 
5658 /*
5659  * Print out help text for all zpool status/iostat -c scripts.
5660  */
5661 static void
5662 print_zpool_script_list(const char *subcommand)
5663 {
5664 	char *dir, *sp, *tmp;
5665 
5666 	printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
5667 
5668 	sp = zpool_get_cmd_search_path();
5669 	if (sp == NULL)
5670 		return;
5671 
5672 	for (dir = strtok_r(sp, ":", &tmp);
5673 	    dir != NULL;
5674 	    dir = strtok_r(NULL, ":", &tmp))
5675 		print_zpool_dir_scripts(dir);
5676 
5677 	free(sp);
5678 }
5679 
5680 /*
5681  * Set the minimum pool/vdev name column width.  The width must be at least 10,
5682  * but may be as large as the column width - 42 so it still fits on one line.
5683  * NOTE: 42 is the width of the default capacity/operations/bandwidth output
5684  */
5685 static int
5686 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
5687 {
5688 	iostat_cbdata_t *cb = data;
5689 	int width, available_width;
5690 
5691 	/*
5692 	 * get_namewidth() returns the maximum width of any name in that column
5693 	 * for any pool/vdev/device line that will be output.
5694 	 */
5695 	width = get_namewidth(zhp, cb->cb_namewidth,
5696 	    cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
5697 
5698 	/*
5699 	 * The width we are calculating is the width of the header and also the
5700 	 * padding width for names that are less than maximum width.  The stats
5701 	 * take up 42 characters, so the width available for names is:
5702 	 */
5703 	available_width = get_columns() - 42;
5704 
5705 	/*
5706 	 * If the maximum width fits on a screen, then great!  Make everything
5707 	 * line up by justifying all lines to the same width.  If that max
5708 	 * width is larger than what's available, the name plus stats won't fit
5709 	 * on one line, and justifying to that width would cause every line to
5710 	 * wrap on the screen.  We only want lines with long names to wrap.
5711 	 * Limit the padding to what won't wrap.
5712 	 */
5713 	if (width > available_width)
5714 		width = available_width;
5715 
5716 	/*
5717 	 * And regardless of whatever the screen width is (get_columns can
5718 	 * return 0 if the width is not known or less than 42 for a narrow
5719 	 * terminal) have the width be a minimum of 10.
5720 	 */
5721 	if (width < 10)
5722 		width = 10;
5723 
5724 	/* Save the calculated width */
5725 	cb->cb_namewidth = width;
5726 
5727 	return (0);
5728 }
5729 
5730 /*
5731  * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name]
5732  *              [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
5733  *              [interval [count]]
5734  *
5735  *	-c CMD  For each vdev, run command CMD
5736  *	-g	Display guid for individual vdev name.
5737  *	-L	Follow links when resolving vdev path name.
5738  *	-P	Display full path for vdev name.
5739  *	-v	Display statistics for individual vdevs
5740  *	-h	Display help
5741  *	-p	Display values in parsable (exact) format.
5742  *	-H	Scripted mode.  Don't display headers, and separate properties
5743  *		by a single tab.
5744  *	-l	Display average latency
5745  *	-q	Display queue depths
5746  *	-w	Display latency histograms
5747  *	-r	Display request size histogram
5748  *	-T	Display a timestamp in date(1) or Unix format
5749  *	-n	Only print headers once
5750  *
5751  * This command can be tricky because we want to be able to deal with pool
5752  * creation/destruction as well as vdev configuration changes.  The bulk of this
5753  * processing is handled by the pool_list_* routines in zpool_iter.c.  We rely
5754  * on pool_list_update() to detect the addition of new pools.  Configuration
5755  * changes are all handled within libzfs.
5756  */
5757 int
5758 zpool_do_iostat(int argc, char **argv)
5759 {
5760 	int c;
5761 	int ret;
5762 	int npools;
5763 	float interval = 0;
5764 	unsigned long count = 0;
5765 	int winheight = 24;
5766 	zpool_list_t *list;
5767 	boolean_t verbose = B_FALSE;
5768 	boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
5769 	boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
5770 	boolean_t omit_since_boot = B_FALSE;
5771 	boolean_t guid = B_FALSE;
5772 	boolean_t follow_links = B_FALSE;
5773 	boolean_t full_name = B_FALSE;
5774 	boolean_t headers_once = B_FALSE;
5775 	iostat_cbdata_t cb = { 0 };
5776 	char *cmd = NULL;
5777 
5778 	/* Used for printing error message */
5779 	const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
5780 	    [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
5781 
5782 	uint64_t unsupported_flags;
5783 
5784 	/* check options */
5785 	while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
5786 		switch (c) {
5787 		case 'c':
5788 			if (cmd != NULL) {
5789 				fprintf(stderr,
5790 				    gettext("Can't set -c flag twice\n"));
5791 				exit(1);
5792 			}
5793 
5794 			if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
5795 			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
5796 				fprintf(stderr, gettext(
5797 				    "Can't run -c, disabled by "
5798 				    "ZPOOL_SCRIPTS_ENABLED.\n"));
5799 				exit(1);
5800 			}
5801 
5802 			if ((getuid() <= 0 || geteuid() <= 0) &&
5803 			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
5804 				fprintf(stderr, gettext(
5805 				    "Can't run -c with root privileges "
5806 				    "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
5807 				exit(1);
5808 			}
5809 			cmd = optarg;
5810 			verbose = B_TRUE;
5811 			break;
5812 		case 'g':
5813 			guid = B_TRUE;
5814 			break;
5815 		case 'L':
5816 			follow_links = B_TRUE;
5817 			break;
5818 		case 'P':
5819 			full_name = B_TRUE;
5820 			break;
5821 		case 'T':
5822 			get_timestamp_arg(*optarg);
5823 			break;
5824 		case 'v':
5825 			verbose = B_TRUE;
5826 			break;
5827 		case 'p':
5828 			parsable = B_TRUE;
5829 			break;
5830 		case 'l':
5831 			latency = B_TRUE;
5832 			break;
5833 		case 'q':
5834 			queues = B_TRUE;
5835 			break;
5836 		case 'H':
5837 			scripted = B_TRUE;
5838 			break;
5839 		case 'w':
5840 			l_histo = B_TRUE;
5841 			break;
5842 		case 'r':
5843 			rq_histo = B_TRUE;
5844 			break;
5845 		case 'y':
5846 			omit_since_boot = B_TRUE;
5847 			break;
5848 		case 'n':
5849 			headers_once = B_TRUE;
5850 			break;
5851 		case 'h':
5852 			usage(B_FALSE);
5853 			break;
5854 		case '?':
5855 			if (optopt == 'c') {
5856 				print_zpool_script_list("iostat");
5857 				exit(0);
5858 			} else {
5859 				fprintf(stderr,
5860 				    gettext("invalid option '%c'\n"), optopt);
5861 			}
5862 			usage(B_FALSE);
5863 		}
5864 	}
5865 
5866 	argc -= optind;
5867 	argv += optind;
5868 
5869 	cb.cb_literal = parsable;
5870 	cb.cb_scripted = scripted;
5871 
5872 	if (guid)
5873 		cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
5874 	if (follow_links)
5875 		cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
5876 	if (full_name)
5877 		cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
5878 	cb.cb_iteration = 0;
5879 	cb.cb_namewidth = 0;
5880 	cb.cb_verbose = verbose;
5881 
5882 	/* Get our interval and count values (if any) */
5883 	if (guid) {
5884 		get_interval_count_filter_guids(&argc, argv, &interval,
5885 		    &count, &cb);
5886 	} else {
5887 		get_interval_count(&argc, argv, &interval, &count);
5888 	}
5889 
5890 	if (argc == 0) {
5891 		/* No args, so just print the defaults. */
5892 	} else if (are_all_pools(argc, argv)) {
5893 		/* All the args are pool names */
5894 	} else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
5895 		/* All the args are vdevs */
5896 		cb.cb_vdevs.cb_names = argv;
5897 		cb.cb_vdevs.cb_names_count = argc;
5898 		argc = 0; /* No pools to process */
5899 	} else if (are_all_pools(1, argv)) {
5900 		/* The first arg is a pool name */
5901 		if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
5902 		    &cb.cb_vdevs)) {
5903 			/* ...and the rest are vdev names */
5904 			cb.cb_vdevs.cb_names = argv + 1;
5905 			cb.cb_vdevs.cb_names_count = argc - 1;
5906 			argc = 1; /* One pool to process */
5907 		} else {
5908 			fprintf(stderr, gettext("Expected either a list of "));
5909 			fprintf(stderr, gettext("pools, or list of vdevs in"));
5910 			fprintf(stderr, " \"%s\", ", argv[0]);
5911 			fprintf(stderr, gettext("but got:\n"));
5912 			error_list_unresolved_vdevs(argc - 1, argv + 1,
5913 			    argv[0], &cb.cb_vdevs);
5914 			fprintf(stderr, "\n");
5915 			usage(B_FALSE);
5916 			return (1);
5917 		}
5918 	} else {
5919 		/*
5920 		 * The args don't make sense. The first arg isn't a pool name,
5921 		 * nor are all the args vdevs.
5922 		 */
5923 		fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
5924 		fprintf(stderr, "\n");
5925 		return (1);
5926 	}
5927 
5928 	if (cb.cb_vdevs.cb_names_count != 0) {
5929 		/*
5930 		 * If user specified vdevs, it implies verbose.
5931 		 */
5932 		cb.cb_verbose = B_TRUE;
5933 	}
5934 
5935 	/*
5936 	 * Construct the list of all interesting pools.
5937 	 */
5938 	ret = 0;
5939 	if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
5940 	    &ret)) == NULL)
5941 		return (1);
5942 
5943 	if (pool_list_count(list) == 0 && argc != 0) {
5944 		pool_list_free(list);
5945 		return (1);
5946 	}
5947 
5948 	if (pool_list_count(list) == 0 && interval == 0) {
5949 		pool_list_free(list);
5950 		(void) fprintf(stderr, gettext("no pools available\n"));
5951 		return (1);
5952 	}
5953 
5954 	if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
5955 		pool_list_free(list);
5956 		(void) fprintf(stderr,
5957 		    gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
5958 		usage(B_FALSE);
5959 		return (1);
5960 	}
5961 
5962 	if (l_histo && rq_histo) {
5963 		pool_list_free(list);
5964 		(void) fprintf(stderr,
5965 		    gettext("Only one of [-r|-w] can be passed at a time\n"));
5966 		usage(B_FALSE);
5967 		return (1);
5968 	}
5969 
5970 	/*
5971 	 * Enter the main iostat loop.
5972 	 */
5973 	cb.cb_list = list;
5974 
5975 	if (l_histo) {
5976 		/*
5977 		 * Histograms tables look out of place when you try to display
5978 		 * them with the other stats, so make a rule that you can only
5979 		 * print histograms by themselves.
5980 		 */
5981 		cb.cb_flags = IOS_L_HISTO_M;
5982 	} else if (rq_histo) {
5983 		cb.cb_flags = IOS_RQ_HISTO_M;
5984 	} else {
5985 		cb.cb_flags = IOS_DEFAULT_M;
5986 		if (latency)
5987 			cb.cb_flags |= IOS_LATENCY_M;
5988 		if (queues)
5989 			cb.cb_flags |= IOS_QUEUES_M;
5990 	}
5991 
5992 	/*
5993 	 * See if the module supports all the stats we want to display.
5994 	 */
5995 	unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
5996 	if (unsupported_flags) {
5997 		uint64_t f;
5998 		int idx;
5999 		fprintf(stderr,
6000 		    gettext("The loaded zfs module doesn't support:"));
6001 
6002 		/* for each bit set in unsupported_flags */
6003 		for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
6004 			idx = lowbit64(f) - 1;
6005 			fprintf(stderr, " -%c", flag_to_arg[idx]);
6006 		}
6007 
6008 		fprintf(stderr, ".  Try running a newer module.\n");
6009 		pool_list_free(list);
6010 
6011 		return (1);
6012 	}
6013 
6014 	for (;;) {
6015 		if ((npools = pool_list_count(list)) == 0)
6016 			(void) fprintf(stderr, gettext("no pools available\n"));
6017 		else {
6018 			/*
6019 			 * If this is the first iteration and -y was supplied
6020 			 * we skip any printing.
6021 			 */
6022 			boolean_t skip = (omit_since_boot &&
6023 			    cb.cb_iteration == 0);
6024 
6025 			/*
6026 			 * Refresh all statistics.  This is done as an
6027 			 * explicit step before calculating the maximum name
6028 			 * width, so that any * configuration changes are
6029 			 * properly accounted for.
6030 			 */
6031 			(void) pool_list_iter(list, B_FALSE, refresh_iostat,
6032 			    &cb);
6033 
6034 			/*
6035 			 * Iterate over all pools to determine the maximum width
6036 			 * for the pool / device name column across all pools.
6037 			 */
6038 			cb.cb_namewidth = 0;
6039 			(void) pool_list_iter(list, B_FALSE,
6040 			    get_namewidth_iostat, &cb);
6041 
6042 			if (timestamp_fmt != NODATE)
6043 				print_timestamp(timestamp_fmt);
6044 
6045 			if (cmd != NULL && cb.cb_verbose &&
6046 			    !(cb.cb_flags & IOS_ANYHISTO_M)) {
6047 				cb.vcdl = all_pools_for_each_vdev_run(argc,
6048 				    argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
6049 				    cb.cb_vdevs.cb_names_count,
6050 				    cb.cb_vdevs.cb_name_flags);
6051 			} else {
6052 				cb.vcdl = NULL;
6053 			}
6054 
6055 
6056 			/*
6057 			 * Check terminal size so we can print headers
6058 			 * even when terminal window has its height
6059 			 * changed.
6060 			 */
6061 			winheight = terminal_height();
6062 			/*
6063 			 * Are we connected to TTY? If not, headers_once
6064 			 * should be true, to avoid breaking scripts.
6065 			 */
6066 			if (winheight < 0)
6067 				headers_once = B_TRUE;
6068 
6069 			/*
6070 			 * If it's the first time and we're not skipping it,
6071 			 * or either skip or verbose mode, print the header.
6072 			 *
6073 			 * The histogram code explicitly prints its header on
6074 			 * every vdev, so skip this for histograms.
6075 			 */
6076 			if (((++cb.cb_iteration == 1 && !skip) ||
6077 			    (skip != verbose) ||
6078 			    (!headers_once &&
6079 			    (cb.cb_iteration % winheight) == 0)) &&
6080 			    (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
6081 			    !cb.cb_scripted)
6082 				print_iostat_header(&cb);
6083 
6084 			if (skip) {
6085 				(void) fflush(stdout);
6086 				(void) fsleep(interval);
6087 				continue;
6088 			}
6089 
6090 			pool_list_iter(list, B_FALSE, print_iostat, &cb);
6091 
6092 			/*
6093 			 * If there's more than one pool, and we're not in
6094 			 * verbose mode (which prints a separator for us),
6095 			 * then print a separator.
6096 			 *
6097 			 * In addition, if we're printing specific vdevs then
6098 			 * we also want an ending separator.
6099 			 */
6100 			if (((npools > 1 && !verbose &&
6101 			    !(cb.cb_flags & IOS_ANYHISTO_M)) ||
6102 			    (!(cb.cb_flags & IOS_ANYHISTO_M) &&
6103 			    cb.cb_vdevs.cb_names_count)) &&
6104 			    !cb.cb_scripted) {
6105 				print_iostat_separator(&cb);
6106 				if (cb.vcdl != NULL)
6107 					print_cmd_columns(cb.vcdl, 1);
6108 				printf("\n");
6109 			}
6110 
6111 			if (cb.vcdl != NULL)
6112 				free_vdev_cmd_data_list(cb.vcdl);
6113 
6114 		}
6115 
6116 		if (interval == 0)
6117 			break;
6118 
6119 		if (count != 0 && --count == 0)
6120 			break;
6121 
6122 		(void) fflush(stdout);
6123 		(void) fsleep(interval);
6124 	}
6125 
6126 	pool_list_free(list);
6127 
6128 	return (ret);
6129 }
6130 
6131 typedef struct list_cbdata {
6132 	boolean_t	cb_verbose;
6133 	int		cb_name_flags;
6134 	int		cb_namewidth;
6135 	boolean_t	cb_scripted;
6136 	zprop_list_t	*cb_proplist;
6137 	boolean_t	cb_literal;
6138 } list_cbdata_t;
6139 
6140 
6141 /*
6142  * Given a list of columns to display, output appropriate headers for each one.
6143  */
6144 static void
6145 print_header(list_cbdata_t *cb)
6146 {
6147 	zprop_list_t *pl = cb->cb_proplist;
6148 	char headerbuf[ZPOOL_MAXPROPLEN];
6149 	const char *header;
6150 	boolean_t first = B_TRUE;
6151 	boolean_t right_justify;
6152 	size_t width = 0;
6153 
6154 	for (; pl != NULL; pl = pl->pl_next) {
6155 		width = pl->pl_width;
6156 		if (first && cb->cb_verbose) {
6157 			/*
6158 			 * Reset the width to accommodate the verbose listing
6159 			 * of devices.
6160 			 */
6161 			width = cb->cb_namewidth;
6162 		}
6163 
6164 		if (!first)
6165 			(void) fputs("  ", stdout);
6166 		else
6167 			first = B_FALSE;
6168 
6169 		right_justify = B_FALSE;
6170 		if (pl->pl_prop != ZPROP_USERPROP) {
6171 			header = zpool_prop_column_name(pl->pl_prop);
6172 			right_justify = zpool_prop_align_right(pl->pl_prop);
6173 		} else {
6174 			int i;
6175 
6176 			for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
6177 				headerbuf[i] = toupper(pl->pl_user_prop[i]);
6178 			headerbuf[i] = '\0';
6179 			header = headerbuf;
6180 		}
6181 
6182 		if (pl->pl_next == NULL && !right_justify)
6183 			(void) fputs(header, stdout);
6184 		else if (right_justify)
6185 			(void) printf("%*s", (int)width, header);
6186 		else
6187 			(void) printf("%-*s", (int)width, header);
6188 	}
6189 
6190 	(void) fputc('\n', stdout);
6191 }
6192 
6193 /*
6194  * Given a pool and a list of properties, print out all the properties according
6195  * to the described layout. Used by zpool_do_list().
6196  */
6197 static void
6198 print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
6199 {
6200 	zprop_list_t *pl = cb->cb_proplist;
6201 	boolean_t first = B_TRUE;
6202 	char property[ZPOOL_MAXPROPLEN];
6203 	const char *propstr;
6204 	boolean_t right_justify;
6205 	size_t width;
6206 
6207 	for (; pl != NULL; pl = pl->pl_next) {
6208 
6209 		width = pl->pl_width;
6210 		if (first && cb->cb_verbose) {
6211 			/*
6212 			 * Reset the width to accommodate the verbose listing
6213 			 * of devices.
6214 			 */
6215 			width = cb->cb_namewidth;
6216 		}
6217 
6218 		if (!first) {
6219 			if (cb->cb_scripted)
6220 				(void) fputc('\t', stdout);
6221 			else
6222 				(void) fputs("  ", stdout);
6223 		} else {
6224 			first = B_FALSE;
6225 		}
6226 
6227 		right_justify = B_FALSE;
6228 		if (pl->pl_prop != ZPROP_USERPROP) {
6229 			if (zpool_get_prop(zhp, pl->pl_prop, property,
6230 			    sizeof (property), NULL, cb->cb_literal) != 0)
6231 				propstr = "-";
6232 			else
6233 				propstr = property;
6234 
6235 			right_justify = zpool_prop_align_right(pl->pl_prop);
6236 		} else if ((zpool_prop_feature(pl->pl_user_prop) ||
6237 		    zpool_prop_unsupported(pl->pl_user_prop)) &&
6238 		    zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
6239 		    sizeof (property)) == 0) {
6240 			propstr = property;
6241 		} else if (zfs_prop_user(pl->pl_user_prop) &&
6242 		    zpool_get_userprop(zhp, pl->pl_user_prop, property,
6243 		    sizeof (property), NULL) == 0) {
6244 			propstr = property;
6245 		} else {
6246 			propstr = "-";
6247 		}
6248 
6249 		/*
6250 		 * If this is being called in scripted mode, or if this is the
6251 		 * last column and it is left-justified, don't include a width
6252 		 * format specifier.
6253 		 */
6254 		if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
6255 			(void) fputs(propstr, stdout);
6256 		else if (right_justify)
6257 			(void) printf("%*s", (int)width, propstr);
6258 		else
6259 			(void) printf("%-*s", (int)width, propstr);
6260 	}
6261 
6262 	(void) fputc('\n', stdout);
6263 }
6264 
6265 static void
6266 print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
6267     boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
6268 {
6269 	char propval[64];
6270 	boolean_t fixed;
6271 	size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6272 
6273 	switch (prop) {
6274 	case ZPOOL_PROP_SIZE:
6275 	case ZPOOL_PROP_EXPANDSZ:
6276 	case ZPOOL_PROP_CHECKPOINT:
6277 	case ZPOOL_PROP_DEDUPRATIO:
6278 		if (value == 0)
6279 			(void) strlcpy(propval, "-", sizeof (propval));
6280 		else
6281 			zfs_nicenum_format(value, propval, sizeof (propval),
6282 			    format);
6283 		break;
6284 	case ZPOOL_PROP_FRAGMENTATION:
6285 		if (value == ZFS_FRAG_INVALID) {
6286 			(void) strlcpy(propval, "-", sizeof (propval));
6287 		} else if (format == ZFS_NICENUM_RAW) {
6288 			(void) snprintf(propval, sizeof (propval), "%llu",
6289 			    (unsigned long long)value);
6290 		} else {
6291 			(void) snprintf(propval, sizeof (propval), "%llu%%",
6292 			    (unsigned long long)value);
6293 		}
6294 		break;
6295 	case ZPOOL_PROP_CAPACITY:
6296 		/* capacity value is in parts-per-10,000 (aka permyriad) */
6297 		if (format == ZFS_NICENUM_RAW)
6298 			(void) snprintf(propval, sizeof (propval), "%llu",
6299 			    (unsigned long long)value / 100);
6300 		else
6301 			(void) snprintf(propval, sizeof (propval),
6302 			    value < 1000 ? "%1.2f%%" : value < 10000 ?
6303 			    "%2.1f%%" : "%3.0f%%", value / 100.0);
6304 		break;
6305 	case ZPOOL_PROP_HEALTH:
6306 		width = 8;
6307 		(void) strlcpy(propval, str, sizeof (propval));
6308 		break;
6309 	default:
6310 		zfs_nicenum_format(value, propval, sizeof (propval), format);
6311 	}
6312 
6313 	if (!valid)
6314 		(void) strlcpy(propval, "-", sizeof (propval));
6315 
6316 	if (scripted)
6317 		(void) printf("\t%s", propval);
6318 	else
6319 		(void) printf("  %*s", (int)width, propval);
6320 }
6321 
6322 /*
6323  * print static default line per vdev
6324  * not compatible with '-o' <proplist> option
6325  */
6326 static void
6327 print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
6328     list_cbdata_t *cb, int depth, boolean_t isspare)
6329 {
6330 	nvlist_t **child;
6331 	vdev_stat_t *vs;
6332 	uint_t c, children;
6333 	char *vname;
6334 	boolean_t scripted = cb->cb_scripted;
6335 	uint64_t islog = B_FALSE;
6336 	const char *dashes = "%-*s      -      -      -        -         "
6337 	    "-      -      -      -         -\n";
6338 
6339 	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
6340 	    (uint64_t **)&vs, &c) == 0);
6341 
6342 	if (name != NULL) {
6343 		boolean_t toplevel = (vs->vs_space != 0);
6344 		uint64_t cap;
6345 		enum zfs_nicenum_format format;
6346 		const char *state;
6347 
6348 		if (cb->cb_literal)
6349 			format = ZFS_NICENUM_RAW;
6350 		else
6351 			format = ZFS_NICENUM_1024;
6352 
6353 		if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
6354 			return;
6355 
6356 		if (scripted)
6357 			(void) printf("\t%s", name);
6358 		else if (strlen(name) + depth > cb->cb_namewidth)
6359 			(void) printf("%*s%s", depth, "", name);
6360 		else
6361 			(void) printf("%*s%s%*s", depth, "", name,
6362 			    (int)(cb->cb_namewidth - strlen(name) - depth), "");
6363 
6364 		/*
6365 		 * Print the properties for the individual vdevs. Some
6366 		 * properties are only applicable to toplevel vdevs. The
6367 		 * 'toplevel' boolean value is passed to the print_one_column()
6368 		 * to indicate that the value is valid.
6369 		 */
6370 		if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace)
6371 			print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
6372 			    scripted, B_TRUE, format);
6373 		else
6374 			print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
6375 			    scripted, toplevel, format);
6376 		print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
6377 		    scripted, toplevel, format);
6378 		print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
6379 		    NULL, scripted, toplevel, format);
6380 		print_one_column(ZPOOL_PROP_CHECKPOINT,
6381 		    vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
6382 		print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
6383 		    scripted, B_TRUE, format);
6384 		print_one_column(ZPOOL_PROP_FRAGMENTATION,
6385 		    vs->vs_fragmentation, NULL, scripted,
6386 		    (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
6387 		    format);
6388 		cap = (vs->vs_space == 0) ? 0 :
6389 		    (vs->vs_alloc * 10000 / vs->vs_space);
6390 		print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
6391 		    scripted, toplevel, format);
6392 		print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
6393 		    scripted, toplevel, format);
6394 		state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
6395 		if (isspare) {
6396 			if (vs->vs_aux == VDEV_AUX_SPARED)
6397 				state = "INUSE";
6398 			else if (vs->vs_state == VDEV_STATE_HEALTHY)
6399 				state = "AVAIL";
6400 		}
6401 		print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
6402 		    B_TRUE, format);
6403 		(void) fputc('\n', stdout);
6404 	}
6405 
6406 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
6407 	    &child, &children) != 0)
6408 		return;
6409 
6410 	/* list the normal vdevs first */
6411 	for (c = 0; c < children; c++) {
6412 		uint64_t ishole = B_FALSE;
6413 
6414 		if (nvlist_lookup_uint64(child[c],
6415 		    ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
6416 			continue;
6417 
6418 		if (nvlist_lookup_uint64(child[c],
6419 		    ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
6420 			continue;
6421 
6422 		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
6423 			continue;
6424 
6425 		vname = zpool_vdev_name(g_zfs, zhp, child[c],
6426 		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
6427 		print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
6428 		free(vname);
6429 	}
6430 
6431 	/* list the classes: 'logs', 'dedup', and 'special' */
6432 	for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
6433 		boolean_t printed = B_FALSE;
6434 
6435 		for (c = 0; c < children; c++) {
6436 			const char *bias = NULL;
6437 			const char *type = NULL;
6438 
6439 			if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
6440 			    &islog) == 0 && islog) {
6441 				bias = VDEV_ALLOC_CLASS_LOGS;
6442 			} else {
6443 				(void) nvlist_lookup_string(child[c],
6444 				    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
6445 				(void) nvlist_lookup_string(child[c],
6446 				    ZPOOL_CONFIG_TYPE, &type);
6447 			}
6448 			if (bias == NULL || strcmp(bias, class_name[n]) != 0)
6449 				continue;
6450 			if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
6451 				continue;
6452 
6453 			if (!printed) {
6454 				/* LINTED E_SEC_PRINTF_VAR_FMT */
6455 				(void) printf(dashes, cb->cb_namewidth,
6456 				    class_name[n]);
6457 				printed = B_TRUE;
6458 			}
6459 			vname = zpool_vdev_name(g_zfs, zhp, child[c],
6460 			    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
6461 			print_list_stats(zhp, vname, child[c], cb, depth + 2,
6462 			    B_FALSE);
6463 			free(vname);
6464 		}
6465 	}
6466 
6467 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
6468 	    &child, &children) == 0 && children > 0) {
6469 		/* LINTED E_SEC_PRINTF_VAR_FMT */
6470 		(void) printf(dashes, cb->cb_namewidth, "cache");
6471 		for (c = 0; c < children; c++) {
6472 			vname = zpool_vdev_name(g_zfs, zhp, child[c],
6473 			    cb->cb_name_flags);
6474 			print_list_stats(zhp, vname, child[c], cb, depth + 2,
6475 			    B_FALSE);
6476 			free(vname);
6477 		}
6478 	}
6479 
6480 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
6481 	    &children) == 0 && children > 0) {
6482 		/* LINTED E_SEC_PRINTF_VAR_FMT */
6483 		(void) printf(dashes, cb->cb_namewidth, "spare");
6484 		for (c = 0; c < children; c++) {
6485 			vname = zpool_vdev_name(g_zfs, zhp, child[c],
6486 			    cb->cb_name_flags);
6487 			print_list_stats(zhp, vname, child[c], cb, depth + 2,
6488 			    B_TRUE);
6489 			free(vname);
6490 		}
6491 	}
6492 }
6493 
6494 /*
6495  * Generic callback function to list a pool.
6496  */
6497 static int
6498 list_callback(zpool_handle_t *zhp, void *data)
6499 {
6500 	list_cbdata_t *cbp = data;
6501 
6502 	print_pool(zhp, cbp);
6503 
6504 	if (cbp->cb_verbose) {
6505 		nvlist_t *config, *nvroot;
6506 
6507 		config = zpool_get_config(zhp, NULL);
6508 		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
6509 		    &nvroot) == 0);
6510 		print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
6511 	}
6512 
6513 	return (0);
6514 }
6515 
6516 /*
6517  * Set the minimum pool/vdev name column width.  The width must be at least 9,
6518  * but may be as large as needed.
6519  */
6520 static int
6521 get_namewidth_list(zpool_handle_t *zhp, void *data)
6522 {
6523 	list_cbdata_t *cb = data;
6524 	int width;
6525 
6526 	width = get_namewidth(zhp, cb->cb_namewidth,
6527 	    cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
6528 
6529 	if (width < 9)
6530 		width = 9;
6531 
6532 	cb->cb_namewidth = width;
6533 
6534 	return (0);
6535 }
6536 
6537 /*
6538  * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
6539  *
6540  *	-g	Display guid for individual vdev name.
6541  *	-H	Scripted mode.  Don't display headers, and separate properties
6542  *		by a single tab.
6543  *	-L	Follow links when resolving vdev path name.
6544  *	-o	List of properties to display.  Defaults to
6545  *		"name,size,allocated,free,expandsize,fragmentation,capacity,"
6546  *		"dedupratio,health,altroot"
6547  *	-p	Display values in parsable (exact) format.
6548  *	-P	Display full path for vdev name.
6549  *	-T	Display a timestamp in date(1) or Unix format
6550  *
6551  * List all pools in the system, whether or not they're healthy.  Output space
6552  * statistics for each one, as well as health status summary.
6553  */
6554 int
6555 zpool_do_list(int argc, char **argv)
6556 {
6557 	int c;
6558 	int ret = 0;
6559 	list_cbdata_t cb = { 0 };
6560 	static char default_props[] =
6561 	    "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
6562 	    "capacity,dedupratio,health,altroot";
6563 	char *props = default_props;
6564 	float interval = 0;
6565 	unsigned long count = 0;
6566 	zpool_list_t *list;
6567 	boolean_t first = B_TRUE;
6568 	current_prop_type = ZFS_TYPE_POOL;
6569 
6570 	/* check options */
6571 	while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
6572 		switch (c) {
6573 		case 'g':
6574 			cb.cb_name_flags |= VDEV_NAME_GUID;
6575 			break;
6576 		case 'H':
6577 			cb.cb_scripted = B_TRUE;
6578 			break;
6579 		case 'L':
6580 			cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6581 			break;
6582 		case 'o':
6583 			props = optarg;
6584 			break;
6585 		case 'P':
6586 			cb.cb_name_flags |= VDEV_NAME_PATH;
6587 			break;
6588 		case 'p':
6589 			cb.cb_literal = B_TRUE;
6590 			break;
6591 		case 'T':
6592 			get_timestamp_arg(*optarg);
6593 			break;
6594 		case 'v':
6595 			cb.cb_verbose = B_TRUE;
6596 			cb.cb_namewidth = 8;	/* 8 until precalc is avail */
6597 			break;
6598 		case ':':
6599 			(void) fprintf(stderr, gettext("missing argument for "
6600 			    "'%c' option\n"), optopt);
6601 			usage(B_FALSE);
6602 			break;
6603 		case '?':
6604 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
6605 			    optopt);
6606 			usage(B_FALSE);
6607 		}
6608 	}
6609 
6610 	argc -= optind;
6611 	argv += optind;
6612 
6613 	get_interval_count(&argc, argv, &interval, &count);
6614 
6615 	if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
6616 		usage(B_FALSE);
6617 
6618 	for (;;) {
6619 		if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
6620 		    ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
6621 			return (1);
6622 
6623 		if (pool_list_count(list) == 0)
6624 			break;
6625 
6626 		cb.cb_namewidth = 0;
6627 		(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
6628 
6629 		if (timestamp_fmt != NODATE)
6630 			print_timestamp(timestamp_fmt);
6631 
6632 		if (!cb.cb_scripted && (first || cb.cb_verbose)) {
6633 			print_header(&cb);
6634 			first = B_FALSE;
6635 		}
6636 		ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
6637 
6638 		if (interval == 0)
6639 			break;
6640 
6641 		if (count != 0 && --count == 0)
6642 			break;
6643 
6644 		pool_list_free(list);
6645 
6646 		(void) fflush(stdout);
6647 		(void) fsleep(interval);
6648 	}
6649 
6650 	if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
6651 		(void) printf(gettext("no pools available\n"));
6652 		ret = 0;
6653 	}
6654 
6655 	pool_list_free(list);
6656 	zprop_free_list(cb.cb_proplist);
6657 	return (ret);
6658 }
6659 
6660 static int
6661 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
6662 {
6663 	boolean_t force = B_FALSE;
6664 	boolean_t rebuild = B_FALSE;
6665 	boolean_t wait = B_FALSE;
6666 	int c;
6667 	nvlist_t *nvroot;
6668 	char *poolname, *old_disk, *new_disk;
6669 	zpool_handle_t *zhp;
6670 	nvlist_t *props = NULL;
6671 	char *propval;
6672 	int ret;
6673 
6674 	/* check options */
6675 	while ((c = getopt(argc, argv, "fo:sw")) != -1) {
6676 		switch (c) {
6677 		case 'f':
6678 			force = B_TRUE;
6679 			break;
6680 		case 'o':
6681 			if ((propval = strchr(optarg, '=')) == NULL) {
6682 				(void) fprintf(stderr, gettext("missing "
6683 				    "'=' for -o option\n"));
6684 				usage(B_FALSE);
6685 			}
6686 			*propval = '\0';
6687 			propval++;
6688 
6689 			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
6690 			    (add_prop_list(optarg, propval, &props, B_TRUE)))
6691 				usage(B_FALSE);
6692 			break;
6693 		case 's':
6694 			rebuild = B_TRUE;
6695 			break;
6696 		case 'w':
6697 			wait = B_TRUE;
6698 			break;
6699 		case '?':
6700 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
6701 			    optopt);
6702 			usage(B_FALSE);
6703 		}
6704 	}
6705 
6706 	argc -= optind;
6707 	argv += optind;
6708 
6709 	/* get pool name and check number of arguments */
6710 	if (argc < 1) {
6711 		(void) fprintf(stderr, gettext("missing pool name argument\n"));
6712 		usage(B_FALSE);
6713 	}
6714 
6715 	poolname = argv[0];
6716 
6717 	if (argc < 2) {
6718 		(void) fprintf(stderr,
6719 		    gettext("missing <device> specification\n"));
6720 		usage(B_FALSE);
6721 	}
6722 
6723 	old_disk = argv[1];
6724 
6725 	if (argc < 3) {
6726 		if (!replacing) {
6727 			(void) fprintf(stderr,
6728 			    gettext("missing <new_device> specification\n"));
6729 			usage(B_FALSE);
6730 		}
6731 		new_disk = old_disk;
6732 		argc -= 1;
6733 		argv += 1;
6734 	} else {
6735 		new_disk = argv[2];
6736 		argc -= 2;
6737 		argv += 2;
6738 	}
6739 
6740 	if (argc > 1) {
6741 		(void) fprintf(stderr, gettext("too many arguments\n"));
6742 		usage(B_FALSE);
6743 	}
6744 
6745 	if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
6746 		nvlist_free(props);
6747 		return (1);
6748 	}
6749 
6750 	if (zpool_get_config(zhp, NULL) == NULL) {
6751 		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
6752 		    poolname);
6753 		zpool_close(zhp);
6754 		nvlist_free(props);
6755 		return (1);
6756 	}
6757 
6758 	/* unless manually specified use "ashift" pool property (if set) */
6759 	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
6760 		int intval;
6761 		zprop_source_t src;
6762 		char strval[ZPOOL_MAXPROPLEN];
6763 
6764 		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
6765 		if (src != ZPROP_SRC_DEFAULT) {
6766 			(void) sprintf(strval, "%" PRId32, intval);
6767 			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
6768 			    &props, B_TRUE) == 0);
6769 		}
6770 	}
6771 
6772 	nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
6773 	    argc, argv);
6774 	if (nvroot == NULL) {
6775 		zpool_close(zhp);
6776 		nvlist_free(props);
6777 		return (1);
6778 	}
6779 
6780 	ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
6781 	    rebuild);
6782 
6783 	if (ret == 0 && wait) {
6784 		zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
6785 		char raidz_prefix[] = "raidz";
6786 		if (replacing) {
6787 			activity = ZPOOL_WAIT_REPLACE;
6788 		} else if (strncmp(old_disk,
6789 		    raidz_prefix, strlen(raidz_prefix)) == 0) {
6790 			activity = ZPOOL_WAIT_RAIDZ_EXPAND;
6791 		}
6792 		ret = zpool_wait(zhp, activity);
6793 	}
6794 
6795 	nvlist_free(props);
6796 	nvlist_free(nvroot);
6797 	zpool_close(zhp);
6798 
6799 	return (ret);
6800 }
6801 
6802 /*
6803  * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
6804  *
6805  *	-f	Force attach, even if <new_device> appears to be in use.
6806  *	-s	Use sequential instead of healing reconstruction for resilver.
6807  *	-o	Set property=value.
6808  *	-w	Wait for replacing to complete before returning
6809  *
6810  * Replace <device> with <new_device>.
6811  */
6812 int
6813 zpool_do_replace(int argc, char **argv)
6814 {
6815 	return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
6816 }
6817 
6818 /*
6819  * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
6820  *
6821  *	-f	Force attach, even if <new_device> appears to be in use.
6822  *	-s	Use sequential instead of healing reconstruction for resilver.
6823  *	-o	Set property=value.
6824  *	-w	Wait for resilvering (mirror) or expansion (raidz) to complete
6825  *		before returning.
6826  *
6827  * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
6828  * mirror or raidz. If <device> is not part of a mirror, then <device> will
6829  * be transformed into a mirror of <device> and <new_device>. When a mirror
6830  * is involved, <new_device> will begin life with a DTL of [0, now], and will
6831  * immediately begin to resilver itself. For the raidz case, a expansion will
6832  * commence and reflow the raidz data across all the disks including the
6833  * <new_device>.
6834  */
6835 int
6836 zpool_do_attach(int argc, char **argv)
6837 {
6838 	return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
6839 }
6840 
6841 /*
6842  * zpool detach [-f] <pool> <device>
6843  *
6844  *	-f	Force detach of <device>, even if DTLs argue against it
6845  *		(not supported yet)
6846  *
6847  * Detach a device from a mirror.  The operation will be refused if <device>
6848  * is the last device in the mirror, or if the DTLs indicate that this device
6849  * has the only valid copy of some data.
6850  */
6851 int
6852 zpool_do_detach(int argc, char **argv)
6853 {
6854 	int c;
6855 	char *poolname, *path;
6856 	zpool_handle_t *zhp;
6857 	int ret;
6858 
6859 	/* check options */
6860 	while ((c = getopt(argc, argv, "")) != -1) {
6861 		switch (c) {
6862 		case '?':
6863 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
6864 			    optopt);
6865 			usage(B_FALSE);
6866 		}
6867 	}
6868 
6869 	argc -= optind;
6870 	argv += optind;
6871 
6872 	/* get pool name and check number of arguments */
6873 	if (argc < 1) {
6874 		(void) fprintf(stderr, gettext("missing pool name argument\n"));
6875 		usage(B_FALSE);
6876 	}
6877 
6878 	if (argc < 2) {
6879 		(void) fprintf(stderr,
6880 		    gettext("missing <device> specification\n"));
6881 		usage(B_FALSE);
6882 	}
6883 
6884 	poolname = argv[0];
6885 	path = argv[1];
6886 
6887 	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6888 		return (1);
6889 
6890 	ret = zpool_vdev_detach(zhp, path);
6891 
6892 	zpool_close(zhp);
6893 
6894 	return (ret);
6895 }
6896 
6897 /*
6898  * zpool split [-gLnP] [-o prop=val] ...
6899  *		[-o mntopt] ...
6900  *		[-R altroot] <pool> <newpool> [<device> ...]
6901  *
6902  *	-g      Display guid for individual vdev name.
6903  *	-L	Follow links when resolving vdev path name.
6904  *	-n	Do not split the pool, but display the resulting layout if
6905  *		it were to be split.
6906  *	-o	Set property=value, or set mount options.
6907  *	-P	Display full path for vdev name.
6908  *	-R	Mount the split-off pool under an alternate root.
6909  *	-l	Load encryption keys while importing.
6910  *
6911  * Splits the named pool and gives it the new pool name.  Devices to be split
6912  * off may be listed, provided that no more than one device is specified
6913  * per top-level vdev mirror.  The newly split pool is left in an exported
6914  * state unless -R is specified.
6915  *
6916  * Restrictions: the top-level of the pool pool must only be made up of
6917  * mirrors; all devices in the pool must be healthy; no device may be
6918  * undergoing a resilvering operation.
6919  */
6920 int
6921 zpool_do_split(int argc, char **argv)
6922 {
6923 	char *srcpool, *newpool, *propval;
6924 	char *mntopts = NULL;
6925 	splitflags_t flags;
6926 	int c, ret = 0;
6927 	int ms_status = 0;
6928 	boolean_t loadkeys = B_FALSE;
6929 	zpool_handle_t *zhp;
6930 	nvlist_t *config, *props = NULL;
6931 
6932 	flags.dryrun = B_FALSE;
6933 	flags.import = B_FALSE;
6934 	flags.name_flags = 0;
6935 
6936 	/* check options */
6937 	while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
6938 		switch (c) {
6939 		case 'g':
6940 			flags.name_flags |= VDEV_NAME_GUID;
6941 			break;
6942 		case 'L':
6943 			flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
6944 			break;
6945 		case 'R':
6946 			flags.import = B_TRUE;
6947 			if (add_prop_list(
6948 			    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
6949 			    &props, B_TRUE) != 0) {
6950 				nvlist_free(props);
6951 				usage(B_FALSE);
6952 			}
6953 			break;
6954 		case 'l':
6955 			loadkeys = B_TRUE;
6956 			break;
6957 		case 'n':
6958 			flags.dryrun = B_TRUE;
6959 			break;
6960 		case 'o':
6961 			if ((propval = strchr(optarg, '=')) != NULL) {
6962 				*propval = '\0';
6963 				propval++;
6964 				if (add_prop_list(optarg, propval,
6965 				    &props, B_TRUE) != 0) {
6966 					nvlist_free(props);
6967 					usage(B_FALSE);
6968 				}
6969 			} else {
6970 				mntopts = optarg;
6971 			}
6972 			break;
6973 		case 'P':
6974 			flags.name_flags |= VDEV_NAME_PATH;
6975 			break;
6976 		case ':':
6977 			(void) fprintf(stderr, gettext("missing argument for "
6978 			    "'%c' option\n"), optopt);
6979 			usage(B_FALSE);
6980 			break;
6981 		case '?':
6982 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
6983 			    optopt);
6984 			usage(B_FALSE);
6985 			break;
6986 		}
6987 	}
6988 
6989 	if (!flags.import && mntopts != NULL) {
6990 		(void) fprintf(stderr, gettext("setting mntopts is only "
6991 		    "valid when importing the pool\n"));
6992 		usage(B_FALSE);
6993 	}
6994 
6995 	if (!flags.import && loadkeys) {
6996 		(void) fprintf(stderr, gettext("loading keys is only "
6997 		    "valid when importing the pool\n"));
6998 		usage(B_FALSE);
6999 	}
7000 
7001 	argc -= optind;
7002 	argv += optind;
7003 
7004 	if (argc < 1) {
7005 		(void) fprintf(stderr, gettext("Missing pool name\n"));
7006 		usage(B_FALSE);
7007 	}
7008 	if (argc < 2) {
7009 		(void) fprintf(stderr, gettext("Missing new pool name\n"));
7010 		usage(B_FALSE);
7011 	}
7012 
7013 	srcpool = argv[0];
7014 	newpool = argv[1];
7015 
7016 	argc -= 2;
7017 	argv += 2;
7018 
7019 	if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
7020 		nvlist_free(props);
7021 		return (1);
7022 	}
7023 
7024 	config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
7025 	if (config == NULL) {
7026 		ret = 1;
7027 	} else {
7028 		if (flags.dryrun) {
7029 			(void) printf(gettext("would create '%s' with the "
7030 			    "following layout:\n\n"), newpool);
7031 			print_vdev_tree(NULL, newpool, config, 0, "",
7032 			    flags.name_flags);
7033 			print_vdev_tree(NULL, "dedup", config, 0,
7034 			    VDEV_ALLOC_BIAS_DEDUP, 0);
7035 			print_vdev_tree(NULL, "special", config, 0,
7036 			    VDEV_ALLOC_BIAS_SPECIAL, 0);
7037 		}
7038 	}
7039 
7040 	zpool_close(zhp);
7041 
7042 	if (ret != 0 || flags.dryrun || !flags.import) {
7043 		nvlist_free(config);
7044 		nvlist_free(props);
7045 		return (ret);
7046 	}
7047 
7048 	/*
7049 	 * The split was successful. Now we need to open the new
7050 	 * pool and import it.
7051 	 */
7052 	if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
7053 		nvlist_free(config);
7054 		nvlist_free(props);
7055 		return (1);
7056 	}
7057 
7058 	if (loadkeys) {
7059 		ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
7060 		if (ret != 0)
7061 			ret = 1;
7062 	}
7063 
7064 	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
7065 		ms_status = zpool_enable_datasets(zhp, mntopts, 0);
7066 		if (ms_status == EZFS_SHAREFAILED) {
7067 			(void) fprintf(stderr, gettext("Split was successful, "
7068 			    "datasets are mounted but sharing of some datasets "
7069 			    "has failed\n"));
7070 		} else if (ms_status == EZFS_MOUNTFAILED) {
7071 			(void) fprintf(stderr, gettext("Split was successful"
7072 			    ", but some datasets could not be mounted\n"));
7073 			(void) fprintf(stderr, gettext("Try doing '%s' with a "
7074 			    "different altroot\n"), "zpool import");
7075 		}
7076 	}
7077 	zpool_close(zhp);
7078 	nvlist_free(config);
7079 	nvlist_free(props);
7080 
7081 	return (ret);
7082 }
7083 
7084 #define	POWER_OPT 1024
7085 
7086 /*
7087  * zpool online [--power] <pool> <device> ...
7088  *
7089  * --power: Power on the enclosure slot to the drive (if possible)
7090  */
7091 int
7092 zpool_do_online(int argc, char **argv)
7093 {
7094 	int c, i;
7095 	char *poolname;
7096 	zpool_handle_t *zhp;
7097 	int ret = 0;
7098 	vdev_state_t newstate;
7099 	int flags = 0;
7100 	boolean_t is_power_on = B_FALSE;
7101 	struct option long_options[] = {
7102 		{"power", no_argument, NULL, POWER_OPT},
7103 		{0, 0, 0, 0}
7104 	};
7105 
7106 	/* check options */
7107 	while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
7108 		switch (c) {
7109 		case 'e':
7110 			flags |= ZFS_ONLINE_EXPAND;
7111 			break;
7112 		case POWER_OPT:
7113 			is_power_on = B_TRUE;
7114 			break;
7115 		case '?':
7116 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7117 			    optopt);
7118 			usage(B_FALSE);
7119 		}
7120 	}
7121 
7122 	if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7123 		is_power_on = B_TRUE;
7124 
7125 	argc -= optind;
7126 	argv += optind;
7127 
7128 	/* get pool name and check number of arguments */
7129 	if (argc < 1) {
7130 		(void) fprintf(stderr, gettext("missing pool name\n"));
7131 		usage(B_FALSE);
7132 	}
7133 	if (argc < 2) {
7134 		(void) fprintf(stderr, gettext("missing device name\n"));
7135 		usage(B_FALSE);
7136 	}
7137 
7138 	poolname = argv[0];
7139 
7140 	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7141 		return (1);
7142 
7143 	for (i = 1; i < argc; i++) {
7144 		vdev_state_t oldstate;
7145 		boolean_t avail_spare, l2cache;
7146 		int rc;
7147 
7148 		if (is_power_on) {
7149 			rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
7150 			if (rc == ENOTSUP) {
7151 				(void) fprintf(stderr,
7152 				    gettext("Power control not supported\n"));
7153 			}
7154 			if (rc != 0)
7155 				return (rc);
7156 		}
7157 
7158 		nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
7159 		    &l2cache, NULL);
7160 		if (tgt == NULL) {
7161 			ret = 1;
7162 			continue;
7163 		}
7164 		uint_t vsc;
7165 		oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
7166 		    ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
7167 		if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
7168 			if (newstate != VDEV_STATE_HEALTHY) {
7169 				(void) printf(gettext("warning: device '%s' "
7170 				    "onlined, but remains in faulted state\n"),
7171 				    argv[i]);
7172 				if (newstate == VDEV_STATE_FAULTED)
7173 					(void) printf(gettext("use 'zpool "
7174 					    "clear' to restore a faulted "
7175 					    "device\n"));
7176 				else
7177 					(void) printf(gettext("use 'zpool "
7178 					    "replace' to replace devices "
7179 					    "that are no longer present\n"));
7180 				if ((flags & ZFS_ONLINE_EXPAND)) {
7181 					(void) printf(gettext("%s: failed "
7182 					    "to expand usable space on "
7183 					    "unhealthy device '%s'\n"),
7184 					    (oldstate >= VDEV_STATE_DEGRADED ?
7185 					    "error" : "warning"), argv[i]);
7186 					if (oldstate >= VDEV_STATE_DEGRADED) {
7187 						ret = 1;
7188 						break;
7189 					}
7190 				}
7191 			}
7192 		} else {
7193 			ret = 1;
7194 		}
7195 	}
7196 
7197 	zpool_close(zhp);
7198 
7199 	return (ret);
7200 }
7201 
7202 /*
7203  * zpool offline [-ft]|[--power] <pool> <device> ...
7204  *
7205  *
7206  *	-f	Force the device into a faulted state.
7207  *
7208  *	-t	Only take the device off-line temporarily.  The offline/faulted
7209  *		state will not be persistent across reboots.
7210  *
7211  *	--power Power off the enclosure slot to the drive (if possible)
7212  */
7213 int
7214 zpool_do_offline(int argc, char **argv)
7215 {
7216 	int c, i;
7217 	char *poolname;
7218 	zpool_handle_t *zhp;
7219 	int ret = 0;
7220 	boolean_t istmp = B_FALSE;
7221 	boolean_t fault = B_FALSE;
7222 	boolean_t is_power_off = B_FALSE;
7223 
7224 	struct option long_options[] = {
7225 		{"power", no_argument, NULL, POWER_OPT},
7226 		{0, 0, 0, 0}
7227 	};
7228 
7229 	/* check options */
7230 	while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
7231 		switch (c) {
7232 		case 'f':
7233 			fault = B_TRUE;
7234 			break;
7235 		case 't':
7236 			istmp = B_TRUE;
7237 			break;
7238 		case POWER_OPT:
7239 			is_power_off = B_TRUE;
7240 			break;
7241 		case '?':
7242 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7243 			    optopt);
7244 			usage(B_FALSE);
7245 		}
7246 	}
7247 
7248 	if (is_power_off && fault) {
7249 		(void) fprintf(stderr,
7250 		    gettext("-0 and -f cannot be used together\n"));
7251 		usage(B_FALSE);
7252 		return (1);
7253 	}
7254 
7255 	if (is_power_off && istmp) {
7256 		(void) fprintf(stderr,
7257 		    gettext("-0 and -t cannot be used together\n"));
7258 		usage(B_FALSE);
7259 		return (1);
7260 	}
7261 
7262 	argc -= optind;
7263 	argv += optind;
7264 
7265 	/* get pool name and check number of arguments */
7266 	if (argc < 1) {
7267 		(void) fprintf(stderr, gettext("missing pool name\n"));
7268 		usage(B_FALSE);
7269 	}
7270 	if (argc < 2) {
7271 		(void) fprintf(stderr, gettext("missing device name\n"));
7272 		usage(B_FALSE);
7273 	}
7274 
7275 	poolname = argv[0];
7276 
7277 	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7278 		return (1);
7279 
7280 	for (i = 1; i < argc; i++) {
7281 		uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
7282 		if (is_power_off) {
7283 			/*
7284 			 * Note: we have to power off first, then set REMOVED,
7285 			 * or else zpool_vdev_set_removed_state() returns
7286 			 * EAGAIN.
7287 			 */
7288 			ret = zpool_power_off(zhp, argv[i]);
7289 			if (ret != 0) {
7290 				(void) fprintf(stderr, "%s %s %d\n",
7291 				    gettext("unable to power off slot for"),
7292 				    argv[i], ret);
7293 			}
7294 			zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);
7295 
7296 		} else if (fault) {
7297 			vdev_aux_t aux;
7298 			if (istmp == B_FALSE) {
7299 				/* Force the fault to persist across imports */
7300 				aux = VDEV_AUX_EXTERNAL_PERSIST;
7301 			} else {
7302 				aux = VDEV_AUX_EXTERNAL;
7303 			}
7304 
7305 			if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
7306 				ret = 1;
7307 		} else {
7308 			if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
7309 				ret = 1;
7310 		}
7311 	}
7312 
7313 	zpool_close(zhp);
7314 
7315 	return (ret);
7316 }
7317 
7318 /*
7319  * zpool clear [-nF]|[--power] <pool> [device]
7320  *
7321  * Clear all errors associated with a pool or a particular device.
7322  */
7323 int
7324 zpool_do_clear(int argc, char **argv)
7325 {
7326 	int c;
7327 	int ret = 0;
7328 	boolean_t dryrun = B_FALSE;
7329 	boolean_t do_rewind = B_FALSE;
7330 	boolean_t xtreme_rewind = B_FALSE;
7331 	boolean_t is_power_on = B_FALSE;
7332 	uint32_t rewind_policy = ZPOOL_NO_REWIND;
7333 	nvlist_t *policy = NULL;
7334 	zpool_handle_t *zhp;
7335 	char *pool, *device;
7336 
7337 	struct option long_options[] = {
7338 		{"power", no_argument, NULL, POWER_OPT},
7339 		{0, 0, 0, 0}
7340 	};
7341 
7342 	/* check options */
7343 	while ((c = getopt_long(argc, argv, "FnX", long_options,
7344 	    NULL)) != -1) {
7345 		switch (c) {
7346 		case 'F':
7347 			do_rewind = B_TRUE;
7348 			break;
7349 		case 'n':
7350 			dryrun = B_TRUE;
7351 			break;
7352 		case 'X':
7353 			xtreme_rewind = B_TRUE;
7354 			break;
7355 		case POWER_OPT:
7356 			is_power_on = B_TRUE;
7357 			break;
7358 		case '?':
7359 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7360 			    optopt);
7361 			usage(B_FALSE);
7362 		}
7363 	}
7364 
7365 	if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7366 		is_power_on = B_TRUE;
7367 
7368 	argc -= optind;
7369 	argv += optind;
7370 
7371 	if (argc < 1) {
7372 		(void) fprintf(stderr, gettext("missing pool name\n"));
7373 		usage(B_FALSE);
7374 	}
7375 
7376 	if (argc > 2) {
7377 		(void) fprintf(stderr, gettext("too many arguments\n"));
7378 		usage(B_FALSE);
7379 	}
7380 
7381 	if ((dryrun || xtreme_rewind) && !do_rewind) {
7382 		(void) fprintf(stderr,
7383 		    gettext("-n or -X only meaningful with -F\n"));
7384 		usage(B_FALSE);
7385 	}
7386 	if (dryrun)
7387 		rewind_policy = ZPOOL_TRY_REWIND;
7388 	else if (do_rewind)
7389 		rewind_policy = ZPOOL_DO_REWIND;
7390 	if (xtreme_rewind)
7391 		rewind_policy |= ZPOOL_EXTREME_REWIND;
7392 
7393 	/* In future, further rewind policy choices can be passed along here */
7394 	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
7395 	    nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
7396 	    rewind_policy) != 0) {
7397 		return (1);
7398 	}
7399 
7400 	pool = argv[0];
7401 	device = argc == 2 ? argv[1] : NULL;
7402 
7403 	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
7404 		nvlist_free(policy);
7405 		return (1);
7406 	}
7407 
7408 	if (is_power_on) {
7409 		if (device == NULL) {
7410 			zpool_power_on_pool_and_wait_for_devices(zhp);
7411 		} else {
7412 			zpool_power_on_and_disk_wait(zhp, device);
7413 		}
7414 	}
7415 
7416 	if (zpool_clear(zhp, device, policy) != 0)
7417 		ret = 1;
7418 
7419 	zpool_close(zhp);
7420 
7421 	nvlist_free(policy);
7422 
7423 	return (ret);
7424 }
7425 
7426 /*
7427  * zpool reguid <pool>
7428  */
7429 int
7430 zpool_do_reguid(int argc, char **argv)
7431 {
7432 	int c;
7433 	char *poolname;
7434 	zpool_handle_t *zhp;
7435 	int ret = 0;
7436 
7437 	/* check options */
7438 	while ((c = getopt(argc, argv, "")) != -1) {
7439 		switch (c) {
7440 		case '?':
7441 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7442 			    optopt);
7443 			usage(B_FALSE);
7444 		}
7445 	}
7446 
7447 	argc -= optind;
7448 	argv += optind;
7449 
7450 	/* get pool name and check number of arguments */
7451 	if (argc < 1) {
7452 		(void) fprintf(stderr, gettext("missing pool name\n"));
7453 		usage(B_FALSE);
7454 	}
7455 
7456 	if (argc > 1) {
7457 		(void) fprintf(stderr, gettext("too many arguments\n"));
7458 		usage(B_FALSE);
7459 	}
7460 
7461 	poolname = argv[0];
7462 	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7463 		return (1);
7464 
7465 	ret = zpool_reguid(zhp);
7466 
7467 	zpool_close(zhp);
7468 	return (ret);
7469 }
7470 
7471 
7472 /*
7473  * zpool reopen <pool>
7474  *
7475  * Reopen the pool so that the kernel can update the sizes of all vdevs.
7476  */
7477 int
7478 zpool_do_reopen(int argc, char **argv)
7479 {
7480 	int c;
7481 	int ret = 0;
7482 	boolean_t scrub_restart = B_TRUE;
7483 
7484 	/* check options */
7485 	while ((c = getopt(argc, argv, "n")) != -1) {
7486 		switch (c) {
7487 		case 'n':
7488 			scrub_restart = B_FALSE;
7489 			break;
7490 		case '?':
7491 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7492 			    optopt);
7493 			usage(B_FALSE);
7494 		}
7495 	}
7496 
7497 	argc -= optind;
7498 	argv += optind;
7499 
7500 	/* if argc == 0 we will execute zpool_reopen_one on all pools */
7501 	ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7502 	    B_FALSE, zpool_reopen_one, &scrub_restart);
7503 
7504 	return (ret);
7505 }
7506 
7507 typedef struct scrub_cbdata {
7508 	int	cb_type;
7509 	pool_scrub_cmd_t cb_scrub_cmd;
7510 } scrub_cbdata_t;
7511 
7512 static boolean_t
7513 zpool_has_checkpoint(zpool_handle_t *zhp)
7514 {
7515 	nvlist_t *config, *nvroot;
7516 
7517 	config = zpool_get_config(zhp, NULL);
7518 
7519 	if (config != NULL) {
7520 		pool_checkpoint_stat_t *pcs = NULL;
7521 		uint_t c;
7522 
7523 		nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
7524 		(void) nvlist_lookup_uint64_array(nvroot,
7525 		    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
7526 
7527 		if (pcs == NULL || pcs->pcs_state == CS_NONE)
7528 			return (B_FALSE);
7529 
7530 		assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
7531 		    pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
7532 		return (B_TRUE);
7533 	}
7534 
7535 	return (B_FALSE);
7536 }
7537 
7538 static int
7539 scrub_callback(zpool_handle_t *zhp, void *data)
7540 {
7541 	scrub_cbdata_t *cb = data;
7542 	int err;
7543 
7544 	/*
7545 	 * Ignore faulted pools.
7546 	 */
7547 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
7548 		(void) fprintf(stderr, gettext("cannot scan '%s': pool is "
7549 		    "currently unavailable\n"), zpool_get_name(zhp));
7550 		return (1);
7551 	}
7552 
7553 	err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
7554 
7555 	if (err == 0 && zpool_has_checkpoint(zhp) &&
7556 	    cb->cb_type == POOL_SCAN_SCRUB) {
7557 		(void) printf(gettext("warning: will not scrub state that "
7558 		    "belongs to the checkpoint of pool '%s'\n"),
7559 		    zpool_get_name(zhp));
7560 	}
7561 
7562 	return (err != 0);
7563 }
7564 
7565 static int
7566 wait_callback(zpool_handle_t *zhp, void *data)
7567 {
7568 	zpool_wait_activity_t *act = data;
7569 	return (zpool_wait(zhp, *act));
7570 }
7571 
7572 /*
7573  * zpool scrub [-s | -p] [-w] [-e] <pool> ...
7574  *
7575  *	-e	Only scrub blocks in the error log.
7576  *	-s	Stop.  Stops any in-progress scrub.
7577  *	-p	Pause. Pause in-progress scrub.
7578  *	-w	Wait.  Blocks until scrub has completed.
7579  */
7580 int
7581 zpool_do_scrub(int argc, char **argv)
7582 {
7583 	int c;
7584 	scrub_cbdata_t cb;
7585 	boolean_t wait = B_FALSE;
7586 	int error;
7587 
7588 	cb.cb_type = POOL_SCAN_SCRUB;
7589 	cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7590 
7591 	boolean_t is_error_scrub = B_FALSE;
7592 	boolean_t is_pause = B_FALSE;
7593 	boolean_t is_stop = B_FALSE;
7594 
7595 	/* check options */
7596 	while ((c = getopt(argc, argv, "spwe")) != -1) {
7597 		switch (c) {
7598 		case 'e':
7599 			is_error_scrub = B_TRUE;
7600 			break;
7601 		case 's':
7602 			is_stop = B_TRUE;
7603 			break;
7604 		case 'p':
7605 			is_pause = B_TRUE;
7606 			break;
7607 		case 'w':
7608 			wait = B_TRUE;
7609 			break;
7610 		case '?':
7611 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7612 			    optopt);
7613 			usage(B_FALSE);
7614 		}
7615 	}
7616 
7617 	if (is_pause && is_stop) {
7618 		(void) fprintf(stderr, gettext("invalid option "
7619 		    "combination :-s and -p are mutually exclusive\n"));
7620 		usage(B_FALSE);
7621 	} else {
7622 		if (is_error_scrub)
7623 			cb.cb_type = POOL_SCAN_ERRORSCRUB;
7624 
7625 		if (is_pause) {
7626 			cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
7627 		} else if (is_stop) {
7628 			cb.cb_type = POOL_SCAN_NONE;
7629 		} else {
7630 			cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7631 		}
7632 	}
7633 
7634 	if (wait && (cb.cb_type == POOL_SCAN_NONE ||
7635 	    cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
7636 		(void) fprintf(stderr, gettext("invalid option combination: "
7637 		    "-w cannot be used with -p or -s\n"));
7638 		usage(B_FALSE);
7639 	}
7640 
7641 	argc -= optind;
7642 	argv += optind;
7643 
7644 	if (argc < 1) {
7645 		(void) fprintf(stderr, gettext("missing pool name argument\n"));
7646 		usage(B_FALSE);
7647 	}
7648 
7649 	error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7650 	    B_FALSE, scrub_callback, &cb);
7651 
7652 	if (wait && !error) {
7653 		zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
7654 		error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7655 		    B_FALSE, wait_callback, &act);
7656 	}
7657 
7658 	return (error);
7659 }
7660 
7661 /*
7662  * zpool resilver <pool> ...
7663  *
7664  *	Restarts any in-progress resilver
7665  */
7666 int
7667 zpool_do_resilver(int argc, char **argv)
7668 {
7669 	int c;
7670 	scrub_cbdata_t cb;
7671 
7672 	cb.cb_type = POOL_SCAN_RESILVER;
7673 	cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7674 
7675 	/* check options */
7676 	while ((c = getopt(argc, argv, "")) != -1) {
7677 		switch (c) {
7678 		case '?':
7679 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7680 			    optopt);
7681 			usage(B_FALSE);
7682 		}
7683 	}
7684 
7685 	argc -= optind;
7686 	argv += optind;
7687 
7688 	if (argc < 1) {
7689 		(void) fprintf(stderr, gettext("missing pool name argument\n"));
7690 		usage(B_FALSE);
7691 	}
7692 
7693 	return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7694 	    B_FALSE, scrub_callback, &cb));
7695 }
7696 
7697 /*
7698  * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...]
7699  *
7700  *	-c		Cancel. Ends any in-progress trim.
7701  *	-d		Secure trim.  Requires kernel and device support.
7702  *	-r <rate>	Sets the TRIM rate in bytes (per second). Supports
7703  *			adding a multiplier suffix such as 'k' or 'm'.
7704  *	-s		Suspend. TRIM can then be restarted with no flags.
7705  *	-w		Wait. Blocks until trimming has completed.
7706  */
7707 int
7708 zpool_do_trim(int argc, char **argv)
7709 {
7710 	struct option long_options[] = {
7711 		{"cancel",	no_argument,		NULL,	'c'},
7712 		{"secure",	no_argument,		NULL,	'd'},
7713 		{"rate",	required_argument,	NULL,	'r'},
7714 		{"suspend",	no_argument,		NULL,	's'},
7715 		{"wait",	no_argument,		NULL,	'w'},
7716 		{0, 0, 0, 0}
7717 	};
7718 
7719 	pool_trim_func_t cmd_type = POOL_TRIM_START;
7720 	uint64_t rate = 0;
7721 	boolean_t secure = B_FALSE;
7722 	boolean_t wait = B_FALSE;
7723 
7724 	int c;
7725 	while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
7726 	    != -1) {
7727 		switch (c) {
7728 		case 'c':
7729 			if (cmd_type != POOL_TRIM_START &&
7730 			    cmd_type != POOL_TRIM_CANCEL) {
7731 				(void) fprintf(stderr, gettext("-c cannot be "
7732 				    "combined with other options\n"));
7733 				usage(B_FALSE);
7734 			}
7735 			cmd_type = POOL_TRIM_CANCEL;
7736 			break;
7737 		case 'd':
7738 			if (cmd_type != POOL_TRIM_START) {
7739 				(void) fprintf(stderr, gettext("-d cannot be "
7740 				    "combined with the -c or -s options\n"));
7741 				usage(B_FALSE);
7742 			}
7743 			secure = B_TRUE;
7744 			break;
7745 		case 'r':
7746 			if (cmd_type != POOL_TRIM_START) {
7747 				(void) fprintf(stderr, gettext("-r cannot be "
7748 				    "combined with the -c or -s options\n"));
7749 				usage(B_FALSE);
7750 			}
7751 			if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
7752 				(void) fprintf(stderr, "%s: %s\n",
7753 				    gettext("invalid value for rate"),
7754 				    libzfs_error_description(g_zfs));
7755 				usage(B_FALSE);
7756 			}
7757 			break;
7758 		case 's':
7759 			if (cmd_type != POOL_TRIM_START &&
7760 			    cmd_type != POOL_TRIM_SUSPEND) {
7761 				(void) fprintf(stderr, gettext("-s cannot be "
7762 				    "combined with other options\n"));
7763 				usage(B_FALSE);
7764 			}
7765 			cmd_type = POOL_TRIM_SUSPEND;
7766 			break;
7767 		case 'w':
7768 			wait = B_TRUE;
7769 			break;
7770 		case '?':
7771 			if (optopt != 0) {
7772 				(void) fprintf(stderr,
7773 				    gettext("invalid option '%c'\n"), optopt);
7774 			} else {
7775 				(void) fprintf(stderr,
7776 				    gettext("invalid option '%s'\n"),
7777 				    argv[optind - 1]);
7778 			}
7779 			usage(B_FALSE);
7780 		}
7781 	}
7782 
7783 	argc -= optind;
7784 	argv += optind;
7785 
7786 	if (argc < 1) {
7787 		(void) fprintf(stderr, gettext("missing pool name argument\n"));
7788 		usage(B_FALSE);
7789 		return (-1);
7790 	}
7791 
7792 	if (wait && (cmd_type != POOL_TRIM_START)) {
7793 		(void) fprintf(stderr, gettext("-w cannot be used with -c or "
7794 		    "-s\n"));
7795 		usage(B_FALSE);
7796 	}
7797 
7798 	char *poolname = argv[0];
7799 	zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
7800 	if (zhp == NULL)
7801 		return (-1);
7802 
7803 	trimflags_t trim_flags = {
7804 		.secure = secure,
7805 		.rate = rate,
7806 		.wait = wait,
7807 	};
7808 
7809 	nvlist_t *vdevs = fnvlist_alloc();
7810 	if (argc == 1) {
7811 		/* no individual leaf vdevs specified, so add them all */
7812 		nvlist_t *config = zpool_get_config(zhp, NULL);
7813 		nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
7814 		    ZPOOL_CONFIG_VDEV_TREE);
7815 		zpool_collect_leaves(zhp, nvroot, vdevs);
7816 		trim_flags.fullpool = B_TRUE;
7817 	} else {
7818 		trim_flags.fullpool = B_FALSE;
7819 		for (int i = 1; i < argc; i++) {
7820 			fnvlist_add_boolean(vdevs, argv[i]);
7821 		}
7822 	}
7823 
7824 	int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
7825 
7826 	fnvlist_free(vdevs);
7827 	zpool_close(zhp);
7828 
7829 	return (error);
7830 }
7831 
7832 /*
7833  * Converts a total number of seconds to a human readable string broken
7834  * down in to days/hours/minutes/seconds.
7835  */
7836 static void
7837 secs_to_dhms(uint64_t total, char *buf)
7838 {
7839 	uint64_t days = total / 60 / 60 / 24;
7840 	uint64_t hours = (total / 60 / 60) % 24;
7841 	uint64_t mins = (total / 60) % 60;
7842 	uint64_t secs = (total % 60);
7843 
7844 	if (days > 0) {
7845 		(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
7846 		    (u_longlong_t)days, (u_longlong_t)hours,
7847 		    (u_longlong_t)mins, (u_longlong_t)secs);
7848 	} else {
7849 		(void) sprintf(buf, "%02llu:%02llu:%02llu",
7850 		    (u_longlong_t)hours, (u_longlong_t)mins,
7851 		    (u_longlong_t)secs);
7852 	}
7853 }
7854 
7855 /*
7856  * Print out detailed error scrub status.
7857  */
7858 static void
7859 print_err_scrub_status(pool_scan_stat_t *ps)
7860 {
7861 	time_t start, end, pause;
7862 	uint64_t total_secs_left;
7863 	uint64_t secs_left, mins_left, hours_left, days_left;
7864 	uint64_t examined, to_be_examined;
7865 
7866 	if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
7867 		return;
7868 	}
7869 
7870 	(void) printf(gettext(" scrub: "));
7871 
7872 	start = ps->pss_error_scrub_start;
7873 	end = ps->pss_error_scrub_end;
7874 	pause = ps->pss_pass_error_scrub_pause;
7875 	examined = ps->pss_error_scrub_examined;
7876 	to_be_examined = ps->pss_error_scrub_to_be_examined;
7877 
7878 	assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);
7879 
7880 	if (ps->pss_error_scrub_state == DSS_FINISHED) {
7881 		total_secs_left = end - start;
7882 		days_left = total_secs_left / 60 / 60 / 24;
7883 		hours_left = (total_secs_left / 60 / 60) % 24;
7884 		mins_left = (total_secs_left / 60) % 60;
7885 		secs_left = (total_secs_left % 60);
7886 
7887 		(void) printf(gettext("scrubbed %llu error blocks in %llu days "
7888 		    "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
7889 		    (u_longlong_t)days_left, (u_longlong_t)hours_left,
7890 		    (u_longlong_t)mins_left, (u_longlong_t)secs_left,
7891 		    ctime(&end));
7892 
7893 		return;
7894 	} else if (ps->pss_error_scrub_state == DSS_CANCELED) {
7895 		(void) printf(gettext("error scrub canceled on %s"),
7896 		    ctime(&end));
7897 		return;
7898 	}
7899 	assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);
7900 
7901 	/* Error scrub is in progress. */
7902 	if (pause == 0) {
7903 		(void) printf(gettext("error scrub in progress since %s"),
7904 		    ctime(&start));
7905 	} else {
7906 		(void) printf(gettext("error scrub paused since %s"),
7907 		    ctime(&pause));
7908 		(void) printf(gettext("\terror scrub started on %s"),
7909 		    ctime(&start));
7910 	}
7911 
7912 	double fraction_done = (double)examined / (to_be_examined + examined);
7913 	(void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
7914 	    " blocks"), 100 * fraction_done, (u_longlong_t)examined);
7915 
7916 	(void) printf("\n");
7917 }
7918 
7919 /*
7920  * Print out detailed scrub status.
7921  */
7922 static void
7923 print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
7924 {
7925 	time_t start, end, pause;
7926 	uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
7927 	uint64_t elapsed, scan_rate, issue_rate;
7928 	double fraction_done;
7929 	char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
7930 	char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];
7931 
7932 	printf("  ");
7933 	printf_color(ANSI_BOLD, gettext("scan:"));
7934 	printf(" ");
7935 
7936 	/* If there's never been a scan, there's not much to say. */
7937 	if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
7938 	    ps->pss_func >= POOL_SCAN_FUNCS) {
7939 		(void) printf(gettext("none requested\n"));
7940 		return;
7941 	}
7942 
7943 	start = ps->pss_start_time;
7944 	end = ps->pss_end_time;
7945 	pause = ps->pss_pass_scrub_pause;
7946 
7947 	zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
7948 
7949 	int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
7950 	int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
7951 	assert(is_resilver || is_scrub);
7952 
7953 	/* Scan is finished or canceled. */
7954 	if (ps->pss_state == DSS_FINISHED) {
7955 		secs_to_dhms(end - start, time_buf);
7956 
7957 		if (is_scrub) {
7958 			(void) printf(gettext("scrub repaired %s "
7959 			    "in %s with %llu errors on %s"), processed_buf,
7960 			    time_buf, (u_longlong_t)ps->pss_errors,
7961 			    ctime(&end));
7962 		} else if (is_resilver) {
7963 			(void) printf(gettext("resilvered %s "
7964 			    "in %s with %llu errors on %s"), processed_buf,
7965 			    time_buf, (u_longlong_t)ps->pss_errors,
7966 			    ctime(&end));
7967 		}
7968 		return;
7969 	} else if (ps->pss_state == DSS_CANCELED) {
7970 		if (is_scrub) {
7971 			(void) printf(gettext("scrub canceled on %s"),
7972 			    ctime(&end));
7973 		} else if (is_resilver) {
7974 			(void) printf(gettext("resilver canceled on %s"),
7975 			    ctime(&end));
7976 		}
7977 		return;
7978 	}
7979 
7980 	assert(ps->pss_state == DSS_SCANNING);
7981 
7982 	/* Scan is in progress. Resilvers can't be paused. */
7983 	if (is_scrub) {
7984 		if (pause == 0) {
7985 			(void) printf(gettext("scrub in progress since %s"),
7986 			    ctime(&start));
7987 		} else {
7988 			(void) printf(gettext("scrub paused since %s"),
7989 			    ctime(&pause));
7990 			(void) printf(gettext("\tscrub started on %s"),
7991 			    ctime(&start));
7992 		}
7993 	} else if (is_resilver) {
7994 		(void) printf(gettext("resilver in progress since %s"),
7995 		    ctime(&start));
7996 	}
7997 
7998 	scanned = ps->pss_examined;
7999 	pass_scanned = ps->pss_pass_exam;
8000 	issued = ps->pss_issued;
8001 	pass_issued = ps->pss_pass_issued;
8002 	total_s = ps->pss_to_examine;
8003 	total_i = ps->pss_to_examine - ps->pss_skipped;
8004 
8005 	/* we are only done with a block once we have issued the IO for it */
8006 	fraction_done = (double)issued / total_i;
8007 
8008 	/* elapsed time for this pass, rounding up to 1 if it's 0 */
8009 	elapsed = time(NULL) - ps->pss_pass_start;
8010 	elapsed -= ps->pss_pass_scrub_spent_paused;
8011 	elapsed = (elapsed != 0) ? elapsed : 1;
8012 
8013 	scan_rate = pass_scanned / elapsed;
8014 	issue_rate = pass_issued / elapsed;
8015 
8016 	/* format all of the numbers we will be reporting */
8017 	zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
8018 	zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
8019 	zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
8020 	zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));
8021 
8022 	/* do not print estimated time if we have a paused scrub */
8023 	(void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
8024 	if (pause == 0 && scan_rate > 0) {
8025 		zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
8026 		(void) printf(gettext(" at %s/s"), srate_buf);
8027 	}
8028 	(void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
8029 	if (pause == 0 && issue_rate > 0) {
8030 		zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
8031 		(void) printf(gettext(" at %s/s"), irate_buf);
8032 	}
8033 	(void) printf(gettext("\n"));
8034 
8035 	if (is_resilver) {
8036 		(void) printf(gettext("\t%s resilvered, %.2f%% done"),
8037 		    processed_buf, 100 * fraction_done);
8038 	} else if (is_scrub) {
8039 		(void) printf(gettext("\t%s repaired, %.2f%% done"),
8040 		    processed_buf, 100 * fraction_done);
8041 	}
8042 
8043 	if (pause == 0) {
8044 		/*
8045 		 * Only provide an estimate iff:
8046 		 * 1) we haven't yet issued all we expected, and
8047 		 * 2) the issue rate exceeds 10 MB/s, and
8048 		 * 3) it's either:
8049 		 *    a) a resilver which has started repairs, or
8050 		 *    b) a scrub which has entered the issue phase.
8051 		 */
8052 		if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
8053 		    ((is_resilver && ps->pss_processed > 0) ||
8054 		    (is_scrub && issued > 0))) {
8055 			secs_to_dhms((total_i - issued) / issue_rate, time_buf);
8056 			(void) printf(gettext(", %s to go\n"), time_buf);
8057 		} else {
8058 			(void) printf(gettext(", no estimated "
8059 			    "completion time\n"));
8060 		}
8061 	} else {
8062 		(void) printf(gettext("\n"));
8063 	}
8064 }
8065 
8066 static void
8067 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
8068 {
8069 	if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
8070 		return;
8071 
8072 	printf("  ");
8073 	printf_color(ANSI_BOLD, gettext("scan:"));
8074 	printf(" ");
8075 
8076 	uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
8077 	uint64_t bytes_issued = vrs->vrs_bytes_issued;
8078 	uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
8079 	uint64_t bytes_est_s = vrs->vrs_bytes_est;
8080 	uint64_t bytes_est_i = vrs->vrs_bytes_est;
8081 	if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
8082 		bytes_est_i -= vrs->vrs_pass_bytes_skipped;
8083 	uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
8084 	    (vrs->vrs_pass_time_ms + 1)) * 1000;
8085 	uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
8086 	    (vrs->vrs_pass_time_ms + 1)) * 1000;
8087 	double scan_pct = MIN((double)bytes_scanned * 100 /
8088 	    (bytes_est_s + 1), 100);
8089 
8090 	/* Format all of the numbers we will be reporting */
8091 	char bytes_scanned_buf[7], bytes_issued_buf[7];
8092 	char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
8093 	char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
8094 	zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
8095 	    sizeof (bytes_scanned_buf));
8096 	zfs_nicebytes(bytes_issued, bytes_issued_buf,
8097 	    sizeof (bytes_issued_buf));
8098 	zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
8099 	    sizeof (bytes_rebuilt_buf));
8100 	zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
8101 	zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));
8102 
8103 	time_t start = vrs->vrs_start_time;
8104 	time_t end = vrs->vrs_end_time;
8105 
8106 	/* Rebuild is finished or canceled. */
8107 	if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
8108 		secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
8109 		(void) printf(gettext("resilvered (%s) %s in %s "
8110 		    "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
8111 		    time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
8112 		return;
8113 	} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
8114 		(void) printf(gettext("resilver (%s) canceled on %s"),
8115 		    vdev_name, ctime(&end));
8116 		return;
8117 	} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8118 		(void) printf(gettext("resilver (%s) in progress since %s"),
8119 		    vdev_name, ctime(&start));
8120 	}
8121 
8122 	assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
8123 
8124 	(void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
8125 	    bytes_est_s_buf);
8126 	if (scan_rate > 0) {
8127 		zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
8128 		(void) printf(gettext(" at %s/s"), scan_rate_buf);
8129 	}
8130 	(void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
8131 	    bytes_est_i_buf);
8132 	if (issue_rate > 0) {
8133 		zfs_nicebytes(issue_rate, issue_rate_buf,
8134 		    sizeof (issue_rate_buf));
8135 		(void) printf(gettext(" at %s/s"), issue_rate_buf);
8136 	}
8137 	(void) printf(gettext("\n"));
8138 
8139 	(void) printf(gettext("\t%s resilvered, %.2f%% done"),
8140 	    bytes_rebuilt_buf, scan_pct);
8141 
8142 	if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8143 		if (bytes_est_s >= bytes_scanned &&
8144 		    scan_rate >= 10 * 1024 * 1024) {
8145 			secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
8146 			    time_buf);
8147 			(void) printf(gettext(", %s to go\n"), time_buf);
8148 		} else {
8149 			(void) printf(gettext(", no estimated "
8150 			    "completion time\n"));
8151 		}
8152 	} else {
8153 		(void) printf(gettext("\n"));
8154 	}
8155 }
8156 
8157 /*
8158  * Print rebuild status for top-level vdevs.
8159  */
8160 static void
8161 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
8162 {
8163 	nvlist_t **child;
8164 	uint_t children;
8165 
8166 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8167 	    &child, &children) != 0)
8168 		children = 0;
8169 
8170 	for (uint_t c = 0; c < children; c++) {
8171 		vdev_rebuild_stat_t *vrs;
8172 		uint_t i;
8173 
8174 		if (nvlist_lookup_uint64_array(child[c],
8175 		    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
8176 			char *name = zpool_vdev_name(g_zfs, zhp,
8177 			    child[c], VDEV_NAME_TYPE_ID);
8178 			print_rebuild_status_impl(vrs, i, name);
8179 			free(name);
8180 		}
8181 	}
8182 }
8183 
8184 /*
8185  * As we don't scrub checkpointed blocks, we want to warn the user that we
8186  * skipped scanning some blocks if a checkpoint exists or existed at any
8187  * time during the scan.  If a sequential instead of healing reconstruction
8188  * was performed then the blocks were reconstructed.  However, their checksums
8189  * have not been verified so we still print the warning.
8190  */
8191 static void
8192 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
8193 {
8194 	if (ps == NULL || pcs == NULL)
8195 		return;
8196 
8197 	if (pcs->pcs_state == CS_NONE ||
8198 	    pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
8199 		return;
8200 
8201 	assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
8202 
8203 	if (ps->pss_state == DSS_NONE)
8204 		return;
8205 
8206 	if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
8207 	    ps->pss_end_time < pcs->pcs_start_time)
8208 		return;
8209 
8210 	if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
8211 		(void) printf(gettext("    scan warning: skipped blocks "
8212 		    "that are only referenced by the checkpoint.\n"));
8213 	} else {
8214 		assert(ps->pss_state == DSS_SCANNING);
8215 		(void) printf(gettext("    scan warning: skipping blocks "
8216 		    "that are only referenced by the checkpoint.\n"));
8217 	}
8218 }
8219 
8220 /*
8221  * Returns B_TRUE if there is an active rebuild in progress.  Otherwise,
8222  * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
8223  * the last completed (or cancelled) rebuild.
8224  */
8225 static boolean_t
8226 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
8227 {
8228 	nvlist_t **child;
8229 	uint_t children;
8230 	boolean_t rebuilding = B_FALSE;
8231 	uint64_t end_time = 0;
8232 
8233 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8234 	    &child, &children) != 0)
8235 		children = 0;
8236 
8237 	for (uint_t c = 0; c < children; c++) {
8238 		vdev_rebuild_stat_t *vrs;
8239 		uint_t i;
8240 
8241 		if (nvlist_lookup_uint64_array(child[c],
8242 		    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
8243 
8244 			if (vrs->vrs_end_time > end_time)
8245 				end_time = vrs->vrs_end_time;
8246 
8247 			if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8248 				rebuilding = B_TRUE;
8249 				end_time = 0;
8250 				break;
8251 			}
8252 		}
8253 	}
8254 
8255 	if (rebuild_end_time != NULL)
8256 		*rebuild_end_time = end_time;
8257 
8258 	return (rebuilding);
8259 }
8260 
8261 /*
8262  * Print the scan status.
8263  */
8264 static void
8265 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
8266 {
8267 	uint64_t rebuild_end_time = 0, resilver_end_time = 0;
8268 	boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
8269 	boolean_t have_errorscrub = B_FALSE;
8270 	boolean_t active_resilver = B_FALSE;
8271 	pool_checkpoint_stat_t *pcs = NULL;
8272 	pool_scan_stat_t *ps = NULL;
8273 	uint_t c;
8274 	time_t scrub_start = 0, errorscrub_start = 0;
8275 
8276 	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
8277 	    (uint64_t **)&ps, &c) == 0) {
8278 		if (ps->pss_func == POOL_SCAN_RESILVER) {
8279 			resilver_end_time = ps->pss_end_time;
8280 			active_resilver = (ps->pss_state == DSS_SCANNING);
8281 		}
8282 
8283 		have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
8284 		have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
8285 		scrub_start = ps->pss_start_time;
8286 		if (c > offsetof(pool_scan_stat_t,
8287 		    pss_pass_error_scrub_pause) / 8) {
8288 			have_errorscrub = (ps->pss_error_scrub_func ==
8289 			    POOL_SCAN_ERRORSCRUB);
8290 			errorscrub_start = ps->pss_error_scrub_start;
8291 		}
8292 	}
8293 
8294 	boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
8295 	boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
8296 
8297 	/* Always print the scrub status when available. */
8298 	if (have_scrub && scrub_start > errorscrub_start)
8299 		print_scan_scrub_resilver_status(ps);
8300 	else if (have_errorscrub && errorscrub_start >= scrub_start)
8301 		print_err_scrub_status(ps);
8302 
8303 	/*
8304 	 * When there is an active resilver or rebuild print its status.
8305 	 * Otherwise print the status of the last resilver or rebuild.
8306 	 */
8307 	if (active_resilver || (!active_rebuild && have_resilver &&
8308 	    resilver_end_time && resilver_end_time > rebuild_end_time)) {
8309 		print_scan_scrub_resilver_status(ps);
8310 	} else if (active_rebuild || (!active_resilver && have_rebuild &&
8311 	    rebuild_end_time && rebuild_end_time > resilver_end_time)) {
8312 		print_rebuild_status(zhp, nvroot);
8313 	}
8314 
8315 	(void) nvlist_lookup_uint64_array(nvroot,
8316 	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
8317 	print_checkpoint_scan_warning(ps, pcs);
8318 }
8319 
8320 /*
8321  * Print out detailed removal status.
8322  */
8323 static void
8324 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
8325 {
8326 	char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
8327 	time_t start, end;
8328 	nvlist_t *config, *nvroot;
8329 	nvlist_t **child;
8330 	uint_t children;
8331 	char *vdev_name;
8332 
8333 	if (prs == NULL || prs->prs_state == DSS_NONE)
8334 		return;
8335 
8336 	/*
8337 	 * Determine name of vdev.
8338 	 */
8339 	config = zpool_get_config(zhp, NULL);
8340 	nvroot = fnvlist_lookup_nvlist(config,
8341 	    ZPOOL_CONFIG_VDEV_TREE);
8342 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8343 	    &child, &children) == 0);
8344 	assert(prs->prs_removing_vdev < children);
8345 	vdev_name = zpool_vdev_name(g_zfs, zhp,
8346 	    child[prs->prs_removing_vdev], B_TRUE);
8347 
8348 	printf_color(ANSI_BOLD, gettext("remove: "));
8349 
8350 	start = prs->prs_start_time;
8351 	end = prs->prs_end_time;
8352 	zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
8353 
8354 	/*
8355 	 * Removal is finished or canceled.
8356 	 */
8357 	if (prs->prs_state == DSS_FINISHED) {
8358 		uint64_t minutes_taken = (end - start) / 60;
8359 
8360 		(void) printf(gettext("Removal of vdev %llu copied %s "
8361 		    "in %lluh%um, completed on %s"),
8362 		    (longlong_t)prs->prs_removing_vdev,
8363 		    copied_buf,
8364 		    (u_longlong_t)(minutes_taken / 60),
8365 		    (uint_t)(minutes_taken % 60),
8366 		    ctime((time_t *)&end));
8367 	} else if (prs->prs_state == DSS_CANCELED) {
8368 		(void) printf(gettext("Removal of %s canceled on %s"),
8369 		    vdev_name, ctime(&end));
8370 	} else {
8371 		uint64_t copied, total, elapsed, mins_left, hours_left;
8372 		double fraction_done;
8373 		uint_t rate;
8374 
8375 		assert(prs->prs_state == DSS_SCANNING);
8376 
8377 		/*
8378 		 * Removal is in progress.
8379 		 */
8380 		(void) printf(gettext(
8381 		    "Evacuation of %s in progress since %s"),
8382 		    vdev_name, ctime(&start));
8383 
8384 		copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
8385 		total = prs->prs_to_copy;
8386 		fraction_done = (double)copied / total;
8387 
8388 		/* elapsed time for this pass */
8389 		elapsed = time(NULL) - prs->prs_start_time;
8390 		elapsed = elapsed > 0 ? elapsed : 1;
8391 		rate = copied / elapsed;
8392 		rate = rate > 0 ? rate : 1;
8393 		mins_left = ((total - copied) / rate) / 60;
8394 		hours_left = mins_left / 60;
8395 
8396 		zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
8397 		zfs_nicenum(total, total_buf, sizeof (total_buf));
8398 		zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
8399 
8400 		/*
8401 		 * do not print estimated time if hours_left is more than
8402 		 * 30 days
8403 		 */
8404 		(void) printf(gettext(
8405 		    "\t%s copied out of %s at %s/s, %.2f%% done"),
8406 		    examined_buf, total_buf, rate_buf, 100 * fraction_done);
8407 		if (hours_left < (30 * 24)) {
8408 			(void) printf(gettext(", %lluh%um to go\n"),
8409 			    (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
8410 		} else {
8411 			(void) printf(gettext(
8412 			    ", (copy is slow, no estimated time)\n"));
8413 		}
8414 	}
8415 	free(vdev_name);
8416 
8417 	if (prs->prs_mapping_memory > 0) {
8418 		char mem_buf[7];
8419 		zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
8420 		(void) printf(gettext(
8421 		    "\t%s memory used for removed device mappings\n"),
8422 		    mem_buf);
8423 	}
8424 }
8425 
8426 /*
8427  * Print out detailed raidz expansion status.
8428  */
8429 static void
8430 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
8431 {
8432 	char copied_buf[7];
8433 
8434 	if (pres == NULL || pres->pres_state == DSS_NONE)
8435 		return;
8436 
8437 	/*
8438 	 * Determine name of vdev.
8439 	 */
8440 	nvlist_t *config = zpool_get_config(zhp, NULL);
8441 	nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
8442 	    ZPOOL_CONFIG_VDEV_TREE);
8443 	nvlist_t **child;
8444 	uint_t children;
8445 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8446 	    &child, &children) == 0);
8447 	assert(pres->pres_expanding_vdev < children);
8448 
8449 	printf_color(ANSI_BOLD, gettext("expand: "));
8450 
8451 	time_t start = pres->pres_start_time;
8452 	time_t end = pres->pres_end_time;
8453 	char *vname =
8454 	    zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
8455 	zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));
8456 
8457 	/*
8458 	 * Expansion is finished or canceled.
8459 	 */
8460 	if (pres->pres_state == DSS_FINISHED) {
8461 		char time_buf[32];
8462 		secs_to_dhms(end - start, time_buf);
8463 
8464 		(void) printf(gettext("expanded %s-%u copied %s in %s, "
8465 		    "on %s"), vname, (int)pres->pres_expanding_vdev,
8466 		    copied_buf, time_buf, ctime((time_t *)&end));
8467 	} else {
8468 		char examined_buf[7], total_buf[7], rate_buf[7];
8469 		uint64_t copied, total, elapsed, secs_left;
8470 		double fraction_done;
8471 		uint_t rate;
8472 
8473 		assert(pres->pres_state == DSS_SCANNING);
8474 
8475 		/*
8476 		 * Expansion is in progress.
8477 		 */
8478 		(void) printf(gettext(
8479 		    "expansion of %s-%u in progress since %s"),
8480 		    vname, (int)pres->pres_expanding_vdev, ctime(&start));
8481 
8482 		copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
8483 		total = pres->pres_to_reflow;
8484 		fraction_done = (double)copied / total;
8485 
8486 		/* elapsed time for this pass */
8487 		elapsed = time(NULL) - pres->pres_start_time;
8488 		elapsed = elapsed > 0 ? elapsed : 1;
8489 		rate = copied / elapsed;
8490 		rate = rate > 0 ? rate : 1;
8491 		secs_left = (total - copied) / rate;
8492 
8493 		zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
8494 		zfs_nicenum(total, total_buf, sizeof (total_buf));
8495 		zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
8496 
8497 		/*
8498 		 * do not print estimated time if hours_left is more than
8499 		 * 30 days
8500 		 */
8501 		(void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
8502 		    examined_buf, total_buf, rate_buf, 100 * fraction_done);
8503 		if (pres->pres_waiting_for_resilver) {
8504 			(void) printf(gettext(", paused for resilver or "
8505 			    "clear\n"));
8506 		} else if (secs_left < (30 * 24 * 3600)) {
8507 			char time_buf[32];
8508 			secs_to_dhms(secs_left, time_buf);
8509 			(void) printf(gettext(", %s to go\n"), time_buf);
8510 		} else {
8511 			(void) printf(gettext(
8512 			    ", (copy is slow, no estimated time)\n"));
8513 		}
8514 	}
8515 	free(vname);
8516 }
8517 static void
8518 print_checkpoint_status(pool_checkpoint_stat_t *pcs)
8519 {
8520 	time_t start;
8521 	char space_buf[7];
8522 
8523 	if (pcs == NULL || pcs->pcs_state == CS_NONE)
8524 		return;
8525 
8526 	(void) printf(gettext("checkpoint: "));
8527 
8528 	start = pcs->pcs_start_time;
8529 	zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
8530 
8531 	if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
8532 		char *date = ctime(&start);
8533 
8534 		/*
8535 		 * ctime() adds a newline at the end of the generated
8536 		 * string, thus the weird format specifier and the
8537 		 * strlen() call used to chop it off from the output.
8538 		 */
8539 		(void) printf(gettext("created %.*s, consumes %s\n"),
8540 		    (int)(strlen(date) - 1), date, space_buf);
8541 		return;
8542 	}
8543 
8544 	assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
8545 
8546 	(void) printf(gettext("discarding, %s remaining.\n"),
8547 	    space_buf);
8548 }
8549 
8550 static void
8551 print_error_log(zpool_handle_t *zhp)
8552 {
8553 	nvlist_t *nverrlist = NULL;
8554 	nvpair_t *elem;
8555 	char *pathname;
8556 	size_t len = MAXPATHLEN * 2;
8557 
8558 	if (zpool_get_errlog(zhp, &nverrlist) != 0)
8559 		return;
8560 
8561 	(void) printf("errors: Permanent errors have been "
8562 	    "detected in the following files:\n\n");
8563 
8564 	pathname = safe_malloc(len);
8565 	elem = NULL;
8566 	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
8567 		nvlist_t *nv;
8568 		uint64_t dsobj, obj;
8569 
8570 		verify(nvpair_value_nvlist(elem, &nv) == 0);
8571 		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
8572 		    &dsobj) == 0);
8573 		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
8574 		    &obj) == 0);
8575 		zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
8576 		(void) printf("%7s %s\n", "", pathname);
8577 	}
8578 	free(pathname);
8579 	nvlist_free(nverrlist);
8580 }
8581 
8582 static void
8583 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
8584     uint_t nspares)
8585 {
8586 	uint_t i;
8587 	char *name;
8588 
8589 	if (nspares == 0)
8590 		return;
8591 
8592 	(void) printf(gettext("\tspares\n"));
8593 
8594 	for (i = 0; i < nspares; i++) {
8595 		name = zpool_vdev_name(g_zfs, zhp, spares[i],
8596 		    cb->cb_name_flags);
8597 		print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
8598 		free(name);
8599 	}
8600 }
8601 
8602 static void
8603 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
8604     uint_t nl2cache)
8605 {
8606 	uint_t i;
8607 	char *name;
8608 
8609 	if (nl2cache == 0)
8610 		return;
8611 
8612 	(void) printf(gettext("\tcache\n"));
8613 
8614 	for (i = 0; i < nl2cache; i++) {
8615 		name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
8616 		    cb->cb_name_flags);
8617 		print_status_config(zhp, cb, name, l2cache[i], 2,
8618 		    B_FALSE, NULL);
8619 		free(name);
8620 	}
8621 }
8622 
8623 static void
8624 print_dedup_stats(nvlist_t *config)
8625 {
8626 	ddt_histogram_t *ddh;
8627 	ddt_stat_t *dds;
8628 	ddt_object_t *ddo;
8629 	uint_t c;
8630 	char dspace[6], mspace[6];
8631 
8632 	/*
8633 	 * If the pool was faulted then we may not have been able to
8634 	 * obtain the config. Otherwise, if we have anything in the dedup
8635 	 * table continue processing the stats.
8636 	 */
8637 	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
8638 	    (uint64_t **)&ddo, &c) != 0)
8639 		return;
8640 
8641 	(void) printf("\n");
8642 	(void) printf(gettext(" dedup: "));
8643 	if (ddo->ddo_count == 0) {
8644 		(void) printf(gettext("no DDT entries\n"));
8645 		return;
8646 	}
8647 
8648 	zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
8649 	zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
8650 	(void) printf("DDT entries %llu, size %s on disk, %s in core\n",
8651 	    (u_longlong_t)ddo->ddo_count,
8652 	    dspace,
8653 	    mspace);
8654 
8655 	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
8656 	    (uint64_t **)&dds, &c) == 0);
8657 	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
8658 	    (uint64_t **)&ddh, &c) == 0);
8659 	zpool_dump_ddt(dds, ddh);
8660 }
8661 
8662 /*
8663  * Display a summary of pool status.  Displays a summary such as:
8664  *
8665  *        pool: tank
8666  *	status: DEGRADED
8667  *	reason: One or more devices ...
8668  *         see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
8669  *	config:
8670  *		mirror		DEGRADED
8671  *                c1t0d0	OK
8672  *                c2t0d0	UNAVAIL
8673  *
8674  * When given the '-v' option, we print out the complete config.  If the '-e'
8675  * option is specified, then we print out error rate information as well.
8676  */
8677 static int
8678 status_callback(zpool_handle_t *zhp, void *data)
8679 {
8680 	status_cbdata_t *cbp = data;
8681 	nvlist_t *config, *nvroot;
8682 	const char *msgid;
8683 	zpool_status_t reason;
8684 	zpool_errata_t errata;
8685 	const char *health;
8686 	uint_t c;
8687 	vdev_stat_t *vs;
8688 
8689 	config = zpool_get_config(zhp, NULL);
8690 	reason = zpool_get_status(zhp, &msgid, &errata);
8691 
8692 	cbp->cb_count++;
8693 
8694 	/*
8695 	 * If we were given 'zpool status -x', only report those pools with
8696 	 * problems.
8697 	 */
8698 	if (cbp->cb_explain &&
8699 	    (reason == ZPOOL_STATUS_OK ||
8700 	    reason == ZPOOL_STATUS_VERSION_OLDER ||
8701 	    reason == ZPOOL_STATUS_FEAT_DISABLED ||
8702 	    reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
8703 	    reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
8704 		if (!cbp->cb_allpools) {
8705 			(void) printf(gettext("pool '%s' is healthy\n"),
8706 			    zpool_get_name(zhp));
8707 			if (cbp->cb_first)
8708 				cbp->cb_first = B_FALSE;
8709 		}
8710 		return (0);
8711 	}
8712 
8713 	if (cbp->cb_first)
8714 		cbp->cb_first = B_FALSE;
8715 	else
8716 		(void) printf("\n");
8717 
8718 	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
8719 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
8720 	    (uint64_t **)&vs, &c) == 0);
8721 
8722 	health = zpool_get_state_str(zhp);
8723 
8724 	printf("  ");
8725 	printf_color(ANSI_BOLD, gettext("pool:"));
8726 	printf(" %s\n", zpool_get_name(zhp));
8727 	fputc(' ', stdout);
8728 	printf_color(ANSI_BOLD, gettext("state: "));
8729 
8730 	printf_color(health_str_to_color(health), "%s", health);
8731 
8732 	fputc('\n', stdout);
8733 
8734 	switch (reason) {
8735 	case ZPOOL_STATUS_MISSING_DEV_R:
8736 		printf_color(ANSI_BOLD, gettext("status: "));
8737 		printf_color(ANSI_YELLOW, gettext("One or more devices could "
8738 		    "not be opened.  Sufficient replicas exist for\n\tthe pool "
8739 		    "to continue functioning in a degraded state.\n"));
8740 		printf_color(ANSI_BOLD, gettext("action: "));
8741 		printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8742 		    "and online it using 'zpool online'.\n"));
8743 		break;
8744 
8745 	case ZPOOL_STATUS_MISSING_DEV_NR:
8746 		printf_color(ANSI_BOLD, gettext("status: "));
8747 		printf_color(ANSI_YELLOW, gettext("One or more devices could "
8748 		    "not be opened.  There are insufficient\n\treplicas for the"
8749 		    " pool to continue functioning.\n"));
8750 		printf_color(ANSI_BOLD, gettext("action: "));
8751 		printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8752 		    "and online it using 'zpool online'.\n"));
8753 		break;
8754 
8755 	case ZPOOL_STATUS_CORRUPT_LABEL_R:
8756 		printf_color(ANSI_BOLD, gettext("status: "));
8757 		printf_color(ANSI_YELLOW, gettext("One or more devices could "
8758 		    "not be used because the label is missing or\n\tinvalid.  "
8759 		    "Sufficient replicas exist for the pool to continue\n\t"
8760 		    "functioning in a degraded state.\n"));
8761 		printf_color(ANSI_BOLD, gettext("action: "));
8762 		printf_color(ANSI_YELLOW, gettext("Replace the device using "
8763 		    "'zpool replace'.\n"));
8764 		break;
8765 
8766 	case ZPOOL_STATUS_CORRUPT_LABEL_NR:
8767 		printf_color(ANSI_BOLD, gettext("status: "));
8768 		printf_color(ANSI_YELLOW, gettext("One or more devices could "
8769 		    "not be used because the label is missing \n\tor invalid.  "
8770 		    "There are insufficient replicas for the pool to "
8771 		    "continue\n\tfunctioning.\n"));
8772 		zpool_explain_recover(zpool_get_handle(zhp),
8773 		    zpool_get_name(zhp), reason, config);
8774 		break;
8775 
8776 	case ZPOOL_STATUS_FAILING_DEV:
8777 		printf_color(ANSI_BOLD, gettext("status: "));
8778 		printf_color(ANSI_YELLOW, gettext("One or more devices has "
8779 		    "experienced an unrecoverable error.  An\n\tattempt was "
8780 		    "made to correct the error.  Applications are "
8781 		    "unaffected.\n"));
8782 		printf_color(ANSI_BOLD, gettext("action: "));
8783 			printf_color(ANSI_YELLOW, gettext("Determine if the "
8784 		    "device needs to be replaced, and clear the errors\n\tusing"
8785 		    " 'zpool clear' or replace the device with 'zpool "
8786 		    "replace'.\n"));
8787 		break;
8788 
8789 	case ZPOOL_STATUS_OFFLINE_DEV:
8790 		printf_color(ANSI_BOLD, gettext("status: "));
8791 		printf_color(ANSI_YELLOW, gettext("One or more devices has "
8792 		    "been taken offline by the administrator.\n\tSufficient "
8793 		    "replicas exist for the pool to continue functioning in "
8794 		    "a\n\tdegraded state.\n"));
8795 		printf_color(ANSI_BOLD, gettext("action: "));
8796 		printf_color(ANSI_YELLOW, gettext("Online the device "
8797 		    "using 'zpool online' or replace the device with\n\t'zpool "
8798 		    "replace'.\n"));
8799 		break;
8800 
8801 	case ZPOOL_STATUS_REMOVED_DEV:
8802 		printf_color(ANSI_BOLD, gettext("status: "));
8803 		printf_color(ANSI_YELLOW, gettext("One or more devices has "
8804 		    "been removed by the administrator.\n\tSufficient "
8805 		    "replicas exist for the pool to continue functioning in "
8806 		    "a\n\tdegraded state.\n"));
8807 		printf_color(ANSI_BOLD, gettext("action: "));
8808 		printf_color(ANSI_YELLOW, gettext("Online the device "
8809 		    "using zpool online' or replace the device with\n\t'zpool "
8810 		    "replace'.\n"));
8811 		break;
8812 
8813 	case ZPOOL_STATUS_RESILVERING:
8814 	case ZPOOL_STATUS_REBUILDING:
8815 		printf_color(ANSI_BOLD, gettext("status: "));
8816 		printf_color(ANSI_YELLOW, gettext("One or more devices is "
8817 		    "currently being resilvered.  The pool will\n\tcontinue "
8818 		    "to function, possibly in a degraded state.\n"));
8819 		printf_color(ANSI_BOLD, gettext("action: "));
8820 		printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
8821 		    "complete.\n"));
8822 		break;
8823 
8824 	case ZPOOL_STATUS_REBUILD_SCRUB:
8825 		printf_color(ANSI_BOLD, gettext("status: "));
8826 		printf_color(ANSI_YELLOW, gettext("One or more devices have "
8827 		    "been sequentially resilvered, scrubbing\n\tthe pool "
8828 		    "is recommended.\n"));
8829 		printf_color(ANSI_BOLD, gettext("action: "));
8830 		printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
8831 		    "verify all data checksums.\n"));
8832 		break;
8833 
8834 	case ZPOOL_STATUS_CORRUPT_DATA:
8835 		printf_color(ANSI_BOLD, gettext("status: "));
8836 		printf_color(ANSI_YELLOW, gettext("One or more devices has "
8837 		    "experienced an error resulting in data\n\tcorruption.  "
8838 		    "Applications may be affected.\n"));
8839 		printf_color(ANSI_BOLD, gettext("action: "));
8840 		printf_color(ANSI_YELLOW, gettext("Restore the file in question"
8841 		    " if possible.  Otherwise restore the\n\tentire pool from "
8842 		    "backup.\n"));
8843 		break;
8844 
8845 	case ZPOOL_STATUS_CORRUPT_POOL:
8846 		printf_color(ANSI_BOLD, gettext("status: "));
8847 		printf_color(ANSI_YELLOW, gettext("The pool metadata is "
8848 		    "corrupted and the pool cannot be opened.\n"));
8849 		zpool_explain_recover(zpool_get_handle(zhp),
8850 		    zpool_get_name(zhp), reason, config);
8851 		break;
8852 
8853 	case ZPOOL_STATUS_VERSION_OLDER:
8854 		printf_color(ANSI_BOLD, gettext("status: "));
8855 		printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
8856 		    "a legacy on-disk format.  The pool can\n\tstill be used, "
8857 		    "but some features are unavailable.\n"));
8858 		printf_color(ANSI_BOLD, gettext("action: "));
8859 		printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
8860 		    "'zpool upgrade'.  Once this is done, the\n\tpool will no "
8861 		    "longer be accessible on software that does not support\n\t"
8862 		    "feature flags.\n"));
8863 		break;
8864 
8865 	case ZPOOL_STATUS_VERSION_NEWER:
8866 		printf_color(ANSI_BOLD, gettext("status: "));
8867 		printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
8868 		    "to a newer, incompatible on-disk version.\n\tThe pool "
8869 		    "cannot be accessed on this system.\n"));
8870 		printf_color(ANSI_BOLD, gettext("action: "));
8871 		printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8872 		    "system running more recent software, or\n\trestore the "
8873 		    "pool from backup.\n"));
8874 		break;
8875 
8876 	case ZPOOL_STATUS_FEAT_DISABLED:
8877 		printf_color(ANSI_BOLD, gettext("status: "));
8878 		printf_color(ANSI_YELLOW, gettext("Some supported and "
8879 		    "requested features are not enabled on the pool.\n\t"
8880 		    "The pool can still be used, but some features are "
8881 		    "unavailable.\n"));
8882 		printf_color(ANSI_BOLD, gettext("action: "));
8883 		printf_color(ANSI_YELLOW, gettext("Enable all features using "
8884 		    "'zpool upgrade'. Once this is done,\n\tthe pool may no "
8885 		    "longer be accessible by software that does not support\n\t"
8886 		    "the features. See zpool-features(7) for details.\n"));
8887 		break;
8888 
8889 	case ZPOOL_STATUS_COMPATIBILITY_ERR:
8890 		printf_color(ANSI_BOLD, gettext("status: "));
8891 		printf_color(ANSI_YELLOW, gettext("This pool has a "
8892 		    "compatibility list specified, but it could not be\n\t"
8893 		    "read/parsed at this time. The pool can still be used, "
8894 		    "but this\n\tshould be investigated.\n"));
8895 		printf_color(ANSI_BOLD, gettext("action: "));
8896 		printf_color(ANSI_YELLOW, gettext("Check the value of the "
8897 		    "'compatibility' property against the\n\t"
8898 		    "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
8899 		    ZPOOL_DATA_COMPAT_D ".\n"));
8900 		break;
8901 
8902 	case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
8903 		printf_color(ANSI_BOLD, gettext("status: "));
8904 		printf_color(ANSI_YELLOW, gettext("One or more features "
8905 		    "are enabled on the pool despite not being\n\t"
8906 		    "requested by the 'compatibility' property.\n"));
8907 		printf_color(ANSI_BOLD, gettext("action: "));
8908 		printf_color(ANSI_YELLOW, gettext("Consider setting "
8909 		    "'compatibility' to an appropriate value, or\n\t"
8910 		    "adding needed features to the relevant file in\n\t"
8911 		    ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
8912 		break;
8913 
8914 	case ZPOOL_STATUS_UNSUP_FEAT_READ:
8915 		printf_color(ANSI_BOLD, gettext("status: "));
8916 		printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8917 		    "on this system because it uses the\n\tfollowing feature(s)"
8918 		    " not supported on this system:\n"));
8919 		zpool_print_unsup_feat(config);
8920 		(void) printf("\n");
8921 		printf_color(ANSI_BOLD, gettext("action: "));
8922 		printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8923 		    "system that supports the required feature(s),\n\tor "
8924 		    "restore the pool from backup.\n"));
8925 		break;
8926 
8927 	case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
8928 		printf_color(ANSI_BOLD, gettext("status: "));
8929 		printf_color(ANSI_YELLOW, gettext("The pool can only be "
8930 		    "accessed in read-only mode on this system. It\n\tcannot be"
8931 		    " accessed in read-write mode because it uses the "
8932 		    "following\n\tfeature(s) not supported on this system:\n"));
8933 		zpool_print_unsup_feat(config);
8934 		(void) printf("\n");
8935 		printf_color(ANSI_BOLD, gettext("action: "));
8936 		printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8937 		    "in read-write mode. Import the pool with\n"
8938 		    "\t\"-o readonly=on\", access the pool from a system that "
8939 		    "supports the\n\trequired feature(s), or restore the "
8940 		    "pool from backup.\n"));
8941 		break;
8942 
8943 	case ZPOOL_STATUS_FAULTED_DEV_R:
8944 		printf_color(ANSI_BOLD, gettext("status: "));
8945 		printf_color(ANSI_YELLOW, gettext("One or more devices are "
8946 		    "faulted in response to persistent errors.\n\tSufficient "
8947 		    "replicas exist for the pool to continue functioning "
8948 		    "in a\n\tdegraded state.\n"));
8949 		printf_color(ANSI_BOLD, gettext("action: "));
8950 		printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
8951 		    "or use 'zpool clear' to mark the device\n\trepaired.\n"));
8952 		break;
8953 
8954 	case ZPOOL_STATUS_FAULTED_DEV_NR:
8955 		printf_color(ANSI_BOLD, gettext("status: "));
8956 		printf_color(ANSI_YELLOW, gettext("One or more devices are "
8957 		    "faulted in response to persistent errors.  There are "
8958 		    "insufficient replicas for the pool to\n\tcontinue "
8959 		    "functioning.\n"));
8960 		printf_color(ANSI_BOLD, gettext("action: "));
8961 		printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
8962 		    "pool from a backup source.  Manually marking the device\n"
8963 		    "\trepaired using 'zpool clear' may allow some data "
8964 		    "to be recovered.\n"));
8965 		break;
8966 
8967 	case ZPOOL_STATUS_IO_FAILURE_MMP:
8968 		printf_color(ANSI_BOLD, gettext("status: "));
8969 		printf_color(ANSI_YELLOW, gettext("The pool is suspended "
8970 		    "because multihost writes failed or were delayed;\n\t"
8971 		    "another system could import the pool undetected.\n"));
8972 		printf_color(ANSI_BOLD, gettext("action: "));
8973 		printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
8974 		    " are connected, then reboot your system and\n\timport the "
8975 		    "pool.\n"));
8976 		break;
8977 
8978 	case ZPOOL_STATUS_IO_FAILURE_WAIT:
8979 	case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
8980 		printf_color(ANSI_BOLD, gettext("status: "));
8981 		printf_color(ANSI_YELLOW, gettext("One or more devices are "
8982 		    "faulted in response to IO failures.\n"));
8983 		printf_color(ANSI_BOLD, gettext("action: "));
8984 		printf_color(ANSI_YELLOW, gettext("Make sure the affected "
8985 		    "devices are connected, then run 'zpool clear'.\n"));
8986 		break;
8987 
8988 	case ZPOOL_STATUS_BAD_LOG:
8989 		printf_color(ANSI_BOLD, gettext("status: "));
8990 		printf_color(ANSI_YELLOW, gettext("An intent log record "
8991 		    "could not be read.\n"
8992 		    "\tWaiting for administrator intervention to fix the "
8993 		    "faulted pool.\n"));
8994 		printf_color(ANSI_BOLD, gettext("action: "));
8995 		printf_color(ANSI_YELLOW, gettext("Either restore the affected "
8996 		    "device(s) and run 'zpool online',\n"
8997 		    "\tor ignore the intent log records by running "
8998 		    "'zpool clear'.\n"));
8999 		break;
9000 
9001 	case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
9002 		(void) printf(gettext("status: One or more devices are "
9003 		    "configured to use a non-native block size.\n"
9004 		    "\tExpect reduced performance.\n"));
9005 		(void) printf(gettext("action: Replace affected devices with "
9006 		    "devices that support the\n\tconfigured block size, or "
9007 		    "migrate data to a properly configured\n\tpool.\n"));
9008 		break;
9009 
9010 	case ZPOOL_STATUS_HOSTID_MISMATCH:
9011 		printf_color(ANSI_BOLD, gettext("status: "));
9012 		printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
9013 		    " and system hostid on imported pool.\n\tThis pool was "
9014 		    "previously imported into a system with a different "
9015 		    "hostid,\n\tand then was verbatim imported into this "
9016 		    "system.\n"));
9017 		printf_color(ANSI_BOLD, gettext("action: "));
9018 		printf_color(ANSI_YELLOW, gettext("Export this pool on all "
9019 		    "systems on which it is imported.\n"
9020 		    "\tThen import it to correct the mismatch.\n"));
9021 		break;
9022 
9023 	case ZPOOL_STATUS_ERRATA:
9024 		printf_color(ANSI_BOLD, gettext("status: "));
9025 		printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
9026 		    errata);
9027 
9028 		switch (errata) {
9029 		case ZPOOL_ERRATA_NONE:
9030 			break;
9031 
9032 		case ZPOOL_ERRATA_ZOL_2094_SCRUB:
9033 			printf_color(ANSI_BOLD, gettext("action: "));
9034 			printf_color(ANSI_YELLOW, gettext("To correct the issue"
9035 			    " run 'zpool scrub'.\n"));
9036 			break;
9037 
9038 		case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
9039 			(void) printf(gettext("\tExisting encrypted datasets "
9040 			    "contain an on-disk incompatibility\n\twhich "
9041 			    "needs to be corrected.\n"));
9042 			printf_color(ANSI_BOLD, gettext("action: "));
9043 			printf_color(ANSI_YELLOW, gettext("To correct the issue"
9044 			    " backup existing encrypted datasets to new\n\t"
9045 			    "encrypted datasets and destroy the old ones. "
9046 			    "'zfs mount -o ro' can\n\tbe used to temporarily "
9047 			    "mount existing encrypted datasets readonly.\n"));
9048 			break;
9049 
9050 		case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
9051 			(void) printf(gettext("\tExisting encrypted snapshots "
9052 			    "and bookmarks contain an on-disk\n\tincompat"
9053 			    "ibility. This may cause on-disk corruption if "
9054 			    "they are used\n\twith 'zfs recv'.\n"));
9055 			printf_color(ANSI_BOLD, gettext("action: "));
9056 			printf_color(ANSI_YELLOW, gettext("To correct the"
9057 			    "issue, enable the bookmark_v2 feature. No "
9058 			    "additional\n\taction is needed if there are no "
9059 			    "encrypted snapshots or bookmarks.\n\tIf preserving"
9060 			    "the encrypted snapshots and bookmarks is required,"
9061 			    " use\n\ta non-raw send to backup and restore them."
9062 			    " Alternately, they may be\n\tremoved to resolve "
9063 			    "the incompatibility.\n"));
9064 			break;
9065 
9066 		default:
9067 			/*
9068 			 * All errata which allow the pool to be imported
9069 			 * must contain an action message.
9070 			 */
9071 			assert(0);
9072 		}
9073 		break;
9074 
9075 	default:
9076 		/*
9077 		 * The remaining errors can't actually be generated, yet.
9078 		 */
9079 		assert(reason == ZPOOL_STATUS_OK);
9080 	}
9081 
9082 	if (msgid != NULL) {
9083 		printf("   ");
9084 		printf_color(ANSI_BOLD, gettext("see:"));
9085 		printf(gettext(
9086 		    " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
9087 		    msgid);
9088 	}
9089 
9090 	if (config != NULL) {
9091 		uint64_t nerr;
9092 		nvlist_t **spares, **l2cache;
9093 		uint_t nspares, nl2cache;
9094 
9095 		print_scan_status(zhp, nvroot);
9096 
9097 		pool_removal_stat_t *prs = NULL;
9098 		(void) nvlist_lookup_uint64_array(nvroot,
9099 		    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
9100 		print_removal_status(zhp, prs);
9101 
9102 		pool_checkpoint_stat_t *pcs = NULL;
9103 		(void) nvlist_lookup_uint64_array(nvroot,
9104 		    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
9105 		print_checkpoint_status(pcs);
9106 
9107 		pool_raidz_expand_stat_t *pres = NULL;
9108 		(void) nvlist_lookup_uint64_array(nvroot,
9109 		    ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
9110 		print_raidz_expand_status(zhp, pres);
9111 
9112 		cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
9113 		    cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
9114 		if (cbp->cb_namewidth < 10)
9115 			cbp->cb_namewidth = 10;
9116 
9117 		color_start(ANSI_BOLD);
9118 		(void) printf(gettext("config:\n\n"));
9119 		(void) printf(gettext("\t%-*s  %-8s %5s %5s %5s"),
9120 		    cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
9121 		    "CKSUM");
9122 		color_end();
9123 
9124 		if (cbp->cb_print_slow_ios) {
9125 			printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
9126 		}
9127 
9128 		if (cbp->cb_print_power) {
9129 			printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
9130 		}
9131 
9132 		if (cbp->vcdl != NULL)
9133 			print_cmd_columns(cbp->vcdl, 0);
9134 
9135 		printf("\n");
9136 
9137 		print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
9138 		    B_FALSE, NULL);
9139 
9140 		print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
9141 		print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
9142 		print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
9143 
9144 		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
9145 		    &l2cache, &nl2cache) == 0)
9146 			print_l2cache(zhp, cbp, l2cache, nl2cache);
9147 
9148 		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
9149 		    &spares, &nspares) == 0)
9150 			print_spares(zhp, cbp, spares, nspares);
9151 
9152 		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
9153 		    &nerr) == 0) {
9154 			(void) printf("\n");
9155 			if (nerr == 0) {
9156 				(void) printf(gettext(
9157 				    "errors: No known data errors\n"));
9158 			} else if (!cbp->cb_verbose) {
9159 				color_start(ANSI_RED);
9160 				(void) printf(gettext("errors: %llu data "
9161 				    "errors, use '-v' for a list\n"),
9162 				    (u_longlong_t)nerr);
9163 				color_end();
9164 			} else {
9165 				print_error_log(zhp);
9166 			}
9167 		}
9168 
9169 		if (cbp->cb_dedup_stats)
9170 			print_dedup_stats(config);
9171 	} else {
9172 		(void) printf(gettext("config: The configuration cannot be "
9173 		    "determined.\n"));
9174 	}
9175 
9176 	return (0);
9177 }
9178 
9179 /*
9180  * zpool status [-c [script1,script2,...]] [-igLpPstvx] [--power] [-T d|u] ...
9181  *              [pool] [interval [count]]
9182  *
9183  *	-c CMD	For each vdev, run command CMD
9184  *	-e	Display only unhealthy vdevs
9185  *	-i	Display vdev initialization status.
9186  *	-g	Display guid for individual vdev name.
9187  *	-L	Follow links when resolving vdev path name.
9188  *	-p	Display values in parsable (exact) format.
9189  *	-P	Display full path for vdev name.
9190  *	-s	Display slow IOs column.
9191  *	-v	Display complete error logs
9192  *	-x	Display only pools with potential problems
9193  *	-D	Display dedup status (undocumented)
9194  *	-t	Display vdev TRIM status.
9195  *	-T	Display a timestamp in date(1) or Unix format
9196  *	--power	Display vdev enclosure slot power status
9197  *
9198  * Describes the health status of all pools or some subset.
9199  */
9200 int
9201 zpool_do_status(int argc, char **argv)
9202 {
9203 	int c;
9204 	int ret;
9205 	float interval = 0;
9206 	unsigned long count = 0;
9207 	status_cbdata_t cb = { 0 };
9208 	char *cmd = NULL;
9209 
9210 	struct option long_options[] = {
9211 		{"power", no_argument, NULL, POWER_OPT},
9212 		{0, 0, 0, 0}
9213 	};
9214 
9215 	/* check options */
9216 	while ((c = getopt_long(argc, argv, "c:eigLpPsvxDtT:", long_options,
9217 	    NULL)) != -1) {
9218 		switch (c) {
9219 		case 'c':
9220 			if (cmd != NULL) {
9221 				fprintf(stderr,
9222 				    gettext("Can't set -c flag twice\n"));
9223 				exit(1);
9224 			}
9225 
9226 			if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
9227 			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
9228 				fprintf(stderr, gettext(
9229 				    "Can't run -c, disabled by "
9230 				    "ZPOOL_SCRIPTS_ENABLED.\n"));
9231 				exit(1);
9232 			}
9233 
9234 			if ((getuid() <= 0 || geteuid() <= 0) &&
9235 			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
9236 				fprintf(stderr, gettext(
9237 				    "Can't run -c with root privileges "
9238 				    "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
9239 				exit(1);
9240 			}
9241 			cmd = optarg;
9242 			break;
9243 		case 'e':
9244 			cb.cb_print_unhealthy = B_TRUE;
9245 			break;
9246 		case 'i':
9247 			cb.cb_print_vdev_init = B_TRUE;
9248 			break;
9249 		case 'g':
9250 			cb.cb_name_flags |= VDEV_NAME_GUID;
9251 			break;
9252 		case 'L':
9253 			cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
9254 			break;
9255 		case 'p':
9256 			cb.cb_literal = B_TRUE;
9257 			break;
9258 		case 'P':
9259 			cb.cb_name_flags |= VDEV_NAME_PATH;
9260 			break;
9261 		case 's':
9262 			cb.cb_print_slow_ios = B_TRUE;
9263 			break;
9264 		case 'v':
9265 			cb.cb_verbose = B_TRUE;
9266 			break;
9267 		case 'x':
9268 			cb.cb_explain = B_TRUE;
9269 			break;
9270 		case 'D':
9271 			cb.cb_dedup_stats = B_TRUE;
9272 			break;
9273 		case 't':
9274 			cb.cb_print_vdev_trim = B_TRUE;
9275 			break;
9276 		case 'T':
9277 			get_timestamp_arg(*optarg);
9278 			break;
9279 		case POWER_OPT:
9280 			cb.cb_print_power = B_TRUE;
9281 			break;
9282 		case '?':
9283 			if (optopt == 'c') {
9284 				print_zpool_script_list("status");
9285 				exit(0);
9286 			} else {
9287 				fprintf(stderr,
9288 				    gettext("invalid option '%c'\n"), optopt);
9289 			}
9290 			usage(B_FALSE);
9291 		}
9292 	}
9293 
9294 	argc -= optind;
9295 	argv += optind;
9296 
9297 	get_interval_count(&argc, argv, &interval, &count);
9298 
9299 	if (argc == 0)
9300 		cb.cb_allpools = B_TRUE;
9301 
9302 	cb.cb_first = B_TRUE;
9303 	cb.cb_print_status = B_TRUE;
9304 
9305 	for (;;) {
9306 		if (timestamp_fmt != NODATE)
9307 			print_timestamp(timestamp_fmt);
9308 
9309 		if (cmd != NULL)
9310 			cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
9311 			    NULL, NULL, 0, 0);
9312 
9313 		ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
9314 		    cb.cb_literal, status_callback, &cb);
9315 
9316 		if (cb.vcdl != NULL)
9317 			free_vdev_cmd_data_list(cb.vcdl);
9318 
9319 		if (argc == 0 && cb.cb_count == 0)
9320 			(void) fprintf(stderr, gettext("no pools available\n"));
9321 		else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
9322 			(void) printf(gettext("all pools are healthy\n"));
9323 
9324 		if (ret != 0)
9325 			return (ret);
9326 
9327 		if (interval == 0)
9328 			break;
9329 
9330 		if (count != 0 && --count == 0)
9331 			break;
9332 
9333 		(void) fflush(stdout);
9334 		(void) fsleep(interval);
9335 	}
9336 
9337 	return (0);
9338 }
9339 
9340 typedef struct upgrade_cbdata {
9341 	int	cb_first;
9342 	int	cb_argc;
9343 	uint64_t cb_version;
9344 	char	**cb_argv;
9345 } upgrade_cbdata_t;
9346 
9347 static int
9348 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
9349 {
9350 	int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
9351 	int *count = (int *)unsupp_fs;
9352 
9353 	if (zfs_version > ZPL_VERSION) {
9354 		(void) printf(gettext("%s (v%d) is not supported by this "
9355 		    "implementation of ZFS.\n"),
9356 		    zfs_get_name(zhp), zfs_version);
9357 		(*count)++;
9358 	}
9359 
9360 	zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
9361 
9362 	zfs_close(zhp);
9363 
9364 	return (0);
9365 }
9366 
9367 static int
9368 upgrade_version(zpool_handle_t *zhp, uint64_t version)
9369 {
9370 	int ret;
9371 	nvlist_t *config;
9372 	uint64_t oldversion;
9373 	int unsupp_fs = 0;
9374 
9375 	config = zpool_get_config(zhp, NULL);
9376 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9377 	    &oldversion) == 0);
9378 
9379 	char compat[ZFS_MAXPROPLEN];
9380 	if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
9381 	    ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
9382 		compat[0] = '\0';
9383 
9384 	assert(SPA_VERSION_IS_SUPPORTED(oldversion));
9385 	assert(oldversion < version);
9386 
9387 	ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
9388 	if (ret != 0)
9389 		return (ret);
9390 
9391 	if (unsupp_fs) {
9392 		(void) fprintf(stderr, gettext("Upgrade not performed due "
9393 		    "to %d unsupported filesystems (max v%d).\n"),
9394 		    unsupp_fs, (int)ZPL_VERSION);
9395 		return (1);
9396 	}
9397 
9398 	if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
9399 		(void) fprintf(stderr, gettext("Upgrade not performed because "
9400 		    "'compatibility' property set to '"
9401 		    ZPOOL_COMPAT_LEGACY "'.\n"));
9402 		return (1);
9403 	}
9404 
9405 	ret = zpool_upgrade(zhp, version);
9406 	if (ret != 0)
9407 		return (ret);
9408 
9409 	if (version >= SPA_VERSION_FEATURES) {
9410 		(void) printf(gettext("Successfully upgraded "
9411 		    "'%s' from version %llu to feature flags.\n"),
9412 		    zpool_get_name(zhp), (u_longlong_t)oldversion);
9413 	} else {
9414 		(void) printf(gettext("Successfully upgraded "
9415 		    "'%s' from version %llu to version %llu.\n"),
9416 		    zpool_get_name(zhp), (u_longlong_t)oldversion,
9417 		    (u_longlong_t)version);
9418 	}
9419 
9420 	return (0);
9421 }
9422 
9423 static int
9424 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
9425 {
9426 	int i, ret, count;
9427 	boolean_t firstff = B_TRUE;
9428 	nvlist_t *enabled = zpool_get_features(zhp);
9429 
9430 	char compat[ZFS_MAXPROPLEN];
9431 	if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
9432 	    ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
9433 		compat[0] = '\0';
9434 
9435 	boolean_t requested_features[SPA_FEATURES];
9436 	if (zpool_do_load_compat(compat, requested_features) !=
9437 	    ZPOOL_COMPATIBILITY_OK)
9438 		return (-1);
9439 
9440 	count = 0;
9441 	for (i = 0; i < SPA_FEATURES; i++) {
9442 		const char *fname = spa_feature_table[i].fi_uname;
9443 		const char *fguid = spa_feature_table[i].fi_guid;
9444 
9445 		if (!spa_feature_table[i].fi_zfs_mod_supported)
9446 			continue;
9447 
9448 		if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
9449 			char *propname;
9450 			verify(-1 != asprintf(&propname, "feature@%s", fname));
9451 			ret = zpool_set_prop(zhp, propname,
9452 			    ZFS_FEATURE_ENABLED);
9453 			if (ret != 0) {
9454 				free(propname);
9455 				return (ret);
9456 			}
9457 			count++;
9458 
9459 			if (firstff) {
9460 				(void) printf(gettext("Enabled the "
9461 				    "following features on '%s':\n"),
9462 				    zpool_get_name(zhp));
9463 				firstff = B_FALSE;
9464 			}
9465 			(void) printf(gettext("  %s\n"), fname);
9466 			free(propname);
9467 		}
9468 	}
9469 
9470 	if (countp != NULL)
9471 		*countp = count;
9472 	return (0);
9473 }
9474 
9475 static int
9476 upgrade_cb(zpool_handle_t *zhp, void *arg)
9477 {
9478 	upgrade_cbdata_t *cbp = arg;
9479 	nvlist_t *config;
9480 	uint64_t version;
9481 	boolean_t modified_pool = B_FALSE;
9482 	int ret;
9483 
9484 	config = zpool_get_config(zhp, NULL);
9485 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9486 	    &version) == 0);
9487 
9488 	assert(SPA_VERSION_IS_SUPPORTED(version));
9489 
9490 	if (version < cbp->cb_version) {
9491 		cbp->cb_first = B_FALSE;
9492 		ret = upgrade_version(zhp, cbp->cb_version);
9493 		if (ret != 0)
9494 			return (ret);
9495 		modified_pool = B_TRUE;
9496 
9497 		/*
9498 		 * If they did "zpool upgrade -a", then we could
9499 		 * be doing ioctls to different pools.  We need
9500 		 * to log this history once to each pool, and bypass
9501 		 * the normal history logging that happens in main().
9502 		 */
9503 		(void) zpool_log_history(g_zfs, history_str);
9504 		log_history = B_FALSE;
9505 	}
9506 
9507 	if (cbp->cb_version >= SPA_VERSION_FEATURES) {
9508 		int count;
9509 		ret = upgrade_enable_all(zhp, &count);
9510 		if (ret != 0)
9511 			return (ret);
9512 
9513 		if (count > 0) {
9514 			cbp->cb_first = B_FALSE;
9515 			modified_pool = B_TRUE;
9516 		}
9517 	}
9518 
9519 	if (modified_pool) {
9520 		(void) printf("\n");
9521 		(void) after_zpool_upgrade(zhp);
9522 	}
9523 
9524 	return (0);
9525 }
9526 
9527 static int
9528 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
9529 {
9530 	upgrade_cbdata_t *cbp = arg;
9531 	nvlist_t *config;
9532 	uint64_t version;
9533 
9534 	config = zpool_get_config(zhp, NULL);
9535 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9536 	    &version) == 0);
9537 
9538 	assert(SPA_VERSION_IS_SUPPORTED(version));
9539 
9540 	if (version < SPA_VERSION_FEATURES) {
9541 		if (cbp->cb_first) {
9542 			(void) printf(gettext("The following pools are "
9543 			    "formatted with legacy version numbers and can\n"
9544 			    "be upgraded to use feature flags.  After "
9545 			    "being upgraded, these pools\nwill no "
9546 			    "longer be accessible by software that does not "
9547 			    "support feature\nflags.\n\n"
9548 			    "Note that setting a pool's 'compatibility' "
9549 			    "feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
9550 			    "inhibit upgrades.\n\n"));
9551 			(void) printf(gettext("VER  POOL\n"));
9552 			(void) printf(gettext("---  ------------\n"));
9553 			cbp->cb_first = B_FALSE;
9554 		}
9555 
9556 		(void) printf("%2llu   %s\n", (u_longlong_t)version,
9557 		    zpool_get_name(zhp));
9558 	}
9559 
9560 	return (0);
9561 }
9562 
9563 static int
9564 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
9565 {
9566 	upgrade_cbdata_t *cbp = arg;
9567 	nvlist_t *config;
9568 	uint64_t version;
9569 
9570 	config = zpool_get_config(zhp, NULL);
9571 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9572 	    &version) == 0);
9573 
9574 	if (version >= SPA_VERSION_FEATURES) {
9575 		int i;
9576 		boolean_t poolfirst = B_TRUE;
9577 		nvlist_t *enabled = zpool_get_features(zhp);
9578 
9579 		for (i = 0; i < SPA_FEATURES; i++) {
9580 			const char *fguid = spa_feature_table[i].fi_guid;
9581 			const char *fname = spa_feature_table[i].fi_uname;
9582 
9583 			if (!spa_feature_table[i].fi_zfs_mod_supported)
9584 				continue;
9585 
9586 			if (!nvlist_exists(enabled, fguid)) {
9587 				if (cbp->cb_first) {
9588 					(void) printf(gettext("\nSome "
9589 					    "supported features are not "
9590 					    "enabled on the following pools. "
9591 					    "Once a\nfeature is enabled the "
9592 					    "pool may become incompatible with "
9593 					    "software\nthat does not support "
9594 					    "the feature. See "
9595 					    "zpool-features(7) for "
9596 					    "details.\n\n"
9597 					    "Note that the pool "
9598 					    "'compatibility' feature can be "
9599 					    "used to inhibit\nfeature "
9600 					    "upgrades.\n\n"));
9601 					(void) printf(gettext("POOL  "
9602 					    "FEATURE\n"));
9603 					(void) printf(gettext("------"
9604 					    "---------\n"));
9605 					cbp->cb_first = B_FALSE;
9606 				}
9607 
9608 				if (poolfirst) {
9609 					(void) printf(gettext("%s\n"),
9610 					    zpool_get_name(zhp));
9611 					poolfirst = B_FALSE;
9612 				}
9613 
9614 				(void) printf(gettext("      %s\n"), fname);
9615 			}
9616 			/*
9617 			 * If they did "zpool upgrade -a", then we could
9618 			 * be doing ioctls to different pools.  We need
9619 			 * to log this history once to each pool, and bypass
9620 			 * the normal history logging that happens in main().
9621 			 */
9622 			(void) zpool_log_history(g_zfs, history_str);
9623 			log_history = B_FALSE;
9624 		}
9625 	}
9626 
9627 	return (0);
9628 }
9629 
9630 static int
9631 upgrade_one(zpool_handle_t *zhp, void *data)
9632 {
9633 	boolean_t modified_pool = B_FALSE;
9634 	upgrade_cbdata_t *cbp = data;
9635 	uint64_t cur_version;
9636 	int ret;
9637 
9638 	if (strcmp("log", zpool_get_name(zhp)) == 0) {
9639 		(void) fprintf(stderr, gettext("'log' is now a reserved word\n"
9640 		    "Pool 'log' must be renamed using export and import"
9641 		    " to upgrade.\n"));
9642 		return (1);
9643 	}
9644 
9645 	cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
9646 	if (cur_version > cbp->cb_version) {
9647 		(void) printf(gettext("Pool '%s' is already formatted "
9648 		    "using more current version '%llu'.\n\n"),
9649 		    zpool_get_name(zhp), (u_longlong_t)cur_version);
9650 		return (0);
9651 	}
9652 
9653 	if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
9654 		(void) printf(gettext("Pool '%s' is already formatted "
9655 		    "using version %llu.\n\n"), zpool_get_name(zhp),
9656 		    (u_longlong_t)cbp->cb_version);
9657 		return (0);
9658 	}
9659 
9660 	if (cur_version != cbp->cb_version) {
9661 		modified_pool = B_TRUE;
9662 		ret = upgrade_version(zhp, cbp->cb_version);
9663 		if (ret != 0)
9664 			return (ret);
9665 	}
9666 
9667 	if (cbp->cb_version >= SPA_VERSION_FEATURES) {
9668 		int count = 0;
9669 		ret = upgrade_enable_all(zhp, &count);
9670 		if (ret != 0)
9671 			return (ret);
9672 
9673 		if (count != 0) {
9674 			modified_pool = B_TRUE;
9675 		} else if (cur_version == SPA_VERSION) {
9676 			(void) printf(gettext("Pool '%s' already has all "
9677 			    "supported and requested features enabled.\n"),
9678 			    zpool_get_name(zhp));
9679 		}
9680 	}
9681 
9682 	if (modified_pool) {
9683 		(void) printf("\n");
9684 		(void) after_zpool_upgrade(zhp);
9685 	}
9686 
9687 	return (0);
9688 }
9689 
9690 /*
9691  * zpool upgrade
9692  * zpool upgrade -v
9693  * zpool upgrade [-V version] <-a | pool ...>
9694  *
9695  * With no arguments, display downrev'd ZFS pool available for upgrade.
9696  * Individual pools can be upgraded by specifying the pool, and '-a' will
9697  * upgrade all pools.
9698  */
9699 int
9700 zpool_do_upgrade(int argc, char **argv)
9701 {
9702 	int c;
9703 	upgrade_cbdata_t cb = { 0 };
9704 	int ret = 0;
9705 	boolean_t showversions = B_FALSE;
9706 	boolean_t upgradeall = B_FALSE;
9707 	char *end;
9708 
9709 
9710 	/* check options */
9711 	while ((c = getopt(argc, argv, ":avV:")) != -1) {
9712 		switch (c) {
9713 		case 'a':
9714 			upgradeall = B_TRUE;
9715 			break;
9716 		case 'v':
9717 			showversions = B_TRUE;
9718 			break;
9719 		case 'V':
9720 			cb.cb_version = strtoll(optarg, &end, 10);
9721 			if (*end != '\0' ||
9722 			    !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
9723 				(void) fprintf(stderr,
9724 				    gettext("invalid version '%s'\n"), optarg);
9725 				usage(B_FALSE);
9726 			}
9727 			break;
9728 		case ':':
9729 			(void) fprintf(stderr, gettext("missing argument for "
9730 			    "'%c' option\n"), optopt);
9731 			usage(B_FALSE);
9732 			break;
9733 		case '?':
9734 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
9735 			    optopt);
9736 			usage(B_FALSE);
9737 		}
9738 	}
9739 
9740 	cb.cb_argc = argc;
9741 	cb.cb_argv = argv;
9742 	argc -= optind;
9743 	argv += optind;
9744 
9745 	if (cb.cb_version == 0) {
9746 		cb.cb_version = SPA_VERSION;
9747 	} else if (!upgradeall && argc == 0) {
9748 		(void) fprintf(stderr, gettext("-V option is "
9749 		    "incompatible with other arguments\n"));
9750 		usage(B_FALSE);
9751 	}
9752 
9753 	if (showversions) {
9754 		if (upgradeall || argc != 0) {
9755 			(void) fprintf(stderr, gettext("-v option is "
9756 			    "incompatible with other arguments\n"));
9757 			usage(B_FALSE);
9758 		}
9759 	} else if (upgradeall) {
9760 		if (argc != 0) {
9761 			(void) fprintf(stderr, gettext("-a option should not "
9762 			    "be used along with a pool name\n"));
9763 			usage(B_FALSE);
9764 		}
9765 	}
9766 
9767 	(void) printf("%s", gettext("This system supports ZFS pool feature "
9768 	    "flags.\n\n"));
9769 	if (showversions) {
9770 		int i;
9771 
9772 		(void) printf(gettext("The following features are "
9773 		    "supported:\n\n"));
9774 		(void) printf(gettext("FEAT DESCRIPTION\n"));
9775 		(void) printf("----------------------------------------------"
9776 		    "---------------\n");
9777 		for (i = 0; i < SPA_FEATURES; i++) {
9778 			zfeature_info_t *fi = &spa_feature_table[i];
9779 			if (!fi->fi_zfs_mod_supported)
9780 				continue;
9781 			const char *ro =
9782 			    (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
9783 			    " (read-only compatible)" : "";
9784 
9785 			(void) printf("%-37s%s\n", fi->fi_uname, ro);
9786 			(void) printf("     %s\n", fi->fi_desc);
9787 		}
9788 		(void) printf("\n");
9789 
9790 		(void) printf(gettext("The following legacy versions are also "
9791 		    "supported:\n\n"));
9792 		(void) printf(gettext("VER  DESCRIPTION\n"));
9793 		(void) printf("---  -----------------------------------------"
9794 		    "---------------\n");
9795 		(void) printf(gettext(" 1   Initial ZFS version\n"));
9796 		(void) printf(gettext(" 2   Ditto blocks "
9797 		    "(replicated metadata)\n"));
9798 		(void) printf(gettext(" 3   Hot spares and double parity "
9799 		    "RAID-Z\n"));
9800 		(void) printf(gettext(" 4   zpool history\n"));
9801 		(void) printf(gettext(" 5   Compression using the gzip "
9802 		    "algorithm\n"));
9803 		(void) printf(gettext(" 6   bootfs pool property\n"));
9804 		(void) printf(gettext(" 7   Separate intent log devices\n"));
9805 		(void) printf(gettext(" 8   Delegated administration\n"));
9806 		(void) printf(gettext(" 9   refquota and refreservation "
9807 		    "properties\n"));
9808 		(void) printf(gettext(" 10  Cache devices\n"));
9809 		(void) printf(gettext(" 11  Improved scrub performance\n"));
9810 		(void) printf(gettext(" 12  Snapshot properties\n"));
9811 		(void) printf(gettext(" 13  snapused property\n"));
9812 		(void) printf(gettext(" 14  passthrough-x aclinherit\n"));
9813 		(void) printf(gettext(" 15  user/group space accounting\n"));
9814 		(void) printf(gettext(" 16  stmf property support\n"));
9815 		(void) printf(gettext(" 17  Triple-parity RAID-Z\n"));
9816 		(void) printf(gettext(" 18  Snapshot user holds\n"));
9817 		(void) printf(gettext(" 19  Log device removal\n"));
9818 		(void) printf(gettext(" 20  Compression using zle "
9819 		    "(zero-length encoding)\n"));
9820 		(void) printf(gettext(" 21  Deduplication\n"));
9821 		(void) printf(gettext(" 22  Received properties\n"));
9822 		(void) printf(gettext(" 23  Slim ZIL\n"));
9823 		(void) printf(gettext(" 24  System attributes\n"));
9824 		(void) printf(gettext(" 25  Improved scrub stats\n"));
9825 		(void) printf(gettext(" 26  Improved snapshot deletion "
9826 		    "performance\n"));
9827 		(void) printf(gettext(" 27  Improved snapshot creation "
9828 		    "performance\n"));
9829 		(void) printf(gettext(" 28  Multiple vdev replacements\n"));
9830 		(void) printf(gettext("\nFor more information on a particular "
9831 		    "version, including supported releases,\n"));
9832 		(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
9833 	} else if (argc == 0 && upgradeall) {
9834 		cb.cb_first = B_TRUE;
9835 		ret = zpool_iter(g_zfs, upgrade_cb, &cb);
9836 		if (ret == 0 && cb.cb_first) {
9837 			if (cb.cb_version == SPA_VERSION) {
9838 				(void) printf(gettext("All pools are already "
9839 				    "formatted using feature flags.\n\n"));
9840 				(void) printf(gettext("Every feature flags "
9841 				    "pool already has all supported and "
9842 				    "requested features enabled.\n"));
9843 			} else {
9844 				(void) printf(gettext("All pools are already "
9845 				    "formatted with version %llu or higher.\n"),
9846 				    (u_longlong_t)cb.cb_version);
9847 			}
9848 		}
9849 	} else if (argc == 0) {
9850 		cb.cb_first = B_TRUE;
9851 		ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
9852 		assert(ret == 0);
9853 
9854 		if (cb.cb_first) {
9855 			(void) printf(gettext("All pools are formatted "
9856 			    "using feature flags.\n\n"));
9857 		} else {
9858 			(void) printf(gettext("\nUse 'zpool upgrade -v' "
9859 			    "for a list of available legacy versions.\n"));
9860 		}
9861 
9862 		cb.cb_first = B_TRUE;
9863 		ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
9864 		assert(ret == 0);
9865 
9866 		if (cb.cb_first) {
9867 			(void) printf(gettext("Every feature flags pool has "
9868 			    "all supported and requested features enabled.\n"));
9869 		} else {
9870 			(void) printf(gettext("\n"));
9871 		}
9872 	} else {
9873 		ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
9874 		    B_FALSE, upgrade_one, &cb);
9875 	}
9876 
9877 	return (ret);
9878 }
9879 
9880 typedef struct hist_cbdata {
9881 	boolean_t first;
9882 	boolean_t longfmt;
9883 	boolean_t internal;
9884 } hist_cbdata_t;
9885 
9886 static void
9887 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
9888 {
9889 	nvlist_t **records;
9890 	uint_t numrecords;
9891 	int i;
9892 
9893 	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
9894 	    &records, &numrecords) == 0);
9895 	for (i = 0; i < numrecords; i++) {
9896 		nvlist_t *rec = records[i];
9897 		char tbuf[64] = "";
9898 
9899 		if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
9900 			time_t tsec;
9901 			struct tm t;
9902 
9903 			tsec = fnvlist_lookup_uint64(records[i],
9904 			    ZPOOL_HIST_TIME);
9905 			(void) localtime_r(&tsec, &t);
9906 			(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
9907 		}
9908 
9909 		if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
9910 			uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
9911 			    ZPOOL_HIST_ELAPSED_NS);
9912 			(void) snprintf(tbuf + strlen(tbuf),
9913 			    sizeof (tbuf) - strlen(tbuf),
9914 			    " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
9915 		}
9916 
9917 		if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
9918 			(void) printf("%s %s", tbuf,
9919 			    fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
9920 		} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
9921 			int ievent =
9922 			    fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
9923 			if (!cb->internal)
9924 				continue;
9925 			if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
9926 				(void) printf("%s unrecognized record:\n",
9927 				    tbuf);
9928 				dump_nvlist(rec, 4);
9929 				continue;
9930 			}
9931 			(void) printf("%s [internal %s txg:%lld] %s", tbuf,
9932 			    zfs_history_event_names[ievent],
9933 			    (longlong_t)fnvlist_lookup_uint64(
9934 			    rec, ZPOOL_HIST_TXG),
9935 			    fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
9936 		} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
9937 			if (!cb->internal)
9938 				continue;
9939 			(void) printf("%s [txg:%lld] %s", tbuf,
9940 			    (longlong_t)fnvlist_lookup_uint64(
9941 			    rec, ZPOOL_HIST_TXG),
9942 			    fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
9943 			if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
9944 				(void) printf(" %s (%llu)",
9945 				    fnvlist_lookup_string(rec,
9946 				    ZPOOL_HIST_DSNAME),
9947 				    (u_longlong_t)fnvlist_lookup_uint64(rec,
9948 				    ZPOOL_HIST_DSID));
9949 			}
9950 			(void) printf(" %s", fnvlist_lookup_string(rec,
9951 			    ZPOOL_HIST_INT_STR));
9952 		} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
9953 			if (!cb->internal)
9954 				continue;
9955 			(void) printf("%s ioctl %s\n", tbuf,
9956 			    fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
9957 			if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
9958 				(void) printf("    input:\n");
9959 				dump_nvlist(fnvlist_lookup_nvlist(rec,
9960 				    ZPOOL_HIST_INPUT_NVL), 8);
9961 			}
9962 			if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
9963 				(void) printf("    output:\n");
9964 				dump_nvlist(fnvlist_lookup_nvlist(rec,
9965 				    ZPOOL_HIST_OUTPUT_NVL), 8);
9966 			}
9967 			if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
9968 				(void) printf("    output nvlist omitted; "
9969 				    "original size: %lldKB\n",
9970 				    (longlong_t)fnvlist_lookup_int64(rec,
9971 				    ZPOOL_HIST_OUTPUT_SIZE) / 1024);
9972 			}
9973 			if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
9974 				(void) printf("    errno: %lld\n",
9975 				    (longlong_t)fnvlist_lookup_int64(rec,
9976 				    ZPOOL_HIST_ERRNO));
9977 			}
9978 		} else {
9979 			if (!cb->internal)
9980 				continue;
9981 			(void) printf("%s unrecognized record:\n", tbuf);
9982 			dump_nvlist(rec, 4);
9983 		}
9984 
9985 		if (!cb->longfmt) {
9986 			(void) printf("\n");
9987 			continue;
9988 		}
9989 		(void) printf(" [");
9990 		if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
9991 			uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
9992 			struct passwd *pwd = getpwuid(who);
9993 			(void) printf("user %d ", (int)who);
9994 			if (pwd != NULL)
9995 				(void) printf("(%s) ", pwd->pw_name);
9996 		}
9997 		if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
9998 			(void) printf("on %s",
9999 			    fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
10000 		}
10001 		if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
10002 			(void) printf(":%s",
10003 			    fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
10004 		}
10005 
10006 		(void) printf("]");
10007 		(void) printf("\n");
10008 	}
10009 }
10010 
10011 /*
10012  * Print out the command history for a specific pool.
10013  */
10014 static int
10015 get_history_one(zpool_handle_t *zhp, void *data)
10016 {
10017 	nvlist_t *nvhis;
10018 	int ret;
10019 	hist_cbdata_t *cb = (hist_cbdata_t *)data;
10020 	uint64_t off = 0;
10021 	boolean_t eof = B_FALSE;
10022 
10023 	cb->first = B_FALSE;
10024 
10025 	(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
10026 
10027 	while (!eof) {
10028 		if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
10029 			return (ret);
10030 
10031 		print_history_records(nvhis, cb);
10032 		nvlist_free(nvhis);
10033 	}
10034 	(void) printf("\n");
10035 
10036 	return (ret);
10037 }
10038 
10039 /*
10040  * zpool history <pool>
10041  *
10042  * Displays the history of commands that modified pools.
10043  */
10044 int
10045 zpool_do_history(int argc, char **argv)
10046 {
10047 	hist_cbdata_t cbdata = { 0 };
10048 	int ret;
10049 	int c;
10050 
10051 	cbdata.first = B_TRUE;
10052 	/* check options */
10053 	while ((c = getopt(argc, argv, "li")) != -1) {
10054 		switch (c) {
10055 		case 'l':
10056 			cbdata.longfmt = B_TRUE;
10057 			break;
10058 		case 'i':
10059 			cbdata.internal = B_TRUE;
10060 			break;
10061 		case '?':
10062 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
10063 			    optopt);
10064 			usage(B_FALSE);
10065 		}
10066 	}
10067 	argc -= optind;
10068 	argv += optind;
10069 
10070 	ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
10071 	    B_FALSE, get_history_one, &cbdata);
10072 
10073 	if (argc == 0 && cbdata.first == B_TRUE) {
10074 		(void) fprintf(stderr, gettext("no pools available\n"));
10075 		return (0);
10076 	}
10077 
10078 	return (ret);
10079 }
10080 
10081 typedef struct ev_opts {
10082 	int verbose;
10083 	int scripted;
10084 	int follow;
10085 	int clear;
10086 	char poolname[ZFS_MAX_DATASET_NAME_LEN];
10087 } ev_opts_t;
10088 
10089 static void
10090 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
10091 {
10092 	char ctime_str[26], str[32];
10093 	const char *ptr;
10094 	int64_t *tv;
10095 	uint_t n;
10096 
10097 	verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
10098 	memset(str, ' ', 32);
10099 	(void) ctime_r((const time_t *)&tv[0], ctime_str);
10100 	(void) memcpy(str, ctime_str+4,  6);		/* 'Jun 30' */
10101 	(void) memcpy(str+7, ctime_str+20, 4);		/* '1993' */
10102 	(void) memcpy(str+12, ctime_str+11, 8);		/* '21:49:08' */
10103 	(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
10104 	if (opts->scripted)
10105 		(void) printf(gettext("%s\t"), str);
10106 	else
10107 		(void) printf(gettext("%s "), str);
10108 
10109 	verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
10110 	(void) printf(gettext("%s\n"), ptr);
10111 }
10112 
10113 static void
10114 zpool_do_events_nvprint(nvlist_t *nvl, int depth)
10115 {
10116 	nvpair_t *nvp;
10117 
10118 	for (nvp = nvlist_next_nvpair(nvl, NULL);
10119 	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
10120 
10121 		data_type_t type = nvpair_type(nvp);
10122 		const char *name = nvpair_name(nvp);
10123 
10124 		boolean_t b;
10125 		uint8_t i8;
10126 		uint16_t i16;
10127 		uint32_t i32;
10128 		uint64_t i64;
10129 		const char *str;
10130 		nvlist_t *cnv;
10131 
10132 		printf(gettext("%*s%s = "), depth, "", name);
10133 
10134 		switch (type) {
10135 		case DATA_TYPE_BOOLEAN:
10136 			printf(gettext("%s"), "1");
10137 			break;
10138 
10139 		case DATA_TYPE_BOOLEAN_VALUE:
10140 			(void) nvpair_value_boolean_value(nvp, &b);
10141 			printf(gettext("%s"), b ? "1" : "0");
10142 			break;
10143 
10144 		case DATA_TYPE_BYTE:
10145 			(void) nvpair_value_byte(nvp, &i8);
10146 			printf(gettext("0x%x"), i8);
10147 			break;
10148 
10149 		case DATA_TYPE_INT8:
10150 			(void) nvpair_value_int8(nvp, (void *)&i8);
10151 			printf(gettext("0x%x"), i8);
10152 			break;
10153 
10154 		case DATA_TYPE_UINT8:
10155 			(void) nvpair_value_uint8(nvp, &i8);
10156 			printf(gettext("0x%x"), i8);
10157 			break;
10158 
10159 		case DATA_TYPE_INT16:
10160 			(void) nvpair_value_int16(nvp, (void *)&i16);
10161 			printf(gettext("0x%x"), i16);
10162 			break;
10163 
10164 		case DATA_TYPE_UINT16:
10165 			(void) nvpair_value_uint16(nvp, &i16);
10166 			printf(gettext("0x%x"), i16);
10167 			break;
10168 
10169 		case DATA_TYPE_INT32:
10170 			(void) nvpair_value_int32(nvp, (void *)&i32);
10171 			printf(gettext("0x%x"), i32);
10172 			break;
10173 
10174 		case DATA_TYPE_UINT32:
10175 			(void) nvpair_value_uint32(nvp, &i32);
10176 			printf(gettext("0x%x"), i32);
10177 			break;
10178 
10179 		case DATA_TYPE_INT64:
10180 			(void) nvpair_value_int64(nvp, (void *)&i64);
10181 			printf(gettext("0x%llx"), (u_longlong_t)i64);
10182 			break;
10183 
10184 		case DATA_TYPE_UINT64:
10185 			(void) nvpair_value_uint64(nvp, &i64);
10186 			/*
10187 			 * translate vdev state values to readable
10188 			 * strings to aide zpool events consumers
10189 			 */
10190 			if (strcmp(name,
10191 			    FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
10192 			    strcmp(name,
10193 			    FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
10194 				printf(gettext("\"%s\" (0x%llx)"),
10195 				    zpool_state_to_name(i64, VDEV_AUX_NONE),
10196 				    (u_longlong_t)i64);
10197 			} else {
10198 				printf(gettext("0x%llx"), (u_longlong_t)i64);
10199 			}
10200 			break;
10201 
10202 		case DATA_TYPE_HRTIME:
10203 			(void) nvpair_value_hrtime(nvp, (void *)&i64);
10204 			printf(gettext("0x%llx"), (u_longlong_t)i64);
10205 			break;
10206 
10207 		case DATA_TYPE_STRING:
10208 			(void) nvpair_value_string(nvp, &str);
10209 			printf(gettext("\"%s\""), str ? str : "<NULL>");
10210 			break;
10211 
10212 		case DATA_TYPE_NVLIST:
10213 			printf(gettext("(embedded nvlist)\n"));
10214 			(void) nvpair_value_nvlist(nvp, &cnv);
10215 			zpool_do_events_nvprint(cnv, depth + 8);
10216 			printf(gettext("%*s(end %s)"), depth, "", name);
10217 			break;
10218 
10219 		case DATA_TYPE_NVLIST_ARRAY: {
10220 			nvlist_t **val;
10221 			uint_t i, nelem;
10222 
10223 			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
10224 			printf(gettext("(%d embedded nvlists)\n"), nelem);
10225 			for (i = 0; i < nelem; i++) {
10226 				printf(gettext("%*s%s[%d] = %s\n"),
10227 				    depth, "", name, i, "(embedded nvlist)");
10228 				zpool_do_events_nvprint(val[i], depth + 8);
10229 				printf(gettext("%*s(end %s[%i])\n"),
10230 				    depth, "", name, i);
10231 			}
10232 			printf(gettext("%*s(end %s)\n"), depth, "", name);
10233 			}
10234 			break;
10235 
10236 		case DATA_TYPE_INT8_ARRAY: {
10237 			int8_t *val;
10238 			uint_t i, nelem;
10239 
10240 			(void) nvpair_value_int8_array(nvp, &val, &nelem);
10241 			for (i = 0; i < nelem; i++)
10242 				printf(gettext("0x%x "), val[i]);
10243 
10244 			break;
10245 			}
10246 
10247 		case DATA_TYPE_UINT8_ARRAY: {
10248 			uint8_t *val;
10249 			uint_t i, nelem;
10250 
10251 			(void) nvpair_value_uint8_array(nvp, &val, &nelem);
10252 			for (i = 0; i < nelem; i++)
10253 				printf(gettext("0x%x "), val[i]);
10254 
10255 			break;
10256 			}
10257 
10258 		case DATA_TYPE_INT16_ARRAY: {
10259 			int16_t *val;
10260 			uint_t i, nelem;
10261 
10262 			(void) nvpair_value_int16_array(nvp, &val, &nelem);
10263 			for (i = 0; i < nelem; i++)
10264 				printf(gettext("0x%x "), val[i]);
10265 
10266 			break;
10267 			}
10268 
10269 		case DATA_TYPE_UINT16_ARRAY: {
10270 			uint16_t *val;
10271 			uint_t i, nelem;
10272 
10273 			(void) nvpair_value_uint16_array(nvp, &val, &nelem);
10274 			for (i = 0; i < nelem; i++)
10275 				printf(gettext("0x%x "), val[i]);
10276 
10277 			break;
10278 			}
10279 
10280 		case DATA_TYPE_INT32_ARRAY: {
10281 			int32_t *val;
10282 			uint_t i, nelem;
10283 
10284 			(void) nvpair_value_int32_array(nvp, &val, &nelem);
10285 			for (i = 0; i < nelem; i++)
10286 				printf(gettext("0x%x "), val[i]);
10287 
10288 			break;
10289 			}
10290 
10291 		case DATA_TYPE_UINT32_ARRAY: {
10292 			uint32_t *val;
10293 			uint_t i, nelem;
10294 
10295 			(void) nvpair_value_uint32_array(nvp, &val, &nelem);
10296 			for (i = 0; i < nelem; i++)
10297 				printf(gettext("0x%x "), val[i]);
10298 
10299 			break;
10300 			}
10301 
10302 		case DATA_TYPE_INT64_ARRAY: {
10303 			int64_t *val;
10304 			uint_t i, nelem;
10305 
10306 			(void) nvpair_value_int64_array(nvp, &val, &nelem);
10307 			for (i = 0; i < nelem; i++)
10308 				printf(gettext("0x%llx "),
10309 				    (u_longlong_t)val[i]);
10310 
10311 			break;
10312 			}
10313 
10314 		case DATA_TYPE_UINT64_ARRAY: {
10315 			uint64_t *val;
10316 			uint_t i, nelem;
10317 
10318 			(void) nvpair_value_uint64_array(nvp, &val, &nelem);
10319 			for (i = 0; i < nelem; i++)
10320 				printf(gettext("0x%llx "),
10321 				    (u_longlong_t)val[i]);
10322 
10323 			break;
10324 			}
10325 
10326 		case DATA_TYPE_STRING_ARRAY: {
10327 			const char **str;
10328 			uint_t i, nelem;
10329 
10330 			(void) nvpair_value_string_array(nvp, &str, &nelem);
10331 			for (i = 0; i < nelem; i++)
10332 				printf(gettext("\"%s\" "),
10333 				    str[i] ? str[i] : "<NULL>");
10334 
10335 			break;
10336 			}
10337 
10338 		case DATA_TYPE_BOOLEAN_ARRAY:
10339 		case DATA_TYPE_BYTE_ARRAY:
10340 		case DATA_TYPE_DOUBLE:
10341 		case DATA_TYPE_DONTCARE:
10342 		case DATA_TYPE_UNKNOWN:
10343 			printf(gettext("<unknown>"));
10344 			break;
10345 		}
10346 
10347 		printf(gettext("\n"));
10348 	}
10349 }
10350 
10351 static int
10352 zpool_do_events_next(ev_opts_t *opts)
10353 {
10354 	nvlist_t *nvl;
10355 	int zevent_fd, ret, dropped;
10356 	const char *pool;
10357 
10358 	zevent_fd = open(ZFS_DEV, O_RDWR);
10359 	VERIFY(zevent_fd >= 0);
10360 
10361 	if (!opts->scripted)
10362 		(void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
10363 
10364 	while (1) {
10365 		ret = zpool_events_next(g_zfs, &nvl, &dropped,
10366 		    (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
10367 		if (ret || nvl == NULL)
10368 			break;
10369 
10370 		if (dropped > 0)
10371 			(void) printf(gettext("dropped %d events\n"), dropped);
10372 
10373 		if (strlen(opts->poolname) > 0 &&
10374 		    nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
10375 		    strcmp(opts->poolname, pool) != 0)
10376 			continue;
10377 
10378 		zpool_do_events_short(nvl, opts);
10379 
10380 		if (opts->verbose) {
10381 			zpool_do_events_nvprint(nvl, 8);
10382 			printf(gettext("\n"));
10383 		}
10384 		(void) fflush(stdout);
10385 
10386 		nvlist_free(nvl);
10387 	}
10388 
10389 	VERIFY(0 == close(zevent_fd));
10390 
10391 	return (ret);
10392 }
10393 
10394 static int
10395 zpool_do_events_clear(void)
10396 {
10397 	int count, ret;
10398 
10399 	ret = zpool_events_clear(g_zfs, &count);
10400 	if (!ret)
10401 		(void) printf(gettext("cleared %d events\n"), count);
10402 
10403 	return (ret);
10404 }
10405 
10406 /*
10407  * zpool events [-vHf [pool] | -c]
10408  *
10409  * Displays events logs by ZFS.
10410  */
10411 int
10412 zpool_do_events(int argc, char **argv)
10413 {
10414 	ev_opts_t opts = { 0 };
10415 	int ret;
10416 	int c;
10417 
10418 	/* check options */
10419 	while ((c = getopt(argc, argv, "vHfc")) != -1) {
10420 		switch (c) {
10421 		case 'v':
10422 			opts.verbose = 1;
10423 			break;
10424 		case 'H':
10425 			opts.scripted = 1;
10426 			break;
10427 		case 'f':
10428 			opts.follow = 1;
10429 			break;
10430 		case 'c':
10431 			opts.clear = 1;
10432 			break;
10433 		case '?':
10434 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
10435 			    optopt);
10436 			usage(B_FALSE);
10437 		}
10438 	}
10439 	argc -= optind;
10440 	argv += optind;
10441 
10442 	if (argc > 1) {
10443 		(void) fprintf(stderr, gettext("too many arguments\n"));
10444 		usage(B_FALSE);
10445 	} else if (argc == 1) {
10446 		(void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
10447 		if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
10448 			(void) fprintf(stderr,
10449 			    gettext("invalid pool name '%s'\n"), opts.poolname);
10450 			usage(B_FALSE);
10451 		}
10452 	}
10453 
10454 	if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
10455 	    opts.clear) {
10456 		(void) fprintf(stderr,
10457 		    gettext("invalid options combined with -c\n"));
10458 		usage(B_FALSE);
10459 	}
10460 
10461 	if (opts.clear)
10462 		ret = zpool_do_events_clear();
10463 	else
10464 		ret = zpool_do_events_next(&opts);
10465 
10466 	return (ret);
10467 }
10468 
10469 static int
10470 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
10471 {
10472 	zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
10473 	char value[ZFS_MAXPROPLEN];
10474 	zprop_source_t srctype;
10475 
10476 	for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
10477 	    pl = pl->pl_next) {
10478 		char *prop_name;
10479 		/*
10480 		 * If the first property is pool name, it is a special
10481 		 * placeholder that we can skip. This will also skip
10482 		 * over the name property when 'all' is specified.
10483 		 */
10484 		if (pl->pl_prop == ZPOOL_PROP_NAME &&
10485 		    pl == cbp->cb_proplist)
10486 			continue;
10487 
10488 		if (pl->pl_prop == ZPROP_INVAL) {
10489 			prop_name = pl->pl_user_prop;
10490 		} else {
10491 			prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
10492 		}
10493 		if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
10494 		    prop_name, value, sizeof (value), &srctype,
10495 		    cbp->cb_literal) == 0) {
10496 			zprop_print_one_property(vdevname, cbp, prop_name,
10497 			    value, srctype, NULL, NULL);
10498 		}
10499 	}
10500 
10501 	return (0);
10502 }
10503 
10504 static int
10505 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
10506 {
10507 	zpool_handle_t *zhp = zhp_data;
10508 	zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
10509 	char *vdevname;
10510 	const char *type;
10511 	int ret;
10512 
10513 	/*
10514 	 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
10515 	 * pool name for display purposes, which is not desired. Fallback to
10516 	 * zpool_vdev_name() when not dealing with the root vdev.
10517 	 */
10518 	type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
10519 	if (zhp != NULL && strcmp(type, "root") == 0)
10520 		vdevname = strdup("root-0");
10521 	else
10522 		vdevname = zpool_vdev_name(g_zfs, zhp, nv,
10523 		    cbp->cb_vdevs.cb_name_flags);
10524 
10525 	(void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
10526 
10527 	ret = get_callback_vdev(zhp, vdevname, data);
10528 
10529 	free(vdevname);
10530 
10531 	return (ret);
10532 }
10533 
10534 static int
10535 get_callback(zpool_handle_t *zhp, void *data)
10536 {
10537 	zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
10538 	char value[ZFS_MAXPROPLEN];
10539 	zprop_source_t srctype;
10540 	zprop_list_t *pl;
10541 	int vid;
10542 
10543 	if (cbp->cb_type == ZFS_TYPE_VDEV) {
10544 		if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
10545 			for_each_vdev(zhp, get_callback_vdev_cb, data);
10546 		} else {
10547 			/* Adjust column widths for vdev properties */
10548 			for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
10549 			    vid++) {
10550 				vdev_expand_proplist(zhp,
10551 				    cbp->cb_vdevs.cb_names[vid],
10552 				    &cbp->cb_proplist);
10553 			}
10554 			/* Display the properties */
10555 			for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
10556 			    vid++) {
10557 				get_callback_vdev(zhp,
10558 				    cbp->cb_vdevs.cb_names[vid], data);
10559 			}
10560 		}
10561 	} else {
10562 		assert(cbp->cb_type == ZFS_TYPE_POOL);
10563 		for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
10564 			/*
10565 			 * Skip the special fake placeholder. This will also
10566 			 * skip over the name property when 'all' is specified.
10567 			 */
10568 			if (pl->pl_prop == ZPOOL_PROP_NAME &&
10569 			    pl == cbp->cb_proplist)
10570 				continue;
10571 
10572 			if (pl->pl_prop == ZPROP_INVAL &&
10573 			    zfs_prop_user(pl->pl_user_prop)) {
10574 				srctype = ZPROP_SRC_LOCAL;
10575 
10576 				if (zpool_get_userprop(zhp, pl->pl_user_prop,
10577 				    value, sizeof (value), &srctype) != 0)
10578 					continue;
10579 
10580 				zprop_print_one_property(zpool_get_name(zhp),
10581 				    cbp, pl->pl_user_prop, value, srctype,
10582 				    NULL, NULL);
10583 			} else if (pl->pl_prop == ZPROP_INVAL &&
10584 			    (zpool_prop_feature(pl->pl_user_prop) ||
10585 			    zpool_prop_unsupported(pl->pl_user_prop))) {
10586 				srctype = ZPROP_SRC_LOCAL;
10587 
10588 				if (zpool_prop_get_feature(zhp,
10589 				    pl->pl_user_prop, value,
10590 				    sizeof (value)) == 0) {
10591 					zprop_print_one_property(
10592 					    zpool_get_name(zhp), cbp,
10593 					    pl->pl_user_prop, value, srctype,
10594 					    NULL, NULL);
10595 				}
10596 			} else {
10597 				if (zpool_get_prop(zhp, pl->pl_prop, value,
10598 				    sizeof (value), &srctype,
10599 				    cbp->cb_literal) != 0)
10600 					continue;
10601 
10602 				zprop_print_one_property(zpool_get_name(zhp),
10603 				    cbp, zpool_prop_to_name(pl->pl_prop),
10604 				    value, srctype, NULL, NULL);
10605 			}
10606 		}
10607 	}
10608 
10609 	return (0);
10610 }
10611 
10612 /*
10613  * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
10614  *
10615  *	-H	Scripted mode.  Don't display headers, and separate properties
10616  *		by a single tab.
10617  *	-o	List of columns to display.  Defaults to
10618  *		"name,property,value,source".
10619  * 	-p	Display values in parsable (exact) format.
10620  *
10621  * Get properties of pools in the system. Output space statistics
10622  * for each one as well as other attributes.
10623  */
10624 int
10625 zpool_do_get(int argc, char **argv)
10626 {
10627 	zprop_get_cbdata_t cb = { 0 };
10628 	zprop_list_t fake_name = { 0 };
10629 	int ret;
10630 	int c, i;
10631 	char *propstr = NULL;
10632 	char *vdev = NULL;
10633 
10634 	cb.cb_first = B_TRUE;
10635 
10636 	/*
10637 	 * Set up default columns and sources.
10638 	 */
10639 	cb.cb_sources = ZPROP_SRC_ALL;
10640 	cb.cb_columns[0] = GET_COL_NAME;
10641 	cb.cb_columns[1] = GET_COL_PROPERTY;
10642 	cb.cb_columns[2] = GET_COL_VALUE;
10643 	cb.cb_columns[3] = GET_COL_SOURCE;
10644 	cb.cb_type = ZFS_TYPE_POOL;
10645 	cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10646 	current_prop_type = cb.cb_type;
10647 
10648 	/* check options */
10649 	while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
10650 		switch (c) {
10651 		case 'p':
10652 			cb.cb_literal = B_TRUE;
10653 			break;
10654 		case 'H':
10655 			cb.cb_scripted = B_TRUE;
10656 			break;
10657 		case 'o':
10658 			memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
10659 			i = 0;
10660 
10661 			for (char *tok; (tok = strsep(&optarg, ",")); ) {
10662 				static const char *const col_opts[] =
10663 				{ "name", "property", "value", "source",
10664 				    "all" };
10665 				static const zfs_get_column_t col_cols[] =
10666 				{ GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
10667 				    GET_COL_SOURCE };
10668 
10669 				if (i == ZFS_GET_NCOLS - 1) {
10670 					(void) fprintf(stderr, gettext("too "
10671 					"many fields given to -o "
10672 					"option\n"));
10673 					usage(B_FALSE);
10674 				}
10675 
10676 				for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
10677 					if (strcmp(tok, col_opts[c]) == 0)
10678 						goto found;
10679 
10680 				(void) fprintf(stderr,
10681 				    gettext("invalid column name '%s'\n"), tok);
10682 				usage(B_FALSE);
10683 
10684 found:
10685 				if (c >= 4) {
10686 					if (i > 0) {
10687 						(void) fprintf(stderr,
10688 						    gettext("\"all\" conflicts "
10689 						    "with specific fields "
10690 						    "given to -o option\n"));
10691 						usage(B_FALSE);
10692 					}
10693 
10694 					memcpy(cb.cb_columns, col_cols,
10695 					    sizeof (col_cols));
10696 					i = ZFS_GET_NCOLS - 1;
10697 				} else
10698 					cb.cb_columns[i++] = col_cols[c];
10699 			}
10700 			break;
10701 		case '?':
10702 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
10703 			    optopt);
10704 			usage(B_FALSE);
10705 		}
10706 	}
10707 
10708 	argc -= optind;
10709 	argv += optind;
10710 
10711 	if (argc < 1) {
10712 		(void) fprintf(stderr, gettext("missing property "
10713 		    "argument\n"));
10714 		usage(B_FALSE);
10715 	}
10716 
10717 	/* Properties list is needed later by zprop_get_list() */
10718 	propstr = argv[0];
10719 
10720 	argc--;
10721 	argv++;
10722 
10723 	if (argc == 0) {
10724 		/* No args, so just print the defaults. */
10725 	} else if (are_all_pools(argc, argv)) {
10726 		/* All the args are pool names */
10727 	} else if (are_all_pools(1, argv)) {
10728 		/* The first arg is a pool name */
10729 		if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
10730 		    (argc == 2 && strcmp(argv[1], "root") == 0) ||
10731 		    are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
10732 		    &cb.cb_vdevs)) {
10733 
10734 			if (strcmp(argv[1], "root") == 0)
10735 				vdev = strdup("root-0");
10736 			else
10737 				vdev = strdup(argv[1]);
10738 
10739 			/* ... and the rest are vdev names */
10740 			cb.cb_vdevs.cb_names = &vdev;
10741 			cb.cb_vdevs.cb_names_count = argc - 1;
10742 			cb.cb_type = ZFS_TYPE_VDEV;
10743 			argc = 1; /* One pool to process */
10744 		} else {
10745 			fprintf(stderr, gettext("Expected a list of vdevs in"
10746 			    " \"%s\", but got:\n"), argv[0]);
10747 			error_list_unresolved_vdevs(argc - 1, argv + 1,
10748 			    argv[0], &cb.cb_vdevs);
10749 			fprintf(stderr, "\n");
10750 			usage(B_FALSE);
10751 			return (1);
10752 		}
10753 	} else {
10754 		/*
10755 		 * The first arg isn't a pool name,
10756 		 */
10757 		fprintf(stderr, gettext("missing pool name.\n"));
10758 		fprintf(stderr, "\n");
10759 		usage(B_FALSE);
10760 		return (1);
10761 	}
10762 
10763 	if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
10764 	    cb.cb_type) != 0) {
10765 		/* Use correct list of valid properties (pool or vdev) */
10766 		current_prop_type = cb.cb_type;
10767 		usage(B_FALSE);
10768 	}
10769 
10770 	if (cb.cb_proplist != NULL) {
10771 		fake_name.pl_prop = ZPOOL_PROP_NAME;
10772 		fake_name.pl_width = strlen(gettext("NAME"));
10773 		fake_name.pl_next = cb.cb_proplist;
10774 		cb.cb_proplist = &fake_name;
10775 	}
10776 
10777 	ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
10778 	    cb.cb_literal, get_callback, &cb);
10779 
10780 	if (cb.cb_proplist == &fake_name)
10781 		zprop_free_list(fake_name.pl_next);
10782 	else
10783 		zprop_free_list(cb.cb_proplist);
10784 
10785 	if (vdev != NULL)
10786 		free(vdev);
10787 
10788 	return (ret);
10789 }
10790 
10791 typedef struct set_cbdata {
10792 	char *cb_propname;
10793 	char *cb_value;
10794 	zfs_type_t cb_type;
10795 	vdev_cbdata_t cb_vdevs;
10796 	boolean_t cb_any_successful;
10797 } set_cbdata_t;
10798 
10799 static int
10800 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
10801 {
10802 	int error;
10803 
10804 	/* Check if we have out-of-bounds features */
10805 	if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
10806 		boolean_t features[SPA_FEATURES];
10807 		if (zpool_do_load_compat(cb->cb_value, features) !=
10808 		    ZPOOL_COMPATIBILITY_OK)
10809 			return (-1);
10810 
10811 		nvlist_t *enabled = zpool_get_features(zhp);
10812 		spa_feature_t i;
10813 		for (i = 0; i < SPA_FEATURES; i++) {
10814 			const char *fguid = spa_feature_table[i].fi_guid;
10815 			if (nvlist_exists(enabled, fguid) && !features[i])
10816 				break;
10817 		}
10818 		if (i < SPA_FEATURES)
10819 			(void) fprintf(stderr, gettext("Warning: one or "
10820 			    "more features already enabled on pool '%s'\n"
10821 			    "are not present in this compatibility set.\n"),
10822 			    zpool_get_name(zhp));
10823 	}
10824 
10825 	/* if we're setting a feature, check it's in compatibility set */
10826 	if (zpool_prop_feature(cb->cb_propname) &&
10827 	    strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
10828 		char *fname = strchr(cb->cb_propname, '@') + 1;
10829 		spa_feature_t f;
10830 
10831 		if (zfeature_lookup_name(fname, &f) == 0) {
10832 			char compat[ZFS_MAXPROPLEN];
10833 			if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
10834 			    compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
10835 				compat[0] = '\0';
10836 
10837 			boolean_t features[SPA_FEATURES];
10838 			if (zpool_do_load_compat(compat, features) !=
10839 			    ZPOOL_COMPATIBILITY_OK) {
10840 				(void) fprintf(stderr, gettext("Error: "
10841 				    "cannot enable feature '%s' on pool '%s'\n"
10842 				    "because the pool's 'compatibility' "
10843 				    "property cannot be parsed.\n"),
10844 				    fname, zpool_get_name(zhp));
10845 				return (-1);
10846 			}
10847 
10848 			if (!features[f]) {
10849 				(void) fprintf(stderr, gettext("Error: "
10850 				    "cannot enable feature '%s' on pool '%s'\n"
10851 				    "as it is not specified in this pool's "
10852 				    "current compatibility set.\n"
10853 				    "Consider setting 'compatibility' to a "
10854 				    "less restrictive set, or to 'off'.\n"),
10855 				    fname, zpool_get_name(zhp));
10856 				return (-1);
10857 			}
10858 		}
10859 	}
10860 
10861 	error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
10862 
10863 	return (error);
10864 }
10865 
10866 static int
10867 set_callback(zpool_handle_t *zhp, void *data)
10868 {
10869 	int error;
10870 	set_cbdata_t *cb = (set_cbdata_t *)data;
10871 
10872 	if (cb->cb_type == ZFS_TYPE_VDEV) {
10873 		error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
10874 		    cb->cb_propname, cb->cb_value);
10875 	} else {
10876 		assert(cb->cb_type == ZFS_TYPE_POOL);
10877 		error = set_pool_callback(zhp, cb);
10878 	}
10879 
10880 	cb->cb_any_successful = !error;
10881 	return (error);
10882 }
10883 
10884 int
10885 zpool_do_set(int argc, char **argv)
10886 {
10887 	set_cbdata_t cb = { 0 };
10888 	int error;
10889 	char *vdev = NULL;
10890 
10891 	current_prop_type = ZFS_TYPE_POOL;
10892 	if (argc > 1 && argv[1][0] == '-') {
10893 		(void) fprintf(stderr, gettext("invalid option '%c'\n"),
10894 		    argv[1][1]);
10895 		usage(B_FALSE);
10896 	}
10897 
10898 	if (argc < 2) {
10899 		(void) fprintf(stderr, gettext("missing property=value "
10900 		    "argument\n"));
10901 		usage(B_FALSE);
10902 	}
10903 
10904 	if (argc < 3) {
10905 		(void) fprintf(stderr, gettext("missing pool name\n"));
10906 		usage(B_FALSE);
10907 	}
10908 
10909 	if (argc > 4) {
10910 		(void) fprintf(stderr, gettext("too many pool names\n"));
10911 		usage(B_FALSE);
10912 	}
10913 
10914 	cb.cb_propname = argv[1];
10915 	cb.cb_type = ZFS_TYPE_POOL;
10916 	cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10917 	cb.cb_value = strchr(cb.cb_propname, '=');
10918 	if (cb.cb_value == NULL) {
10919 		(void) fprintf(stderr, gettext("missing value in "
10920 		    "property=value argument\n"));
10921 		usage(B_FALSE);
10922 	}
10923 
10924 	*(cb.cb_value) = '\0';
10925 	cb.cb_value++;
10926 	argc -= 2;
10927 	argv += 2;
10928 
10929 	/* argv[0] is pool name */
10930 	if (!is_pool(argv[0])) {
10931 		(void) fprintf(stderr,
10932 		    gettext("cannot open '%s': is not a pool\n"), argv[0]);
10933 		return (EINVAL);
10934 	}
10935 
10936 	/* argv[1], when supplied, is vdev name */
10937 	if (argc == 2) {
10938 
10939 		if (strcmp(argv[1], "root") == 0)
10940 			vdev = strdup("root-0");
10941 		else
10942 			vdev = strdup(argv[1]);
10943 
10944 		if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
10945 			(void) fprintf(stderr, gettext(
10946 			    "cannot find '%s' in '%s': device not in pool\n"),
10947 			    vdev, argv[0]);
10948 			free(vdev);
10949 			return (EINVAL);
10950 		}
10951 		cb.cb_vdevs.cb_names = &vdev;
10952 		cb.cb_vdevs.cb_names_count = 1;
10953 		cb.cb_type = ZFS_TYPE_VDEV;
10954 	}
10955 
10956 	error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
10957 	    B_FALSE, set_callback, &cb);
10958 
10959 	if (vdev != NULL)
10960 		free(vdev);
10961 
10962 	return (error);
10963 }
10964 
10965 /* Add up the total number of bytes left to initialize/trim across all vdevs */
10966 static uint64_t
10967 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
10968 {
10969 	uint64_t bytes_remaining;
10970 	nvlist_t **child;
10971 	uint_t c, children;
10972 	vdev_stat_t *vs;
10973 
10974 	assert(activity == ZPOOL_WAIT_INITIALIZE ||
10975 	    activity == ZPOOL_WAIT_TRIM);
10976 
10977 	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
10978 	    (uint64_t **)&vs, &c) == 0);
10979 
10980 	if (activity == ZPOOL_WAIT_INITIALIZE &&
10981 	    vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
10982 		bytes_remaining = vs->vs_initialize_bytes_est -
10983 		    vs->vs_initialize_bytes_done;
10984 	else if (activity == ZPOOL_WAIT_TRIM &&
10985 	    vs->vs_trim_state == VDEV_TRIM_ACTIVE)
10986 		bytes_remaining = vs->vs_trim_bytes_est -
10987 		    vs->vs_trim_bytes_done;
10988 	else
10989 		bytes_remaining = 0;
10990 
10991 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
10992 	    &child, &children) != 0)
10993 		children = 0;
10994 
10995 	for (c = 0; c < children; c++)
10996 		bytes_remaining += vdev_activity_remaining(child[c], activity);
10997 
10998 	return (bytes_remaining);
10999 }
11000 
11001 /* Add up the total number of bytes left to rebuild across top-level vdevs */
11002 static uint64_t
11003 vdev_activity_top_remaining(nvlist_t *nv)
11004 {
11005 	uint64_t bytes_remaining = 0;
11006 	nvlist_t **child;
11007 	uint_t children;
11008 	int error;
11009 
11010 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
11011 	    &child, &children) != 0)
11012 		children = 0;
11013 
11014 	for (uint_t c = 0; c < children; c++) {
11015 		vdev_rebuild_stat_t *vrs;
11016 		uint_t i;
11017 
11018 		error = nvlist_lookup_uint64_array(child[c],
11019 		    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
11020 		if (error == 0) {
11021 			if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
11022 				bytes_remaining += (vrs->vrs_bytes_est -
11023 				    vrs->vrs_bytes_rebuilt);
11024 			}
11025 		}
11026 	}
11027 
11028 	return (bytes_remaining);
11029 }
11030 
11031 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
11032 static boolean_t
11033 vdev_any_spare_replacing(nvlist_t *nv)
11034 {
11035 	nvlist_t **child;
11036 	uint_t c, children;
11037 	const char *vdev_type;
11038 
11039 	(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
11040 
11041 	if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
11042 	    strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
11043 	    strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
11044 		return (B_TRUE);
11045 	}
11046 
11047 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
11048 	    &child, &children) != 0)
11049 		children = 0;
11050 
11051 	for (c = 0; c < children; c++) {
11052 		if (vdev_any_spare_replacing(child[c]))
11053 			return (B_TRUE);
11054 	}
11055 
11056 	return (B_FALSE);
11057 }
11058 
11059 typedef struct wait_data {
11060 	char *wd_poolname;
11061 	boolean_t wd_scripted;
11062 	boolean_t wd_exact;
11063 	boolean_t wd_headers_once;
11064 	boolean_t wd_should_exit;
11065 	/* Which activities to wait for */
11066 	boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
11067 	float wd_interval;
11068 	pthread_cond_t wd_cv;
11069 	pthread_mutex_t wd_mutex;
11070 } wait_data_t;
11071 
11072 /*
11073  * Print to stdout a single line, containing one column for each activity that
11074  * we are waiting for specifying how many bytes of work are left for that
11075  * activity.
11076  */
11077 static void
11078 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
11079 {
11080 	nvlist_t *config, *nvroot;
11081 	uint_t c;
11082 	int i;
11083 	pool_checkpoint_stat_t *pcs = NULL;
11084 	pool_scan_stat_t *pss = NULL;
11085 	pool_removal_stat_t *prs = NULL;
11086 	pool_raidz_expand_stat_t *pres = NULL;
11087 	const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
11088 	    "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
11089 	int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
11090 
11091 	/* Calculate the width of each column */
11092 	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11093 		/*
11094 		 * Make sure we have enough space in the col for pretty-printed
11095 		 * numbers and for the column header, and then leave a couple
11096 		 * spaces between cols for readability.
11097 		 */
11098 		col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
11099 	}
11100 
11101 	if (timestamp_fmt != NODATE)
11102 		print_timestamp(timestamp_fmt);
11103 
11104 	/* Print header if appropriate */
11105 	int term_height = terminal_height();
11106 	boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
11107 	    row % (term_height-1) == 0);
11108 	if (!wd->wd_scripted && (row == 0 || reprint_header)) {
11109 		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11110 			if (wd->wd_enabled[i])
11111 				(void) printf("%*s", col_widths[i], headers[i]);
11112 		}
11113 		(void) fputc('\n', stdout);
11114 	}
11115 
11116 	/* Bytes of work remaining in each activity */
11117 	int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
11118 
11119 	bytes_rem[ZPOOL_WAIT_FREE] =
11120 	    zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
11121 
11122 	config = zpool_get_config(zhp, NULL);
11123 	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
11124 
11125 	(void) nvlist_lookup_uint64_array(nvroot,
11126 	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
11127 	if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
11128 		bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
11129 
11130 	(void) nvlist_lookup_uint64_array(nvroot,
11131 	    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
11132 	if (prs != NULL && prs->prs_state == DSS_SCANNING)
11133 		bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
11134 		    prs->prs_copied;
11135 
11136 	(void) nvlist_lookup_uint64_array(nvroot,
11137 	    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
11138 	if (pss != NULL && pss->pss_state == DSS_SCANNING &&
11139 	    pss->pss_pass_scrub_pause == 0) {
11140 		int64_t rem = pss->pss_to_examine - pss->pss_issued;
11141 		if (pss->pss_func == POOL_SCAN_SCRUB)
11142 			bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
11143 		else
11144 			bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
11145 	} else if (check_rebuilding(nvroot, NULL)) {
11146 		bytes_rem[ZPOOL_WAIT_RESILVER] =
11147 		    vdev_activity_top_remaining(nvroot);
11148 	}
11149 
11150 	(void) nvlist_lookup_uint64_array(nvroot,
11151 	    ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
11152 	if (pres != NULL && pres->pres_state == DSS_SCANNING) {
11153 		int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
11154 		bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
11155 	}
11156 
11157 	bytes_rem[ZPOOL_WAIT_INITIALIZE] =
11158 	    vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
11159 	bytes_rem[ZPOOL_WAIT_TRIM] =
11160 	    vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
11161 
11162 	/*
11163 	 * A replace finishes after resilvering finishes, so the amount of work
11164 	 * left for a replace is the same as for resilvering.
11165 	 *
11166 	 * It isn't quite correct to say that if we have any 'spare' or
11167 	 * 'replacing' vdevs and a resilver is happening, then a replace is in
11168 	 * progress, like we do here. When a hot spare is used, the faulted vdev
11169 	 * is not removed after the hot spare is resilvered, so parent 'spare'
11170 	 * vdev is not removed either. So we could have a 'spare' vdev, but be
11171 	 * resilvering for a different reason. However, we use it as a heuristic
11172 	 * because we don't have access to the DTLs, which could tell us whether
11173 	 * or not we have really finished resilvering a hot spare.
11174 	 */
11175 	if (vdev_any_spare_replacing(nvroot))
11176 		bytes_rem[ZPOOL_WAIT_REPLACE] =  bytes_rem[ZPOOL_WAIT_RESILVER];
11177 
11178 	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11179 		char buf[64];
11180 		if (!wd->wd_enabled[i])
11181 			continue;
11182 
11183 		if (wd->wd_exact) {
11184 			(void) snprintf(buf, sizeof (buf), "%" PRIi64,
11185 			    bytes_rem[i]);
11186 		} else {
11187 			zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
11188 		}
11189 
11190 		if (wd->wd_scripted)
11191 			(void) printf(i == 0 ? "%s" : "\t%s", buf);
11192 		else
11193 			(void) printf(" %*s", col_widths[i] - 1, buf);
11194 	}
11195 	(void) printf("\n");
11196 	(void) fflush(stdout);
11197 }
11198 
11199 static void *
11200 wait_status_thread(void *arg)
11201 {
11202 	wait_data_t *wd = (wait_data_t *)arg;
11203 	zpool_handle_t *zhp;
11204 
11205 	if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
11206 		return (void *)(1);
11207 
11208 	for (int row = 0; ; row++) {
11209 		boolean_t missing;
11210 		struct timespec timeout;
11211 		int ret = 0;
11212 		(void) clock_gettime(CLOCK_REALTIME, &timeout);
11213 
11214 		if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
11215 		    zpool_props_refresh(zhp) != 0) {
11216 			zpool_close(zhp);
11217 			return (void *)(uintptr_t)(missing ? 0 : 1);
11218 		}
11219 
11220 		print_wait_status_row(wd, zhp, row);
11221 
11222 		timeout.tv_sec += floor(wd->wd_interval);
11223 		long nanos = timeout.tv_nsec +
11224 		    (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
11225 		if (nanos >= NANOSEC) {
11226 			timeout.tv_sec++;
11227 			timeout.tv_nsec = nanos - NANOSEC;
11228 		} else {
11229 			timeout.tv_nsec = nanos;
11230 		}
11231 		pthread_mutex_lock(&wd->wd_mutex);
11232 		if (!wd->wd_should_exit)
11233 			ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
11234 			    &timeout);
11235 		pthread_mutex_unlock(&wd->wd_mutex);
11236 		if (ret == 0) {
11237 			break; /* signaled by main thread */
11238 		} else if (ret != ETIMEDOUT) {
11239 			(void) fprintf(stderr, gettext("pthread_cond_timedwait "
11240 			    "failed: %s\n"), strerror(ret));
11241 			zpool_close(zhp);
11242 			return (void *)(uintptr_t)(1);
11243 		}
11244 	}
11245 
11246 	zpool_close(zhp);
11247 	return (void *)(0);
11248 }
11249 
11250 int
11251 zpool_do_wait(int argc, char **argv)
11252 {
11253 	boolean_t verbose = B_FALSE;
11254 	int c, i;
11255 	unsigned long count;
11256 	pthread_t status_thr;
11257 	int error = 0;
11258 	zpool_handle_t *zhp;
11259 
11260 	wait_data_t wd;
11261 	wd.wd_scripted = B_FALSE;
11262 	wd.wd_exact = B_FALSE;
11263 	wd.wd_headers_once = B_FALSE;
11264 	wd.wd_should_exit = B_FALSE;
11265 
11266 	pthread_mutex_init(&wd.wd_mutex, NULL);
11267 	pthread_cond_init(&wd.wd_cv, NULL);
11268 
11269 	/* By default, wait for all types of activity. */
11270 	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
11271 		wd.wd_enabled[i] = B_TRUE;
11272 
11273 	while ((c = getopt(argc, argv, "HpT:t:")) != -1) {
11274 		switch (c) {
11275 		case 'H':
11276 			wd.wd_scripted = B_TRUE;
11277 			break;
11278 		case 'n':
11279 			wd.wd_headers_once = B_TRUE;
11280 			break;
11281 		case 'p':
11282 			wd.wd_exact = B_TRUE;
11283 			break;
11284 		case 'T':
11285 			get_timestamp_arg(*optarg);
11286 			break;
11287 		case 't':
11288 			/* Reset activities array */
11289 			memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
11290 
11291 			for (char *tok; (tok = strsep(&optarg, ",")); ) {
11292 				static const char *const col_opts[] = {
11293 				    "discard", "free", "initialize", "replace",
11294 				    "remove", "resilver", "scrub", "trim",
11295 				    "raidz_expand" };
11296 
11297 				for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
11298 					if (strcmp(tok, col_opts[i]) == 0) {
11299 						wd.wd_enabled[i] = B_TRUE;
11300 						goto found;
11301 					}
11302 
11303 				(void) fprintf(stderr,
11304 				    gettext("invalid activity '%s'\n"), tok);
11305 				usage(B_FALSE);
11306 found:;
11307 			}
11308 			break;
11309 		case '?':
11310 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
11311 			    optopt);
11312 			usage(B_FALSE);
11313 		}
11314 	}
11315 
11316 	argc -= optind;
11317 	argv += optind;
11318 
11319 	get_interval_count(&argc, argv, &wd.wd_interval, &count);
11320 	if (count != 0) {
11321 		/* This subcmd only accepts an interval, not a count */
11322 		(void) fprintf(stderr, gettext("too many arguments\n"));
11323 		usage(B_FALSE);
11324 	}
11325 
11326 	if (wd.wd_interval != 0)
11327 		verbose = B_TRUE;
11328 
11329 	if (argc < 1) {
11330 		(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
11331 		usage(B_FALSE);
11332 	}
11333 	if (argc > 1) {
11334 		(void) fprintf(stderr, gettext("too many arguments\n"));
11335 		usage(B_FALSE);
11336 	}
11337 
11338 	wd.wd_poolname = argv[0];
11339 
11340 	if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
11341 		return (1);
11342 
11343 	if (verbose) {
11344 		/*
11345 		 * We use a separate thread for printing status updates because
11346 		 * the main thread will call lzc_wait(), which blocks as long
11347 		 * as an activity is in progress, which can be a long time.
11348 		 */
11349 		if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
11350 		    != 0) {
11351 			(void) fprintf(stderr, gettext("failed to create status"
11352 			    "thread: %s\n"), strerror(errno));
11353 			zpool_close(zhp);
11354 			return (1);
11355 		}
11356 	}
11357 
11358 	/*
11359 	 * Loop over all activities that we are supposed to wait for until none
11360 	 * of them are in progress. Note that this means we can end up waiting
11361 	 * for more activities to complete than just those that were in progress
11362 	 * when we began waiting; if an activity we are interested in begins
11363 	 * while we are waiting for another activity, we will wait for both to
11364 	 * complete before exiting.
11365 	 */
11366 	for (;;) {
11367 		boolean_t missing = B_FALSE;
11368 		boolean_t any_waited = B_FALSE;
11369 
11370 		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11371 			boolean_t waited;
11372 
11373 			if (!wd.wd_enabled[i])
11374 				continue;
11375 
11376 			error = zpool_wait_status(zhp, i, &missing, &waited);
11377 			if (error != 0 || missing)
11378 				break;
11379 
11380 			any_waited = (any_waited || waited);
11381 		}
11382 
11383 		if (error != 0 || missing || !any_waited)
11384 			break;
11385 	}
11386 
11387 	zpool_close(zhp);
11388 
11389 	if (verbose) {
11390 		uintptr_t status;
11391 		pthread_mutex_lock(&wd.wd_mutex);
11392 		wd.wd_should_exit = B_TRUE;
11393 		pthread_cond_signal(&wd.wd_cv);
11394 		pthread_mutex_unlock(&wd.wd_mutex);
11395 		(void) pthread_join(status_thr, (void *)&status);
11396 		if (status != 0)
11397 			error = status;
11398 	}
11399 
11400 	pthread_mutex_destroy(&wd.wd_mutex);
11401 	pthread_cond_destroy(&wd.wd_cv);
11402 	return (error);
11403 }
11404 
11405 static int
11406 find_command_idx(const char *command, int *idx)
11407 {
11408 	for (int i = 0; i < NCOMMAND; ++i) {
11409 		if (command_table[i].name == NULL)
11410 			continue;
11411 
11412 		if (strcmp(command, command_table[i].name) == 0) {
11413 			*idx = i;
11414 			return (0);
11415 		}
11416 	}
11417 	return (1);
11418 }
11419 
11420 /*
11421  * Display version message
11422  */
11423 static int
11424 zpool_do_version(int argc, char **argv)
11425 {
11426 	(void) argc, (void) argv;
11427 	return (zfs_version_print() != 0);
11428 }
11429 
11430 /* Display documentation */
11431 static int
11432 zpool_do_help(int argc, char **argv)
11433 {
11434 	char page[MAXNAMELEN];
11435 	if (argc < 3 || strcmp(argv[2], "zpool") == 0)
11436 		strcpy(page, "zpool");
11437 	else if (strcmp(argv[2], "concepts") == 0 ||
11438 	    strcmp(argv[2], "props") == 0)
11439 		snprintf(page, sizeof (page), "zpool%s", argv[2]);
11440 	else
11441 		snprintf(page, sizeof (page), "zpool-%s", argv[2]);
11442 
11443 	execlp("man", "man", page, NULL);
11444 
11445 	fprintf(stderr, "couldn't run man program: %s", strerror(errno));
11446 	return (-1);
11447 }
11448 
11449 /*
11450  * Do zpool_load_compat() and print error message on failure
11451  */
11452 static zpool_compat_status_t
11453 zpool_do_load_compat(const char *compat, boolean_t *list)
11454 {
11455 	char report[1024];
11456 
11457 	zpool_compat_status_t ret;
11458 
11459 	ret = zpool_load_compat(compat, list, report, 1024);
11460 	switch (ret) {
11461 
11462 	case ZPOOL_COMPATIBILITY_OK:
11463 		break;
11464 
11465 	case ZPOOL_COMPATIBILITY_NOFILES:
11466 	case ZPOOL_COMPATIBILITY_BADFILE:
11467 	case ZPOOL_COMPATIBILITY_BADTOKEN:
11468 		(void) fprintf(stderr, "Error: %s\n", report);
11469 		break;
11470 
11471 	case ZPOOL_COMPATIBILITY_WARNTOKEN:
11472 		(void) fprintf(stderr, "Warning: %s\n", report);
11473 		ret = ZPOOL_COMPATIBILITY_OK;
11474 		break;
11475 	}
11476 	return (ret);
11477 }
11478 
11479 int
11480 main(int argc, char **argv)
11481 {
11482 	int ret = 0;
11483 	int i = 0;
11484 	char *cmdname;
11485 	char **newargv;
11486 
11487 	(void) setlocale(LC_ALL, "");
11488 	(void) setlocale(LC_NUMERIC, "C");
11489 	(void) textdomain(TEXT_DOMAIN);
11490 	srand(time(NULL));
11491 
11492 	opterr = 0;
11493 
11494 	/*
11495 	 * Make sure the user has specified some command.
11496 	 */
11497 	if (argc < 2) {
11498 		(void) fprintf(stderr, gettext("missing command\n"));
11499 		usage(B_FALSE);
11500 	}
11501 
11502 	cmdname = argv[1];
11503 
11504 	/*
11505 	 * Special case '-?'
11506 	 */
11507 	if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
11508 		usage(B_TRUE);
11509 
11510 	/*
11511 	 * Special case '-V|--version'
11512 	 */
11513 	if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
11514 		return (zpool_do_version(argc, argv));
11515 
11516 	/*
11517 	 * Special case 'help'
11518 	 */
11519 	if (strcmp(cmdname, "help") == 0)
11520 		return (zpool_do_help(argc, argv));
11521 
11522 	if ((g_zfs = libzfs_init()) == NULL) {
11523 		(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
11524 		return (1);
11525 	}
11526 
11527 	libzfs_print_on_error(g_zfs, B_TRUE);
11528 
11529 	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
11530 
11531 	/*
11532 	 * Many commands modify input strings for string parsing reasons.
11533 	 * We create a copy to protect the original argv.
11534 	 */
11535 	newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
11536 	for (i = 0; i < argc; i++)
11537 		newargv[i] = strdup(argv[i]);
11538 	newargv[argc] = NULL;
11539 
11540 	/*
11541 	 * Run the appropriate command.
11542 	 */
11543 	if (find_command_idx(cmdname, &i) == 0) {
11544 		current_command = &command_table[i];
11545 		ret = command_table[i].func(argc - 1, newargv + 1);
11546 	} else if (strchr(cmdname, '=')) {
11547 		verify(find_command_idx("set", &i) == 0);
11548 		current_command = &command_table[i];
11549 		ret = command_table[i].func(argc, newargv);
11550 	} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
11551 		/*
11552 		 * 'freeze' is a vile debugging abomination, so we treat
11553 		 * it as such.
11554 		 */
11555 		zfs_cmd_t zc = {"\0"};
11556 
11557 		(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
11558 		ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
11559 		if (ret != 0) {
11560 			(void) fprintf(stderr,
11561 			gettext("failed to freeze pool: %d\n"), errno);
11562 			ret = 1;
11563 		}
11564 
11565 		log_history = 0;
11566 	} else {
11567 		(void) fprintf(stderr, gettext("unrecognized "
11568 		    "command '%s'\n"), cmdname);
11569 		usage(B_FALSE);
11570 		ret = 1;
11571 	}
11572 
11573 	for (i = 0; i < argc; i++)
11574 		free(newargv[i]);
11575 	free(newargv);
11576 
11577 	if (ret == 0 && log_history)
11578 		(void) zpool_log_history(g_zfs, history_str);
11579 
11580 	libzfs_fini(g_zfs);
11581 
11582 	/*
11583 	 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
11584 	 * for the purposes of running ::findleaks.
11585 	 */
11586 	if (getenv("ZFS_ABORT") != NULL) {
11587 		(void) printf("dumping core by request\n");
11588 		abort();
11589 	}
11590 
11591 	return (ret);
11592 }
11593