/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2021, 2023, Klara Inc.
 * Copyright [2021] Hewlett Packard Enterprise Development LP
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>

#include <libzfs.h>
#include <libzutil.h>

#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "zfs_valstr.h"

#include "statcommon.h"

libzfs_handle_t *g_zfs;

static int mount_tp_nthr = 512;	/* tpool threads for multi-threaded mounting */

static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);
static int zpool_do_prefetch(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static int zpool_do_ddt_prune(int, char **);

static int zpool_do_help(int argc, char **argv);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

enum zpool_options {
	ZPOOL_OPTION_POWER = 1024,
	ZPOOL_OPTION_ALLOW_INUSE,
	ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
	ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
	ZPOOL_OPTION_POOL_KEY_GUID,
	ZPOOL_OPTION_JSON_NUMS_AS_INT,
	ZPOOL_OPTION_JSON_FLAT_VDEVS
};

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
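
/*
 * Example (illustrative only): when a binary is linked against libumem,
 * the same settings the hooks above supply as defaults can be passed via
 * the matching libumem environment variables, e.g.
 *
 *	UMEM_DEBUG=default,verbose UMEM_LOGGING=fail,contents zpool status
 */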

typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DDT_PRUNE,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_PREFETCH,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;


/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
	IOS_DEFAULT = 0,
	IOS_LATENCY = 1,
	IOS_QUEUES = 2,
	IOS_L_HISTO = 3,
	IOS_RQ_HISTO = 4,
	IOS_COUNT,	/* always last element */
};

/* iostat_type entries as bitmasks */
#define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
#define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
#define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
#define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
#define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define	IOS_ANYHISTO_M	(IOS_L_HISTO_M | IOS_RQ_HISTO_M)

/*
 * Lookup table for iostat flags to nvlist names.  Basically a list
 * of all the nvlists a flag requires.  Also specifies the order in
 * which data gets printed in zpool iostat.
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
	[IOS_L_HISTO] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_LATENCY] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_QUEUES] = {
	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    NULL},
	[IOS_RQ_HISTO] = {
	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    NULL},
};
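
/*
 * Usage sketch (illustrative only): given one of the iostat_type values
 * above, a caller can walk its row up to the NULL terminator to learn
 * which vdev stat nvlists are required, in print order:
 *
 *	const char *const *names = vsx_type_to_nvlist[IOS_QUEUES];
 *	for (int i = 0; names[i] != NULL; i++)
 *		(void) printf("%s\n", names[i]);
 */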

static const char *pool_scan_func_str[] = {
	"NONE",
	"SCRUB",
	"RESILVER",
	"ERRORSCRUB"
};

static const char *pool_scan_state_str[] = {
	"NONE",
	"SCANNING",
	"FINISHED",
	"CANCELED",
	"ERRORSCRUBBING"
};

static const char *vdev_rebuild_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"COMPLETE"
};

static const char *checkpoint_state_str[] = {
	"NONE",
	"EXISTS",
	"DISCARDING"
};

static const char *vdev_state_str[] = {
	"UNKNOWN",
	"CLOSED",
	"OFFLINE",
	"REMOVED",
	"CANT_OPEN",
	"FAULTED",
	"DEGRADED",
	"ONLINE"
};

static const char *vdev_aux_str[] = {
	"NONE",
	"OPEN_FAILED",
	"CORRUPT_DATA",
	"NO_REPLICAS",
	"BAD_GUID_SUM",
	"TOO_SMALL",
	"BAD_LABEL",
	"VERSION_NEWER",
	"VERSION_OLDER",
	"UNSUP_FEAT",
	"SPARED",
	"ERR_EXCEEDED",
	"IO_FAILURE",
	"BAD_LOG",
	"EXTERNAL",
	"SPLIT_POOL",
	"BAD_ASHIFT",
	"EXTERNAL_PERSIST",
	"ACTIVE",
	"CHILDREN_OFFLINE",
	"ASHIFT_TOO_BIG"
};

static const char *vdev_init_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

static const char *vdev_trim_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

#define	ZFS_NICE_TIMESTAMP	100

/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at one time, so we can
 * just do a highbit64(a).
 */
#define	IOS_HISTO_IDX(a)	(highbit64(a & IOS_ANYHISTO_M) - 1)
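
/*
 * For example (illustrative only): with only the latency-histogram bit set,
 *
 *	IOS_HISTO_IDX(IOS_L_HISTO_M) == IOS_L_HISTO
 *
 * since highbit64() returns the 1-based position of the highest set bit
 * (here bit 3 + 1), and subtracting one recovers the enum value.
 */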

typedef struct zpool_command {
	const char *name;
	int (*func)(int, char **);
	zpool_help_t usage;
} zpool_command_t;

/*
 * Master command table.  Each ZFS command has a name, associated function, and
 * usage message.  The usage messages need to be internationalized, so we have
 * to have a function to return the usage message based on a command index.
 *
 * These commands are organized according to how they are displayed in the usage
 * message.  An empty command (one with a NULL name) indicates an empty line in
 * the generic usage message.
 */
static zpool_command_t command_table[] = {
	{ "version", zpool_do_version, HELP_VERSION },
	{ NULL },
	{ "create", zpool_do_create, HELP_CREATE },
	{ "destroy", zpool_do_destroy, HELP_DESTROY },
	{ NULL },
	{ "add", zpool_do_add, HELP_ADD },
	{ "remove", zpool_do_remove, HELP_REMOVE },
	{ NULL },
	{ "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
	{ NULL },
	{ "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
	{ "prefetch", zpool_do_prefetch, HELP_PREFETCH },
	{ NULL },
	{ "list", zpool_do_list, HELP_LIST },
	{ "iostat", zpool_do_iostat, HELP_IOSTAT },
	{ "status", zpool_do_status, HELP_STATUS },
	{ NULL },
	{ "online", zpool_do_online, HELP_ONLINE },
	{ "offline", zpool_do_offline, HELP_OFFLINE },
	{ "clear", zpool_do_clear, HELP_CLEAR },
	{ "reopen", zpool_do_reopen, HELP_REOPEN },
	{ NULL },
	{ "attach", zpool_do_attach, HELP_ATTACH },
	{ "detach", zpool_do_detach, HELP_DETACH },
	{ "replace", zpool_do_replace, HELP_REPLACE },
	{ "split", zpool_do_split, HELP_SPLIT },
	{ NULL },
	{ "initialize", zpool_do_initialize, HELP_INITIALIZE },
	{ "resilver", zpool_do_resilver, HELP_RESILVER },
	{ "scrub", zpool_do_scrub, HELP_SCRUB },
	{ "trim", zpool_do_trim, HELP_TRIM },
	{ NULL },
	{ "import", zpool_do_import, HELP_IMPORT },
	{ "export", zpool_do_export, HELP_EXPORT },
	{ "upgrade", zpool_do_upgrade, HELP_UPGRADE },
	{ "reguid", zpool_do_reguid, HELP_REGUID },
	{ NULL },
	{ "history", zpool_do_history, HELP_HISTORY },
	{ "events", zpool_do_events, HELP_EVENTS },
	{ NULL },
	{ "get", zpool_do_get, HELP_GET },
	{ "set", zpool_do_set, HELP_SET },
	{ "sync", zpool_do_sync, HELP_SYNC },
	{ NULL },
	{ "wait", zpool_do_wait, HELP_WAIT },
	{ NULL },
	{ "ddtprune", zpool_do_ddt_prune, HELP_DDT_PRUNE },
};

#define	NCOMMAND	(ARRAY_SIZE(command_table))
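
/*
 * Dispatch sketch (illustrative only; the real loop lives in main(), and
 * the local names below are hypothetical): commands are found by a linear
 * scan of command_table, skipping the NULL spacer entries, and the matching
 * handler is invoked with the remaining arguments:
 *
 *	for (i = 0; i < NCOMMAND; i++) {
 *		if (command_table[i].name != NULL &&
 *		    strcmp(cmdname, command_table[i].name) == 0) {
 *			current_command = &command_table[i];
 *			ret = command_table[i].func(argc - 1, argv + 1);
 *			break;
 *		}
 *	}
 */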

#define	VDEV_ALLOC_CLASS_LOGS	"logs"

#define	MAX_CMD_LEN	256

static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;

static const char *
get_usage(zpool_help_t idx)
{
	switch (idx) {
	case HELP_ADD:
		return (gettext("\tadd [-afgLnP] [-o property=value] "
		    "<pool> <vdev> ...\n"));
	case HELP_ATTACH:
		return (gettext("\tattach [-fsw] [-o property=value] "
		    "<pool> <device> <new-device>\n"));
	case HELP_CLEAR:
		return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
	case HELP_CREATE:
		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
		    "\t    [-O file-system-property=value] ... \n"
		    "\t    [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
	case HELP_CHECKPOINT:
		return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
	case HELP_DESTROY:
		return (gettext("\tdestroy [-f] <pool>\n"));
	case HELP_DETACH:
		return (gettext("\tdetach <pool> <device>\n"));
	case HELP_EXPORT:
		return (gettext("\texport [-af] <pool> ...\n"));
	case HELP_HISTORY:
		return (gettext("\thistory [-il] [<pool>] ...\n"));
	case HELP_IMPORT:
		return (gettext("\timport [-d dir] [-D]\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]] -a\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]]\n"
		    "\t    [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
	case HELP_IOSTAT:
		return (gettext("\tiostat [[[-c [script1,script2,...]"
		    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
		    "\t    [[pool ...]|[pool vdev ...]|[vdev ...]]"
		    " [[-n] interval [count]]\n"));
	case HELP_LABELCLEAR:
		return (gettext("\tlabelclear [-f] <vdev>\n"));
	case HELP_LIST:
		return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "
		    "[--json-int, --json-pool-key-guid]] ...\n"
		    "\t    [-T d|u] [pool] [interval [count]]\n"));
	case HELP_PREFETCH:
		return (gettext("\tprefetch -t <type> [<type opts>] <pool>\n"
		    "\t    -t ddt <pool>\n"));
	case HELP_OFFLINE:
		return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
		    "<device> ...\n"));
	case HELP_ONLINE:
		return (gettext("\tonline [--power][-e] <pool> <device> "
		    "...\n"));
	case HELP_REPLACE:
		return (gettext("\treplace [-fsw] [-o property=value] "
		    "<pool> <device> [new-device]\n"));
	case HELP_REMOVE:
		return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
	case HELP_REOPEN:
		return (gettext("\treopen [-n] <pool>\n"));
	case HELP_INITIALIZE:
		return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
		    "[<device> ...]\n"));
	case HELP_SCRUB:
		return (gettext("\tscrub [-e | -s | -p | -C] [-w] "
		    "<pool> ...\n"));
	case HELP_RESILVER:
		return (gettext("\tresilver <pool> ...\n"));
	case HELP_TRIM:
		return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
		    "[<device> ...]\n"));
	case HELP_STATUS:
		return (gettext("\tstatus [--power] [-j [--json-int, "
		    "--json-flat-vdevs, ...\n"
		    "\t    --json-pool-key-guid]] [-c [script1,script2,...]] "
		    "[-dDegiLpPstvx] ...\n"
		    "\t    [-T d|u] [pool] [interval [count]]\n"));
	case HELP_UPGRADE:
		return (gettext("\tupgrade\n"
		    "\tupgrade -v\n"
		    "\tupgrade [-V version] <-a | pool ...>\n"));
	case HELP_EVENTS:
		return (gettext("\tevents [-vHf [pool] | -c]\n"));
	case HELP_GET:
		return (gettext("\tget [-Hp] [-j [--json-int, "
		    "--json-pool-key-guid]] ...\n"
		    "\t    [-o \"all\" | field[,...]] "
		    "<\"all\" | property[,...]> <pool> ...\n"));
	case HELP_SET:
		return (gettext("\tset <property=value> <pool>\n"
		    "\tset <vdev_property=value> <pool> <vdev>\n"));
	case HELP_SPLIT:
		return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
		    "\t    [-o property=value] <pool> <newpool> "
		    "[<device> ...]\n"));
	case HELP_REGUID:
		return (gettext("\treguid [-g guid] <pool>\n"));
	case HELP_SYNC:
		return (gettext("\tsync [pool] ...\n"));
	case HELP_VERSION:
		return (gettext("\tversion [-j]\n"));
	case HELP_WAIT:
		return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
		    "<pool> [interval]\n"));
	case HELP_DDT_PRUNE:
		return (gettext("\tddtprune -d|-p <amount> <pool>\n"));
	default:
		__builtin_unreachable();
	}
}

static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
	uint_t children = 0;
	nvlist_t **child;
	uint_t i;

	(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (children == 0) {
		char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
		    VDEV_NAME_PATH);

		if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
		    strcmp(path, VDEV_TYPE_HOLE) != 0)
			fnvlist_add_boolean(res, path);

		free(path);
		return;
	}

	for (i = 0; i < children; i++) {
		zpool_collect_leaves(zhp, child[i], res);
	}
}

/*
 * Callback routine that will print out a pool property value.
 */
static int
print_pool_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));

	if (zpool_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (zpool_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", zpool_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Callback routine that will print out a vdev property value.
 */
static int
print_vdev_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));

	if (vdev_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (vdev_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", vdev_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
 * '/dev/disk/by-vdev/L5'.
 */
static const char *
vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
{
	nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
	if (vdev_nv == NULL) {
		return (NULL);
	}
	return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
}

static int
zpool_power_on(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_TRUE));
}

static int
zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
{
	int rc;

	rc = zpool_power_on(zhp, vdev);
	if (rc != 0)
		return (rc);

	zpool_disk_wait(vdev_name_to_path(zhp, vdev));

	return (0);
}

static int
zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
{
	nvlist_t *nv;
	const char *path = NULL;
	int rc;

	/* Power up all the devices first */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		if (path != NULL) {
			rc = zpool_power_on(zhp, (char *)path);
			if (rc != 0) {
				return (rc);
			}
		}
	}

	/*
	 * Wait for their devices to show up.  Since we powered them on
	 * at roughly the same time, they should all come online around
	 * the same time.
	 */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		zpool_disk_wait(path);
	}

	return (0);
}

static int
zpool_power_off(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_FALSE));
}

/*
 * Display usage message.  If we're inside a command, display only the usage for
 * that command.  Otherwise, iterate over the entire command table and display
 * a complete usage message.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	FILE *fp = requested ? stdout : stderr;

	if (current_command == NULL) {
		int i;

		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
		(void) fprintf(fp,
		    gettext("where 'command' is one of the following:\n\n"));

		for (i = 0; i < NCOMMAND; i++) {
			if (command_table[i].name == NULL)
				(void) fprintf(fp, "\n");
			else
				(void) fprintf(fp, "%s",
				    get_usage(command_table[i].usage));
		}

		(void) fprintf(fp,
		    gettext("\nFor further help on a command or topic, "
		    "run: %s\n"), "zpool help [<topic>]");
	} else {
		(void) fprintf(fp, gettext("usage:\n"));
		(void) fprintf(fp, "%s", get_usage(current_command->usage));
	}

	if (current_command != NULL &&
	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
	    ((strcmp(current_command->name, "set") == 0) ||
	    (strcmp(current_command->name, "get") == 0) ||
	    (strcmp(current_command->name, "list") == 0))) {

		(void) fprintf(fp, "%s",
		    gettext("\nthe following properties are supported:\n"));

		(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
		    "PROPERTY", "EDIT", "VALUES");

		/* Iterate over all properties */
		if (current_prop_type == ZFS_TYPE_POOL) {
			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);

			(void) fprintf(fp, "\t%-19s ", "feature@...");
			(void) fprintf(fp, "YES "
			    "disabled | enabled | active\n");

			(void) fprintf(fp, gettext("\nThe feature@ properties "
			    "must be appended with a feature name.\n"
			    "See zpool-features(7).\n"));
		} else if (current_prop_type == ZFS_TYPE_VDEV) {
			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);
		}
	}

	/*
	 * See comments at end of main().
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	exit(requested ? 0 : 2);
}

/*
 * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
 * if none specified.
 *
 * -c	Cancel. Ends active initializing.
 * -s	Suspend. Initializing can then be restarted with no flags.
 * -u	Uninitialize. Clears initialization state.
 * -w	Wait. Blocks until initializing has completed.
 */
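
/*
 * Examples (illustrative only; pool and device names are hypothetical):
 *
 *	zpool initialize -w tank sdb sdc	(start, and wait for completion)
 *	zpool initialize -s tank		(suspend a running initialize)
 *	zpool initialize -u tank		(clear initialization state)
 */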
int
zpool_do_initialize(int argc, char **argv)
{
	int c;
	char *poolname;
	zpool_handle_t *zhp;
	nvlist_t *vdevs;
	int err = 0;
	boolean_t wait = B_FALSE;

	struct option long_options[] = {
		{"cancel", no_argument, NULL, 'c'},
		{"suspend", no_argument, NULL, 's'},
		{"uninit", no_argument, NULL, 'u'},
		{"wait", no_argument, NULL, 'w'},
		{0, 0, 0, 0}
	};

	pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
	while ((c = getopt_long(argc, argv, "csuw", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'c':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_CANCEL;
			break;
		case 's':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_SUSPEND;
			break;
		case 'u':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_UNINIT) {
				(void) fprintf(stderr, gettext("-u cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_UNINIT;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
		return (-1);
	}

	if (wait && (cmd_type != POOL_INITIALIZE_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c, "
		    "-s, or -u\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];
	zhp = zpool_open(g_zfs, poolname);
	if (zhp == NULL)
		return (-1);

	vdevs = fnvlist_alloc();
	if (argc == 1) {
		/* no individual leaf vdevs specified, so add them all */
		nvlist_t *config = zpool_get_config(zhp, NULL);
		nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
		    ZPOOL_CONFIG_VDEV_TREE);
		zpool_collect_leaves(zhp, nvroot, vdevs);
	} else {
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
	}

	if (wait)
		err = zpool_initialize_wait(zhp, cmd_type, vdevs);
	else
		err = zpool_initialize(zhp, cmd_type, vdevs);

	fnvlist_free(vdevs);
	zpool_close(zhp);

	return (err);
}

/*
 * print a pool vdev config for dry runs
 */
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
    const char *match, int name_flags)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	boolean_t printed = B_FALSE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (name != NULL)
			(void) printf("\t%*s%s\n", indent, "", name);
		return;
	}

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		const char *class = "";

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);

		if (is_hole == B_TRUE) {
			continue;
		}

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			class = VDEV_ALLOC_BIAS_LOG;
		(void) nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
		if (strcmp(match, class) != 0)
			continue;

		if (!printed && name != NULL) {
			(void) printf("\t%*s%s\n", indent, "", name);
			printed = B_TRUE;
		}
		vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
		print_vdev_tree(zhp, vname, child[c], indent + 2, "",
		    name_flags);
		free(vname);
	}
}

/*
 * Print the list of l2cache devices for dry runs.
 */
static void
print_cache_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "cache");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

/*
 * Print the list of spares for dry runs.
 */
static void
print_spare_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "spares");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

typedef struct spare_cbdata {
	uint64_t cb_guid;
	zpool_handle_t *cb_zhp;
} spare_cbdata_t;

static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
	    search == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev(child[c], search))
				return (B_TRUE);
	}

	return (B_FALSE);
}

static int
find_spare(zpool_handle_t *zhp, void *data)
{
	spare_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (find_vdev(nvroot, cbp->cb_guid)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

static void
nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
    boolean_t literal, boolean_t as_int, int format)
{
	char buf[256];
	if (literal) {
		if (!as_int)
			snprintf(buf, 256, "%llu", (u_longlong_t)value);
	} else {
		switch (format) {
		case ZFS_NICENUM_1024:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
			break;
		case ZFS_NICENUM_BYTES:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
			break;
		case ZFS_NICENUM_TIME:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
			break;
		case ZFS_NICE_TIMESTAMP:
			format_timestamp(value, buf, 256);
			break;
		default:
			fprintf(stderr, "Invalid number format\n");
			exit(1);
		}
	}
	if (as_int)
		fnvlist_add_uint64(item, key, value);
	else
		fnvlist_add_string(item, key, buf);
}

/*
 * Generates an nvlist with an output version for every command, based on
 * the parameters. The purpose of this is to attach a version to the JSON
 * output, since the schema format might be updated for each command in
 * the future.
 *
 * Schema:
 *
 * "output_version": {
 *    "command": string,
 *    "vers_major": integer,
 *    "vers_minor": integer,
 * }
 */
static nvlist_t *
zpool_json_schema(int maj_v, int min_v)
{
	char cmd[MAX_CMD_LEN];
	nvlist_t *sch = fnvlist_alloc();
	nvlist_t *ov = fnvlist_alloc();

	snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
	fnvlist_add_string(ov, "command", cmd);
	fnvlist_add_uint32(ov, "vers_major", maj_v);
	fnvlist_add_uint32(ov, "vers_minor", min_v);
	fnvlist_add_nvlist(sch, "output_version", ov);
	fnvlist_free(ov);
	return (sch);
}
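
/*
 * For example (illustrative only): zpool_json_schema(0, 1) called while
 * running "zpool status" returns an nvlist that renders as the JSON:
 *
 *	"output_version": {
 *	    "command": "zpool status",
 *	    "vers_major": 0,
 *	    "vers_minor": 1
 *	}
 */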

static void
fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
    boolean_t as_int)
{
	nvlist_t *config = zpool_get_config(zhp, NULL);
	uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
	uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);

	fnvlist_add_string(list, "name", zpool_get_name(zhp));
	if (addtype)
		fnvlist_add_string(list, "type", "POOL");
	fnvlist_add_string(list, "state", zpool_get_state_str(zhp));
	if (as_int) {
		if (guid)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);
		if (txg)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);
		fnvlist_add_uint64(list, "spa_version", SPA_VERSION);
		fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);
	} else {
		char value[ZFS_MAXPROPLEN];
		if (guid) {
			snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)guid);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
		}
		if (txg) {
			snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)txg);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
		}
		fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);
		fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);
	}
}

static void
used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
{
	spare_cbdata_t spare_cb;
	verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
	    &spare_cb.cb_guid) == 0);
	if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
		if (strcmp(zpool_get_name(spare_cb.cb_zhp),
		    zpool_get_name(zhp)) != 0) {
			fnvlist_add_string(list, "used_by",
			    zpool_get_name(spare_cb.cb_zhp));
		}
		zpool_close(spare_cb.cb_zhp);
	}
}

static void
fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
    boolean_t addtype, boolean_t as_int)
{
	boolean_t l2c = B_FALSE;
	const char *path, *phys, *devid, *bias = NULL;
	uint64_t hole = 0, log = 0, spare = 0;
	vdev_stat_t *vs;
	uint_t c;
	nvlist_t *nvdev;
	nvlist_t *nvdev_parent = NULL;
	char *_name;

	if (strcmp(name, zpool_get_name(zhp)) != 0)
		_name = name;
	else
		_name = (char *)"root-0";

	nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);

	fnvlist_add_string(list, "name", name);
	if (addtype)
		fnvlist_add_string(list, "type", "VDEV");
	if (nvdev) {
		const char *type = fnvlist_lookup_string(nvdev,
		    ZPOOL_CONFIG_TYPE);
		if (type)
			fnvlist_add_string(list, "vdev_type", type);
		uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);
		if (guid) {
			if (as_int) {
				fnvlist_add_uint64(list, "guid", guid);
			} else {
				char buf[ZFS_MAXPROPLEN];
				snprintf(buf, ZFS_MAXPROPLEN, "%llu",
				    (u_longlong_t)guid);
				fnvlist_add_string(list, "guid", buf);
			}
		}
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
			fnvlist_add_string(list, "path", path);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
		    &phys) == 0)
			fnvlist_add_string(list, "phys_path", phys);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
		    &devid) == 0)
			fnvlist_add_string(list, "devid", devid);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
		    &spare);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
		if (hole)
			fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
		else if (l2c)
			fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
		else if (spare)
			fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
		else if (log)
			fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
		else {
			(void) nvlist_lookup_string(nvdev,
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			if (bias != NULL)
				fnvlist_add_string(list, "class", bias);
			else {
				nvdev_parent = NULL;
				nvdev_parent = zpool_find_parent_vdev(zhp,
				    _name, NULL, NULL, NULL);

				/*
				 * With a mirrored special device, the parent
				 * "mirror" vdev will have
				 * ZPOOL_CONFIG_ALLOCATION_BIAS set to "special"
				 * not the leaf vdevs.  If we're a leaf vdev
				 * in that case we need to look at our parent
				 * to see if they're "special" to know if we
				 * are "special" too.
				 */
				if (nvdev_parent) {
					(void) nvlist_lookup_string(
					    nvdev_parent,
					    ZPOOL_CONFIG_ALLOCATION_BIAS,
					    &bias);
				}
				if (bias != NULL)
					fnvlist_add_string(list, "class", bias);
				else
					fnvlist_add_string(list, "class",
					    "normal");
			}
		}
		if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0) {
			fnvlist_add_string(list, "state",
			    vdev_state_str[vs->vs_state]);
		}
	}
}

static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
	nvpair_t *nvp;
	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
	    nvp = nvlist_next_nvpair(proplist, nvp)) {
		if (zpool_prop_feature(nvpair_name(nvp)))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Add a property pair (name, string-value) into a property nvlist.
 */
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
	zpool_prop_t prop = ZPOOL_PROP_INVAL;
	nvlist_t *proplist;
	const char *normnm;
	const char *strval;

	if (*props == NULL &&
	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		return (1);
	}

	proplist = *props;

	if (poolprop) {
		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
		const char *cname =
		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
		    (!zpool_prop_feature(propname) &&
		    !zpool_prop_vdev(propname))) {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid pool or vdev property\n"), propname);
			return (2);
		}

		/*
		 * feature@ properties and version should not be specified
		 * at the same time.
		 */
		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    prop_list_contains_feature(proplist))) {
			(void) fprintf(stderr, gettext("'feature@' and "
			    "'version' properties cannot be specified "
			    "together\n"));
			return (2);
		}

		/*
		 * if version is specified, only "legacy" compatibility
		 * may be requested
		 */
		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    nvlist_exists(proplist, cname) &&
		    strcmp(fnvlist_lookup_string(proplist, cname),
		    ZPOOL_COMPAT_LEGACY) != 0)) {
			(void) fprintf(stderr, gettext("when 'version' is "
			    "specified, the 'compatibility' feature may only "
			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
			return (2);
		}

		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
			normnm = propname;
		else
			normnm = zpool_prop_to_name(prop);
	} else {
		zfs_prop_t fsprop = zfs_name_to_prop(propname);

		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
		    B_FALSE)) {
			normnm = zfs_prop_to_name(fsprop);
		} else if (zfs_prop_user(propname) ||
		    zfs_prop_userquota(propname)) {
			normnm = propname;
		} else {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid filesystem property\n"), propname);
			return (2);
		}
	}

	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
	    prop != ZPOOL_PROP_CACHEFILE) {
		(void) fprintf(stderr, gettext("property '%s' "
		    "specified multiple times\n"), propname);
		return (2);
	}

	if (nvlist_add_string(proplist, normnm, propval) != 0) {
		(void) fprintf(stderr, gettext("internal "
		    "error: out of memory\n"));
		return (1);
	}

	return (0);
}

/*
 * Set a default property pair (name, string-value) in a property nvlist
 */
static int
add_prop_list_default(const char *propname, const char *propval,
    nvlist_t **props)
{
	const char *pval;

	if (nvlist_lookup_string(*props, propname, &pval) == 0)
		return (0);

	return (add_prop_list(propname, propval, props, B_TRUE));
}

/*
 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
 *
 * -a	Disable the ashift validation checks
 * -f	Force addition of devices, even if they appear in use
 * -g	Display guid for individual vdev name.
 * -L	Follow links when resolving vdev path name.
 * -n	Do not add the devices, but display the resulting layout if
 *	they were to be added.
 * -o	Set property=value.
 * -P	Display full path for vdev name.
 *
 * Adds the given vdevs to 'pool'.  As with create, the bulk of this work is
 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
 * libzfs.
 */
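
/*
 * Examples (illustrative only; pool and device names are hypothetical):
 *
 *	zpool add -n tank mirror sdc sdd	(dry run: print the new layout)
 *	zpool add -o ashift=12 tank sde		(ashift is the only -o property
 *						accepted by 'zpool add')
 */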
int
zpool_do_add(int argc, char **argv)
{
	boolean_t check_replication = B_TRUE;
	boolean_t check_inuse = B_TRUE;
	boolean_t dryrun = B_FALSE;
	boolean_t check_ashift = B_TRUE;
	boolean_t force = B_FALSE;
	int name_flags = 0;
	int c;
	nvlist_t *nvroot;
	char *poolname;
	int ret;
	zpool_handle_t *zhp;
	nvlist_t *config;
	nvlist_t *props = NULL;
	char *propval;

	struct option long_options[] = {
		{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
		{"allow-replication-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
		{"allow-ashift-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'g':
			name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 'P':
			name_flags |= VDEV_NAME_PATH;
			break;
		case ZPOOL_OPTION_ALLOW_INUSE:
			check_inuse = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
			check_replication = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
			check_ashift = B_FALSE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		usage(B_FALSE);
	}

	if (force) {
		if (!check_inuse || !check_replication || !check_ashift) {
			(void) fprintf(stderr, gettext("'-f' option is not "
			    "allowed with '--allow-replication-mismatch', "
			    "'--allow-ashift-mismatch', or "
			    "'--allow-in-use'\n"));
			usage(B_FALSE);
		}
		check_inuse = B_FALSE;
		check_replication = B_FALSE;
		check_ashift = B_FALSE;
	}

	poolname = argv[0];

	argc--;
	argv++;

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}

	/* pass off to make_root_vdev for processing */
	nvroot = make_root_vdev(zhp, props, !check_inuse,
	    check_replication, B_FALSE, dryrun, argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		return (1);
	}

	if (dryrun) {
		nvlist_t *poolnvroot;
		nvlist_t **l2child, **sparechild;
		uint_t l2children, sparechildren, c;
		char *vname;
		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &poolnvroot) == 0);

		(void) printf(gettext("would update '%s' to the following "
		    "configuration:\n\n"), zpool_get_name(zhp));

		/* print original main pool and new tree */
		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
		    name_flags | VDEV_NAME_TYPE_ID);
		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

		/* print other classes: 'dedup', 'special', and 'log' */
		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		}

		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
 */
int
zpool_do_remove(int argc, char **argv)
{
	char *poolname;
	int i, ret = 0;
	zpool_handle_t *zhp = NULL;
	boolean_t stop = B_FALSE;
	int c;
	boolean_t noop = B_FALSE;
	boolean_t parsable = B_FALSE;
	boolean_t wait = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "npsw")) != -1) {
		switch (c) {
		case 'n':
			noop = B_TRUE;
			break;
		case 'p':
			parsable = B_TRUE;
			break;
		case 's':
			stop = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if (stop && noop) {
		zpool_close(zhp);
		(void) fprintf(stderr, gettext("stop request ignored\n"));
		return (0);
	}

	if (stop) {
		if (argc > 1) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
		if (zpool_vdev_remove_cancel(zhp) != 0)
			ret = 1;
		if (wait) {
			(void) fprintf(stderr, gettext("invalid option "
			    "combination: -w cannot be used with -s\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc < 2) {
			(void) fprintf(stderr, gettext("missing device\n"));
			usage(B_FALSE);
		}

		for (i = 1; i < argc; i++) {
			if (noop) {
				uint64_t size;

				if (zpool_vdev_indirect_size(zhp, argv[i],
				    &size) != 0) {
					ret = 1;
					break;
				}
				if (parsable) {
					(void) printf("%s %llu\n",
					    argv[i], (unsigned long long)size);
				} else {
					char valstr[32];
					zfs_nicenum(size, valstr,
					    sizeof (valstr));
					(void) printf("Memory that will be "
					    "used after removing %s: %s\n",
					    argv[i], valstr);
				}
			} else {
				if (zpool_vdev_remove(zhp, argv[i]) != 0)
					ret = 1;
			}
		}

		if (ret == 0 && wait)
			ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
	}
	zpool_close(zhp);

	return (ret);
}

/*
 * Return 1 if a vdev is active (being used in a pool)
 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
 *
 * This is useful for checking if a disk in an active pool is offlined or
 * faulted.
 */
static int
vdev_is_active(char *vdev_path)
{
	int fd;
	fd = open(vdev_path, O_EXCL);
	if (fd < 0) {
		return (1); /* can't open O_EXCL - disk is active */
	}

	close(fd);
	return (0); /* disk is inactive in the pool */
}

/*
 * zpool labelclear [-f] <vdev>
 *
 * -f	Force clearing the label for the vdevs which are members of
 *	the exported or foreign pools.
 *
 * Verifies that the vdev is not active and zeros out the label information
 * on the device.
 */
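
/*
 * Example (illustrative only; the device path is hypothetical):
 *
 *	zpool labelclear -f /dev/sdb1	(clear the label of a device that was
 *					a member of an exported pool)
 */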
int
zpool_do_labelclear(int argc, char **argv)
{
	char vdev[MAXPATHLEN];
	char *name = NULL;
	int c, fd = -1, ret = 0;
	nvlist_t *config;
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	boolean_t force = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		default:
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get vdev name */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing vdev name\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	(void) strlcpy(vdev, argv[0], sizeof (vdev));

	/*
	 * If we cannot open an absolute path, we quit.
	 * Otherwise if the provided vdev name doesn't point to a file,
	 * try prepending expected disk paths and partition numbers.
	 */
	if ((fd = open(vdev, O_RDWR)) < 0) {
		int error;
		if (vdev[0] == '/') {
			(void) fprintf(stderr, gettext("failed to open "
			    "%s: %s\n"), vdev, strerror(errno));
			return (1);
		}

		error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
		if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
			if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
				error = ENOENT;
		}

		if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
			if (errno == ENOENT) {
				(void) fprintf(stderr, gettext(
				    "failed to find device %s, try "
				    "specifying absolute path instead\n"),
				    argv[0]);
				return (1);
			}

			(void) fprintf(stderr, gettext("failed to open %s:"
			    " %s\n"), vdev, strerror(errno));
			return (1);
		}
	}

	/*
	 * Flush all dirty pages for the block device.  This should not be
	 * fatal when the device does not support BLKFLSBUF as would be the
	 * case for a file vdev.
	 */
	if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
		(void) fprintf(stderr, gettext("failed to invalidate "
		    "cache for %s: %s\n"), vdev, strerror(errno));

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) fprintf(stderr,
		    gettext("failed to read label from %s\n"), vdev);
		ret = 1;
		goto errout;
	}
	nvlist_free(config);

	ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to check state for %s\n"), vdev);
		ret = 1;
		goto errout;
	}

	if (!inuse)
		goto wipe_label;

	switch (state) {
	default:
	case POOL_STATE_ACTIVE:
	case POOL_STATE_SPARE:
	case POOL_STATE_L2CACHE:
		/*
		 * We allow the user to call 'zpool labelclear -f'
		 * on an offlined disk in an active pool. We can check if
		 * the disk is online by calling vdev_is_active().
		 */
1896 if (force && !vdev_is_active(vdev))
1897 break;
1898
1899 (void) fprintf(stderr, gettext(
1900 "%s is a member (%s) of pool \"%s\""),
1901 vdev, zpool_pool_state_to_name(state), name);
1902
1903 if (force) {
1904 (void) fprintf(stderr, gettext(
1905 ". Offline the disk first to clear its label."));
1906 }
1907 printf("\n");
1908 ret = 1;
1909 goto errout;
1910
1911 case POOL_STATE_EXPORTED:
1912 if (force)
1913 break;
1914 (void) fprintf(stderr, gettext(
1915 "use '-f' to override the following error:\n"
1916 "%s is a member of exported pool \"%s\"\n"),
1917 vdev, name);
1918 ret = 1;
1919 goto errout;
1920
1921 case POOL_STATE_POTENTIALLY_ACTIVE:
1922 if (force)
1923 break;
1924 (void) fprintf(stderr, gettext(
1925 "use '-f' to override the following error:\n"
1926 "%s is a member of potentially active pool \"%s\"\n"),
1927 vdev, name);
1928 ret = 1;
1929 goto errout;
1930
1931 case POOL_STATE_DESTROYED:
1932 /* inuse should never be set for a destroyed pool */
1933 assert(0);
1934 break;
1935 }
1936
1937 wipe_label:
1938 ret = zpool_clear_label(fd);
1939 if (ret != 0) {
1940 (void) fprintf(stderr,
1941 gettext("failed to clear label for %s\n"), vdev);
1942 }
1943
1944 errout:
1945 free(name);
1946 (void) close(fd);
1947
1948 return (ret);
1949 }
1950
1951 /*
1952 * zpool create [-fnd] [-o property=value] ...
1953 * [-O file-system-property=value] ...
1954 * [-R root] [-m mountpoint] [-t tname] <pool> <dev> ...
1955 *
1956 * -f Force creation, even if devices appear in use
1957 * -n Do not create the pool, but display the resulting layout if it
1958 * were to be created.
1959 * -R Create a pool under an alternate root
1960 * -m Set default mountpoint for the root dataset. By default it's
1961 * '/<pool>'
1962 * -o Set property=value.
1963 * -o Set feature@feature=enabled|disabled.
1964 * -d Don't automatically enable all supported pool features
1965 * (individual features can be enabled with -o).
1966 * -O Set fsproperty=value in the pool's root file system
 * -t Use tname as the in-core pool name while creating; the on-disk
 * name remains <pool>. Also defaults the cachefile property to 'none'.
1967 *
1968 * Creates the named pool according to the given vdev specification. The
1969 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1970 * Once we get the nvlist back from make_root_vdev(), we either print out the
1971 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
1972 */
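/*
 * Illustrative invocations (device names are placeholders):
 *
 *	# zpool create tank mirror sda sdb
 *	# zpool create -n -o ashift=12 -O compression=on tank raidz sda sdb sdc
 *
 * The second form only displays the layout that would be created.
 */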
1973 int
1974 zpool_do_create(int argc, char **argv)
1975 {
1976 boolean_t force = B_FALSE;
1977 boolean_t dryrun = B_FALSE;
1978 boolean_t enable_pool_features = B_TRUE;
1979
1980 int c;
1981 nvlist_t *nvroot = NULL;
1982 char *poolname;
1983 char *tname = NULL;
1984 int ret = 1;
1985 char *altroot = NULL;
1986 char *compat = NULL;
1987 char *mountpoint = NULL;
1988 nvlist_t *fsprops = NULL;
1989 nvlist_t *props = NULL;
1990 char *propval;
1991
1992 /* check options */
1993 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1994 switch (c) {
1995 case 'f':
1996 force = B_TRUE;
1997 break;
1998 case 'n':
1999 dryrun = B_TRUE;
2000 break;
2001 case 'd':
2002 enable_pool_features = B_FALSE;
2003 break;
2004 case 'R':
2005 altroot = optarg;
2006 if (add_prop_list(zpool_prop_to_name(
2007 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
2008 goto errout;
2009 if (add_prop_list_default(zpool_prop_to_name(
2010 ZPOOL_PROP_CACHEFILE), "none", &props))
2011 goto errout;
2012 break;
2013 case 'm':
2014 /* Equivalent to -O mountpoint=optarg */
2015 mountpoint = optarg;
2016 break;
2017 case 'o':
2018 if ((propval = strchr(optarg, '=')) == NULL) {
2019 (void) fprintf(stderr, gettext("missing "
2020 "'=' for -o option\n"));
2021 goto errout;
2022 }
2023 *propval = '\0';
2024 propval++;
2025
2026 if (add_prop_list(optarg, propval, &props, B_TRUE))
2027 goto errout;
2028
2029 /*
2030 * If the user is creating a pool that doesn't support
2031 * feature flags, don't enable any features.
2032 */
2033 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
2034 char *end;
2035 u_longlong_t ver;
2036
2037 ver = strtoull(propval, &end, 0);
2038 if (*end == '\0' &&
2039 ver < SPA_VERSION_FEATURES) {
2040 enable_pool_features = B_FALSE;
2041 }
2042 }
2043 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
2044 altroot = propval;
2045 if (zpool_name_to_prop(optarg) ==
2046 ZPOOL_PROP_COMPATIBILITY)
2047 compat = propval;
2048 break;
2049 case 'O':
2050 if ((propval = strchr(optarg, '=')) == NULL) {
2051 (void) fprintf(stderr, gettext("missing "
2052 "'=' for -O option\n"));
2053 goto errout;
2054 }
2055 *propval = '\0';
2056 propval++;
2057
2058 /*
2059 * Mountpoints are checked and then added later.
2060 * Uniquely among properties, they can be specified
2061 * more than once, to avoid conflict with -m.
2062 */
2063 if (0 == strcmp(optarg,
2064 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
2065 mountpoint = propval;
2066 } else if (add_prop_list(optarg, propval, &fsprops,
2067 B_FALSE)) {
2068 goto errout;
2069 }
2070 break;
2071 case 't':
2072 /*
2073 * Sanity check temporary pool name.
2074 */
2075 if (strchr(optarg, '/') != NULL) {
2076 (void) fprintf(stderr, gettext("cannot create "
2077 "'%s': invalid character '/' in temporary "
2078 "name\n"), optarg);
2079 (void) fprintf(stderr, gettext("use 'zfs "
2080 "create' to create a dataset\n"));
2081 goto errout;
2082 }
2083
2084 if (add_prop_list(zpool_prop_to_name(
2085 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
2086 goto errout;
2087 if (add_prop_list_default(zpool_prop_to_name(
2088 ZPOOL_PROP_CACHEFILE), "none", &props))
2089 goto errout;
2090 tname = optarg;
2091 break;
2092 case ':':
2093 (void) fprintf(stderr, gettext("missing argument for "
2094 "'%c' option\n"), optopt);
2095 goto badusage;
2096 case '?':
2097 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2098 optopt);
2099 goto badusage;
2100 }
2101 }
2102
2103 argc -= optind;
2104 argv += optind;
2105
2106 /* get pool name and check number of arguments */
2107 if (argc < 1) {
2108 (void) fprintf(stderr, gettext("missing pool name argument\n"));
2109 goto badusage;
2110 }
2111 if (argc < 2) {
2112 (void) fprintf(stderr, gettext("missing vdev specification\n"));
2113 goto badusage;
2114 }
2115
2116 poolname = argv[0];
2117
2118 /*
2119 * As a special case, check for use of '/' in the name, and direct the
2120 * user to use 'zfs create' instead.
2121 */
2122 if (strchr(poolname, '/') != NULL) {
2123 (void) fprintf(stderr, gettext("cannot create '%s': invalid "
2124 "character '/' in pool name\n"), poolname);
2125 (void) fprintf(stderr, gettext("use 'zfs create' to "
2126 "create a dataset\n"));
2127 goto errout;
2128 }
2129
2130 /* pass off to make_root_vdev for bulk processing */
2131 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
2132 argc - 1, argv + 1);
2133 if (nvroot == NULL)
2134 goto errout;
2135
2136 /* make_root_vdev() allows 0 toplevel children if there are spares */
2137 if (!zfs_allocatable_devs(nvroot)) {
2138 (void) fprintf(stderr, gettext("invalid vdev "
2139 "specification: at least one toplevel vdev must be "
2140 "specified\n"));
2141 goto errout;
2142 }
2143
2144 if (altroot != NULL && altroot[0] != '/') {
2145 (void) fprintf(stderr, gettext("invalid alternate root '%s': "
2146 "must be an absolute path\n"), altroot);
2147 goto errout;
2148 }
2149
2150 /*
2151 * Check the validity of the mountpoint and direct the user to use the
2152 * '-m' mountpoint option if it looks like it's in use.
2153 */
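	/*
	 * For example, creating pool "tank" without -m defaults the
	 * mountpoint to '/tank'; if that directory already exists and is
	 * non-empty, creation is refused and '-m' must be used instead.
	 */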
2154 if (mountpoint == NULL ||
2155 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
2156 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
2157 char buf[MAXPATHLEN];
2158 DIR *dirp;
2159
2160 if (mountpoint && mountpoint[0] != '/') {
2161 (void) fprintf(stderr, gettext("invalid mountpoint "
2162 "'%s': must be an absolute path, 'legacy', or "
2163 "'none'\n"), mountpoint);
2164 goto errout;
2165 }
2166
2167 if (mountpoint == NULL) {
2168 if (altroot != NULL)
2169 (void) snprintf(buf, sizeof (buf), "%s/%s",
2170 altroot, poolname);
2171 else
2172 (void) snprintf(buf, sizeof (buf), "/%s",
2173 poolname);
2174 } else {
2175 if (altroot != NULL)
2176 (void) snprintf(buf, sizeof (buf), "%s%s",
2177 altroot, mountpoint);
2178 else
2179 (void) snprintf(buf, sizeof (buf), "%s",
2180 mountpoint);
2181 }
2182
2183 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
2184 (void) fprintf(stderr, gettext("mountpoint '%s' : "
2185 "%s\n"), buf, strerror(errno));
2186 (void) fprintf(stderr, gettext("use '-m' "
2187 "option to provide a different default\n"));
2188 goto errout;
2189 } else if (dirp) {
2190 int count = 0;
2191
2192 while (count < 3 && readdir(dirp) != NULL)
2193 count++;
2194 (void) closedir(dirp);
2195
2196 if (count > 2) {
2197 (void) fprintf(stderr, gettext("mountpoint "
2198 "'%s' exists and is not empty\n"), buf);
2199 (void) fprintf(stderr, gettext("use '-m' "
2200 "option to provide a "
2201 "different default\n"));
2202 goto errout;
2203 }
2204 }
2205 }
2206
2207 /*
2208 * Now that the mountpoint's validity has been checked, ensure that
2209 * the property is set appropriately prior to creating the pool.
2210 */
2211 if (mountpoint != NULL) {
2212 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
2213 mountpoint, &fsprops, B_FALSE);
2214 if (ret != 0)
2215 goto errout;
2216 }
2217
2218 ret = 1;
2219 if (dryrun) {
2220 /*
2221 * For a dry run invocation, print out a basic message, then run
2222 * through all the vdevs in the list and print them out in an
2223 * appropriate hierarchy.
2224 */
2225 (void) printf(gettext("would create '%s' with the "
2226 "following layout:\n\n"), poolname);
2227
2228 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
2229 print_vdev_tree(NULL, "dedup", nvroot, 0,
2230 VDEV_ALLOC_BIAS_DEDUP, 0);
2231 print_vdev_tree(NULL, "special", nvroot, 0,
2232 VDEV_ALLOC_BIAS_SPECIAL, 0);
2233 print_vdev_tree(NULL, "logs", nvroot, 0,
2234 VDEV_ALLOC_BIAS_LOG, 0);
2235 print_cache_list(nvroot, 0);
2236 print_spare_list(nvroot, 0);
2237
2238 ret = 0;
2239 } else {
2240 /*
2241 * Load in feature set.
2242 * Note: if the compatibility property was not given, compat is
2243 * NULL, which means 'all features'.
2244 */
2245 boolean_t requested_features[SPA_FEATURES];
2246 if (zpool_do_load_compat(compat, requested_features) !=
2247 ZPOOL_COMPATIBILITY_OK)
2248 goto errout;
2249
2250 /*
2251 * props contains list of features to enable.
2252 * For each feature:
2253 * - remove it if feature@name=disabled
2254 * - leave it there if feature@name=enabled
2255 * - add it if:
2256 * - enable_pool_features (i.e. neither '-d' nor '-o version=N' given)
2257 * - it's supported by the kernel module
2258 * - it's in the requested feature set
2259 * - warn if it's enabled but not in compat
2260 */
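		/*
		 * Illustrative invocations:
		 *	zpool create -d -o feature@lz4_compress=enabled tank sda
		 * enables only lz4_compress, while
		 *	zpool create -o feature@async_destroy=disabled tank sda
		 * enables all supported, requested features except
		 * async_destroy.
		 */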
2261 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2262 char propname[MAXPATHLEN];
2263 const char *propval;
2264 zfeature_info_t *feat = &spa_feature_table[i];
2265
2266 (void) snprintf(propname, sizeof (propname),
2267 "feature@%s", feat->fi_uname);
2268
2269 if (!nvlist_lookup_string(props, propname, &propval)) {
2270 if (strcmp(propval,
2271 ZFS_FEATURE_DISABLED) == 0) {
2272 (void) nvlist_remove_all(props,
2273 propname);
2274 } else if (strcmp(propval,
2275 ZFS_FEATURE_ENABLED) == 0 &&
2276 !requested_features[i]) {
2277 (void) fprintf(stderr, gettext(
2278 "Warning: feature \"%s\" enabled "
2279 "but is not in specified "
2280 "'compatibility' feature set.\n"),
2281 feat->fi_uname);
2282 }
2283 } else if (
2284 enable_pool_features &&
2285 feat->fi_zfs_mod_supported &&
2286 requested_features[i]) {
2287 ret = add_prop_list(propname,
2288 ZFS_FEATURE_ENABLED, &props, B_TRUE);
2289 if (ret != 0)
2290 goto errout;
2291 }
2292 }
2293
2294 ret = 1;
2295 if (zpool_create(g_zfs, poolname,
2296 nvroot, props, fsprops) == 0) {
2297 zfs_handle_t *pool = zfs_open(g_zfs,
2298 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
2299 if (pool != NULL) {
2300 if (zfs_mount(pool, NULL, 0) == 0) {
2301 ret = zfs_share(pool, NULL);
2302 zfs_commit_shares(NULL);
2303 }
2304 zfs_close(pool);
2305 }
2306 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
2307 (void) fprintf(stderr, gettext("pool name may have "
2308 "been omitted\n"));
2309 }
2310 }
2311
2312 errout:
2313 nvlist_free(nvroot);
2314 nvlist_free(fsprops);
2315 nvlist_free(props);
2316 return (ret);
2317 badusage:
2318 nvlist_free(fsprops);
2319 nvlist_free(props);
2320 usage(B_FALSE);
2321 return (2);
2322 }
2323
2324 /*
2325 * zpool destroy <pool>
2326 *
2327 * -f Forcefully unmount any datasets
2328 *
2329 * Destroy the given pool. Automatically unmounts any datasets in the pool.
2330 */
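/*
 * Illustrative example: 'zpool destroy -f tank' forcibly unmounts any busy
 * datasets in 'tank' before destroying the pool.
 */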
2331 int
2332 zpool_do_destroy(int argc, char **argv)
2333 {
2334 boolean_t force = B_FALSE;
2335 int c;
2336 char *pool;
2337 zpool_handle_t *zhp;
2338 int ret;
2339
2340 /* check options */
2341 while ((c = getopt(argc, argv, "f")) != -1) {
2342 switch (c) {
2343 case 'f':
2344 force = B_TRUE;
2345 break;
2346 case '?':
2347 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2348 optopt);
2349 usage(B_FALSE);
2350 }
2351 }
2352
2353 argc -= optind;
2354 argv += optind;
2355
2356 /* check arguments */
2357 if (argc < 1) {
2358 (void) fprintf(stderr, gettext("missing pool argument\n"));
2359 usage(B_FALSE);
2360 }
2361 if (argc > 1) {
2362 (void) fprintf(stderr, gettext("too many arguments\n"));
2363 usage(B_FALSE);
2364 }
2365
2366 pool = argv[0];
2367
2368 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
2369 /*
2370 * As a special case, check for use of '/' in the name, and
2371 * direct the user to use 'zfs destroy' instead.
2372 */
2373 if (strchr(pool, '/') != NULL)
2374 (void) fprintf(stderr, gettext("use 'zfs destroy' to "
2375 "destroy a dataset\n"));
2376 return (1);
2377 }
2378
2379 if (zpool_disable_datasets(zhp, force) != 0) {
2380 (void) fprintf(stderr, gettext("could not destroy '%s': "
2381 "could not unmount datasets\n"), zpool_get_name(zhp));
2382 zpool_close(zhp);
2383 return (1);
2384 }
2385
2386 /* The history must be logged as part of the export */
2387 log_history = B_FALSE;
2388
2389 ret = (zpool_destroy(zhp, history_str) != 0);
2390
2391 zpool_close(zhp);
2392
2393 return (ret);
2394 }
2395
2396 typedef struct export_cbdata {
2397 tpool_t *tpool;
2398 pthread_mutex_t mnttab_lock;
2399 boolean_t force;
2400 boolean_t hardforce;
2401 int retval;
2402 } export_cbdata_t;
2403
2404
2405 typedef struct {
2406 char *aea_poolname;
2407 export_cbdata_t *aea_cbdata;
2408 } async_export_args_t;
2409
2410 /*
2411 * Export one pool
2412 */
2413 static int
2414 zpool_export_one(zpool_handle_t *zhp, void *data)
2415 {
2416 export_cbdata_t *cb = data;
2417
2418 /*
2419 * zpool_disable_datasets() is not thread-safe for mnttab access.
2420 * So we serialize access here for the 'zpool export -a' parallel case.
2421 */
2422 if (cb->tpool != NULL)
2423 pthread_mutex_lock(&cb->mnttab_lock);
2424
2425 int retval = zpool_disable_datasets(zhp, cb->force);
2426
2427 if (cb->tpool != NULL)
2428 pthread_mutex_unlock(&cb->mnttab_lock);
2429
2430 if (retval)
2431 return (1);
2432
2433 if (cb->hardforce) {
2434 if (zpool_export_force(zhp, history_str) != 0)
2435 return (1);
2436 } else if (zpool_export(zhp, cb->force, history_str) != 0) {
2437 return (1);
2438 }
2439
2440 return (0);
2441 }
2442
2443 /*
2444 * Asynchronous export request
2445 */
2446 static void
2447 zpool_export_task(void *arg)
2448 {
2449 async_export_args_t *aea = arg;
2450
2451 zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname);
2452 if (zhp != NULL) {
2453 int ret = zpool_export_one(zhp, aea->aea_cbdata);
2454 if (ret != 0)
2455 aea->aea_cbdata->retval = ret;
2456 zpool_close(zhp);
2457 } else {
2458 aea->aea_cbdata->retval = 1;
2459 }
2460
2461 free(aea->aea_poolname);
2462 free(aea);
2463 }
2464
2465 /*
2466 * Process an export request in parallel
2467 */
2468 static int
2469 zpool_export_one_async(zpool_handle_t *zhp, void *data)
2470 {
2471 tpool_t *tpool = ((export_cbdata_t *)data)->tpool;
2472 async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t));
2473
2474 /* save pool name since zhp will go out of scope */
2475 aea->aea_poolname = strdup(zpool_get_name(zhp));
2476 aea->aea_cbdata = data;
2477
2478 /* ship off actual export to another thread */
2479 if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0)
2480 return (errno); /* unlikely */
2481 else
2482 return (0);
2483 }
2484
2485 /*
2486 * zpool export [-a] [-f] [-F] <pool> ...
2487 *
2488 * -a Export all pools
2489 * -f Forcefully unmount datasets
 * -F Hard-force: export without updating the on-disk pool state, so
 * the pool is not marked as cleanly exported
2490 *
2491 * Export the given pools. By default, the command will attempt to cleanly
2492 * unmount any active datasets within the pool. If the '-f' flag is specified,
2493 * then the datasets will be forcefully unmounted.
2494 */
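/*
 * Illustrative examples:
 *	# zpool export tank
 *	# zpool export -a	(all pools are exported in parallel)
 */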
2495 int
2496 zpool_do_export(int argc, char **argv)
2497 {
2498 export_cbdata_t cb;
2499 boolean_t do_all = B_FALSE;
2500 boolean_t force = B_FALSE;
2501 boolean_t hardforce = B_FALSE;
2502 int c, ret;
2503
2504 /* check options */
2505 while ((c = getopt(argc, argv, "afF")) != -1) {
2506 switch (c) {
2507 case 'a':
2508 do_all = B_TRUE;
2509 break;
2510 case 'f':
2511 force = B_TRUE;
2512 break;
2513 case 'F':
2514 hardforce = B_TRUE;
2515 break;
2516 case '?':
2517 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2518 optopt);
2519 usage(B_FALSE);
2520 }
2521 }
2522
2523 cb.force = force;
2524 cb.hardforce = hardforce;
2525 cb.tpool = NULL;
2526 cb.retval = 0;
2527 argc -= optind;
2528 argv += optind;
2529
2530 /* The history will be logged as part of the export itself */
2531 log_history = B_FALSE;
2532
2533 if (do_all) {
2534 if (argc != 0) {
2535 (void) fprintf(stderr, gettext("too many arguments\n"));
2536 usage(B_FALSE);
2537 }
2538
2539 cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
2540 0, NULL);
2541 pthread_mutex_init(&cb.mnttab_lock, NULL);
2542
2543 /* Asynchronously call zpool_export_one using thread pool */
2544 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2545 B_FALSE, zpool_export_one_async, &cb);
2546
2547 tpool_wait(cb.tpool);
2548 tpool_destroy(cb.tpool);
2549 (void) pthread_mutex_destroy(&cb.mnttab_lock);
2550
2551 return (ret | cb.retval);
2552 }
2553
2554 /* check arguments */
2555 if (argc < 1) {
2556 (void) fprintf(stderr, gettext("missing pool argument\n"));
2557 usage(B_FALSE);
2558 }
2559
2560 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2561 B_FALSE, zpool_export_one, &cb);
2562
2563 return (ret);
2564 }
2565
2566 /*
2567 * Given a vdev configuration, determine the maximum width needed for the device
2568 * name column.
2569 */
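/*
 * For example, a child vdev named "mirror-0" at depth 2 contributes
 * strlen("mirror-0") + 2 = 10 to the computed column width.
 */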
2570 static int
2571 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
2572 int name_flags)
2573 {
2574 static const char *const subtypes[] =
2575 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
2576
2577 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
2578 max = MAX(strlen(name) + depth, max);
2579 free(name);
2580
2581 nvlist_t **child;
2582 uint_t children;
2583 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
2584 if (nvlist_lookup_nvlist_array(nv, subtypes[i],
2585 &child, &children) == 0)
2586 for (uint_t c = 0; c < children; ++c)
2587 max = MAX(max_width(zhp, child[c], depth + 2,
2588 max, name_flags), max);
2589
2590 return (max);
2591 }
2592
2593 typedef struct status_cbdata {
2594 int cb_count;
2595 int cb_name_flags;
2596 int cb_namewidth;
2597 boolean_t cb_allpools;
2598 boolean_t cb_verbose;
2599 boolean_t cb_literal;
2600 boolean_t cb_explain;
2601 boolean_t cb_first;
2602 boolean_t cb_dedup_stats;
2603 boolean_t cb_print_unhealthy;
2604 boolean_t cb_print_status;
2605 boolean_t cb_print_slow_ios;
2606 boolean_t cb_print_dio_verify;
2607 boolean_t cb_print_vdev_init;
2608 boolean_t cb_print_vdev_trim;
2609 vdev_cmd_data_list_t *vcdl;
2610 boolean_t cb_print_power;
2611 boolean_t cb_json;
2612 boolean_t cb_flat_vdevs;
2613 nvlist_t *cb_jsobj;
2614 boolean_t cb_json_as_int;
2615 boolean_t cb_json_pool_key_guid;
2616 } status_cbdata_t;
2617
2618 /* Return B_TRUE if string is NULL, empty, or whitespace; B_FALSE otherwise. */
2619 static boolean_t
2620 is_blank_str(const char *str)
2621 {
2622 for (; str != NULL && *str != '\0'; ++str)
2623 if (!isblank(*str))
2624 return (B_FALSE);
2625 return (B_TRUE);
2626 }
2627
2628 static void
2629 zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,
2630 nvlist_t *item)
2631 {
2632 vdev_cmd_data_t *data;
2633 int i, j, k = 1;
2634 char tmp[256];
2635 const char *val;
2636
2637 for (i = 0; i < vcdl->count; i++) {
2638 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2639 (strcmp(vcdl->data[i].pool, pool) != 0))
2640 continue;
2641
2642 data = &vcdl->data[i];
2643 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2644 val = NULL;
2645 for (int k = 0; k < data->cols_cnt; k++) {
2646 if (strcmp(data->cols[k],
2647 vcdl->uniq_cols[j]) == 0) {
2648 val = data->lines[k];
2649 break;
2650 }
2651 }
2652 if (val == NULL || is_blank_str(val))
2653 val = "-";
2654 fnvlist_add_string(item, vcdl->uniq_cols[j], val);
2655 }
2656
2657 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2658 if (data->lines[j]) {
2659 snprintf(tmp, 256, "extra_%d", k++);
2660 fnvlist_add_string(item, tmp,
2661 data->lines[j]);
2662 }
2663 }
2664 break;
2665 }
2666 }
2667
2668 /* Print command output lines for specific vdev in a specific pool */
2669 static void
2670 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
2671 {
2672 vdev_cmd_data_t *data;
2673 int i, j;
2674 const char *val;
2675
2676 for (i = 0; i < vcdl->count; i++) {
2677 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2678 (strcmp(vcdl->data[i].pool, pool) != 0)) {
2679 /* Not the vdev we're looking for */
2680 continue;
2681 }
2682
2683 data = &vcdl->data[i];
2684 /* Print out all the output values for this vdev */
2685 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2686 val = NULL;
2687 /* Does this vdev have values for this column? */
2688 for (int k = 0; k < data->cols_cnt; k++) {
2689 if (strcmp(data->cols[k],
2690 vcdl->uniq_cols[j]) == 0) {
2691 /* yes it does, record the value */
2692 val = data->lines[k];
2693 break;
2694 }
2695 }
2696 /*
2697 * Mark empty values with dashes to make output
2698 * awk-able.
2699 */
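			/*
			 * e.g. for 'zpool status -c <scripts>', a vdev whose
			 * script emitted no value for some column prints "-"
			 * there, so awk '{print $NF}' still sees a fixed
			 * number of fields per line.
			 */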
2700 if (val == NULL || is_blank_str(val))
2701 val = "-";
2702
2703 printf("%*s", vcdl->uniq_cols_width[j], val);
2704 if (j < vcdl->uniq_cols_cnt - 1)
2705 fputs(" ", stdout);
2706 }
2707
2708 /* Print out any values that aren't in a column at the end */
2709 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2710 /* Did we have any columns? If so print a spacer. */
2711 if (vcdl->uniq_cols_cnt > 0)
2712 fputs(" ", stdout);
2713
2714 val = data->lines[j];
2715 fputs(val ?: "", stdout);
2716 }
2717 break;
2718 }
2719 }
2720
2721 /*
2722 * Print vdev initialization status for leaves
2723 */
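/*
 * Illustrative output fragment appended to a status line:
 *	" (47% initialized, started at Sat Mar  2 13:00:00 2024)"
 */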
2724 static void
2725 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2726 {
2727 if (verbose) {
2728 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2729 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2730 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2731 !vs->vs_scan_removing) {
2732 char zbuf[1024];
2733 char tbuf[256];
2734
2735 time_t t = vs->vs_initialize_action_time;
2736 int initialize_pct = 100;
2737 if (vs->vs_initialize_state !=
2738 VDEV_INITIALIZE_COMPLETE) {
2739 initialize_pct = (vs->vs_initialize_bytes_done *
2740 100 / (vs->vs_initialize_bytes_est + 1));
2741 }
2742
2743 (void) ctime_r(&t, tbuf);
2744 tbuf[24] = 0;
2745
2746 switch (vs->vs_initialize_state) {
2747 case VDEV_INITIALIZE_SUSPENDED:
2748 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2749 gettext("suspended, started at"), tbuf);
2750 break;
2751 case VDEV_INITIALIZE_ACTIVE:
2752 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2753 gettext("started at"), tbuf);
2754 break;
2755 case VDEV_INITIALIZE_COMPLETE:
2756 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2757 gettext("completed at"), tbuf);
2758 break;
2759 }
2760
2761 (void) printf(gettext(" (%d%% initialized%s)"),
2762 initialize_pct, zbuf);
2763 } else {
2764 (void) printf(gettext(" (uninitialized)"));
2765 }
2766 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2767 (void) printf(gettext(" (initializing)"));
2768 }
2769 }
2770
2771 /*
2772 * Print vdev TRIM status for leaves
2773 */
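/*
 * Illustrative output fragments: " (100% trimmed, completed at ...)",
 * " (trim unsupported)" for devices without TRIM support, or
 * " (trimming)" in the non-verbose case.
 */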
2774 static void
2775 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2776 {
2777 if (verbose) {
2778 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2779 vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2780 vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2781 !vs->vs_scan_removing) {
2782 char zbuf[1024];
2783 char tbuf[256];
2784
2785 time_t t = vs->vs_trim_action_time;
2786 int trim_pct = 100;
2787 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
2788 trim_pct = (vs->vs_trim_bytes_done *
2789 100 / (vs->vs_trim_bytes_est + 1));
2790 }
2791
2792 (void) ctime_r(&t, tbuf);
2793 tbuf[24] = 0;
2794
2795 switch (vs->vs_trim_state) {
2796 case VDEV_TRIM_SUSPENDED:
2797 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2798 gettext("suspended, started at"), tbuf);
2799 break;
2800 case VDEV_TRIM_ACTIVE:
2801 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2802 gettext("started at"), tbuf);
2803 break;
2804 case VDEV_TRIM_COMPLETE:
2805 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2806 gettext("completed at"), tbuf);
2807 break;
2808 }
2809
2810 (void) printf(gettext(" (%d%% trimmed%s)"),
2811 trim_pct, zbuf);
2812 } else if (vs->vs_trim_notsup) {
2813 (void) printf(gettext(" (trim unsupported)"));
2814 } else {
2815 (void) printf(gettext(" (untrimmed)"));
2816 }
2817 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2818 (void) printf(gettext(" (trimming)"));
2819 }
2820 }
2821
2822 /*
2823 * Return the color associated with a health string. This includes returning
2824 * NULL for no color change.
2825 */
2826 static const char *
2827 health_str_to_color(const char *health)
2828 {
2829 if (strcmp(health, gettext("FAULTED")) == 0 ||
2830 strcmp(health, gettext("SUSPENDED")) == 0 ||
2831 strcmp(health, gettext("UNAVAIL")) == 0) {
2832 return (ANSI_RED);
2833 }
2834
2835 if (strcmp(health, gettext("OFFLINE")) == 0 ||
2836 strcmp(health, gettext("DEGRADED")) == 0 ||
2837 strcmp(health, gettext("REMOVED")) == 0) {
2838 return (ANSI_YELLOW);
2839 }
2840
2841 return (NULL);
2842 }
2843
2844 /*
2845 * Called for each leaf vdev. Returns 0 if the vdev is healthy.
2846 * A vdev is unhealthy if any of the following are true:
2847 * 1) there are read, write, or checksum errors,
2848 * 2) its state is not ONLINE, or
2849 * 3) slow IO reporting was requested (-s) and there are slow IOs.
2850 */
2851 static int
2852 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
2853 {
2854 status_cbdata_t *cb = data;
2855 vdev_stat_t *vs;
2856 uint_t vsc;
2857 (void) hdl_data;
2858
2859 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2860 (uint64_t **)&vs, &vsc) != 0)
2861 return (1);
2862
2863 if (vs->vs_checksum_errors || vs->vs_read_errors ||
2864 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
2865 return (1);
2866
2867 if (cb->cb_print_slow_ios && vs->vs_slow_ios)
2868 return (1);
2869
2870 return (0);
2871 }
2872
2873 /*
2874 * Print out configuration state as requested by status_callback.
2875 */
2876 static void
2877 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
2878 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
2879 {
2880 nvlist_t **child, *root;
2881 uint_t c, i, vsc, children;
2882 pool_scan_stat_t *ps = NULL;
2883 vdev_stat_t *vs;
2884 char rbuf[6], wbuf[6], cbuf[6], dbuf[6];
2885 char *vname;
2886 uint64_t notpresent;
2887 spare_cbdata_t spare_cb;
2888 const char *state;
2889 const char *type;
2890 const char *path = NULL;
2891 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
2892 *scolor = NULL;
2893
2894 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2895 &child, &children) != 0)
2896 children = 0;
2897
2898 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2899 (uint64_t **)&vs, &vsc) == 0);
2900
2901 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2902
2903 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2904 return;
2905
2906 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2907
2908 if (isspare) {
2909 /*
2910 * For hot spares, we use the terms 'INUSE' and 'AVAIL' for
2911 * online drives.
2912 */
2913 if (vs->vs_aux == VDEV_AUX_SPARED)
2914 state = gettext("INUSE");
2915 else if (vs->vs_state == VDEV_STATE_HEALTHY)
2916 state = gettext("AVAIL");
2917 }
2918
2919 /*
2920 * If '-e' is specified then top-level vdevs and their children
2921 * can be pruned if all of their leaves are healthy.
2922 */
2923 if (cb->cb_print_unhealthy && depth > 0 &&
2924 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
2925 return;
2926 }
2927
2928 printf_color(health_str_to_color(state),
2929 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
2930 name, state);
2931
2932 if (!isspare) {
2933 if (vs->vs_read_errors)
2934 rcolor = ANSI_RED;
2935
2936 if (vs->vs_write_errors)
2937 wcolor = ANSI_RED;
2938
2939 if (vs->vs_checksum_errors)
2940 ccolor = ANSI_RED;
2941
2942 if (vs->vs_slow_ios)
2943 scolor = ANSI_BLUE;
2944
2945 if (cb->cb_literal) {
2946 fputc(' ', stdout);
2947 printf_color(rcolor, "%5llu",
2948 (u_longlong_t)vs->vs_read_errors);
2949 fputc(' ', stdout);
2950 printf_color(wcolor, "%5llu",
2951 (u_longlong_t)vs->vs_write_errors);
2952 fputc(' ', stdout);
2953 printf_color(ccolor, "%5llu",
2954 (u_longlong_t)vs->vs_checksum_errors);
2955 } else {
2956 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
2957 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
2958 zfs_nicenum(vs->vs_checksum_errors, cbuf,
2959 sizeof (cbuf));
2960 fputc(' ', stdout);
2961 printf_color(rcolor, "%5s", rbuf);
2962 fputc(' ', stdout);
2963 printf_color(wcolor, "%5s", wbuf);
2964 fputc(' ', stdout);
2965 printf_color(ccolor, "%5s", cbuf);
2966 }
2967 if (cb->cb_print_slow_ios) {
2968 if (children == 0) {
2969 /* Only leaf vdevs have slow IOs */
2970 zfs_nicenum(vs->vs_slow_ios, rbuf,
2971 sizeof (rbuf));
2972 } else {
2973 snprintf(rbuf, sizeof (rbuf), "-");
2974 }
2975
2976 if (cb->cb_literal)
2977 printf_color(scolor, " %5llu",
2978 (u_longlong_t)vs->vs_slow_ios);
2979 else
2980 printf_color(scolor, " %5s", rbuf);
2981 }
2982 if (cb->cb_print_power) {
2983 if (children == 0) {
2984 /* Only leaf vdevs have physical slots */
2985 switch (zpool_power_current_state(zhp, (char *)
2986 fnvlist_lookup_string(nv,
2987 ZPOOL_CONFIG_PATH))) {
2988 case 0:
2989 printf_color(ANSI_RED, " %5s",
2990 gettext("off"));
2991 break;
2992 case 1:
2993 printf(" %5s", gettext("on"));
2994 break;
2995 default:
2996 printf(" %5s", "-");
2997 }
2998 } else {
2999 printf(" %5s", "-");
3000 }
3001 }
3002 if (VDEV_STAT_VALID(vs_dio_verify_errors, vsc) &&
3003 cb->cb_print_dio_verify) {
3004 zfs_nicenum(vs->vs_dio_verify_errors, dbuf,
3005 sizeof (dbuf));
3006
3007 if (cb->cb_literal)
3008 printf(" %5llu",
3009 (u_longlong_t)vs->vs_dio_verify_errors);
3010 else
3011 printf(" %5s", dbuf);
3012 }
3013 }
3014
3015 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3016 &notpresent) == 0) {
3017 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
3018 (void) printf(" %s %s", gettext("was"), path);
3019 } else if (vs->vs_aux != 0) {
3020 (void) printf(" ");
3021 color_start(ANSI_RED);
3022 switch (vs->vs_aux) {
3023 case VDEV_AUX_OPEN_FAILED:
3024 (void) printf(gettext("cannot open"));
3025 break;
3026
3027 case VDEV_AUX_BAD_GUID_SUM:
3028 (void) printf(gettext("missing device"));
3029 break;
3030
3031 case VDEV_AUX_NO_REPLICAS:
3032 (void) printf(gettext("insufficient replicas"));
3033 break;
3034
3035 case VDEV_AUX_VERSION_NEWER:
3036 (void) printf(gettext("newer version"));
3037 break;
3038
3039 case VDEV_AUX_UNSUP_FEAT:
3040 (void) printf(gettext("unsupported feature(s)"));
3041 break;
3042
3043 case VDEV_AUX_ASHIFT_TOO_BIG:
3044 (void) printf(gettext("unsupported minimum blocksize"));
3045 break;
3046
3047 case VDEV_AUX_SPARED:
3048 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3049 &spare_cb.cb_guid) == 0);
3050 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
3051 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
3052 zpool_get_name(zhp)) == 0)
3053 (void) printf(gettext("currently in "
3054 "use"));
3055 else
3056 (void) printf(gettext("in use by "
3057 "pool '%s'"),
3058 zpool_get_name(spare_cb.cb_zhp));
3059 zpool_close(spare_cb.cb_zhp);
3060 } else {
3061 (void) printf(gettext("currently in use"));
3062 }
3063 break;
3064
3065 case VDEV_AUX_ERR_EXCEEDED:
3066 if (vs->vs_read_errors + vs->vs_write_errors +
3067 vs->vs_checksum_errors == 0 && children == 0 &&
3068 vs->vs_slow_ios > 0) {
3069 (void) printf(gettext("too many slow I/Os"));
3070 } else {
3071 (void) printf(gettext("too many errors"));
3072 }
3073 break;
3074
3075 case VDEV_AUX_IO_FAILURE:
3076 (void) printf(gettext("experienced I/O failures"));
3077 break;
3078
3079 case VDEV_AUX_BAD_LOG:
3080 (void) printf(gettext("bad intent log"));
3081 break;
3082
3083 case VDEV_AUX_EXTERNAL:
3084 (void) printf(gettext("external device fault"));
3085 break;
3086
3087 case VDEV_AUX_SPLIT_POOL:
3088 (void) printf(gettext("split into new pool"));
3089 break;
3090
3091 case VDEV_AUX_ACTIVE:
3092 (void) printf(gettext("currently in use"));
3093 break;
3094
3095 case VDEV_AUX_CHILDREN_OFFLINE:
3096 (void) printf(gettext("all children offline"));
3097 break;
3098
3099 case VDEV_AUX_BAD_LABEL:
3100 (void) printf(gettext("invalid label"));
3101 break;
3102
3103 default:
3104 (void) printf(gettext("corrupted data"));
3105 break;
3106 }
3107 color_end();
3108 } else if (children == 0 && !isspare &&
3109 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
3110 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
3111 vs->vs_configured_ashift < vs->vs_physical_ashift) {
3112 (void) printf(
3113 gettext(" block size: %dB configured, %dB native"),
3114 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
3115 }
3116
3117 if (vs->vs_scan_removing != 0) {
3118 (void) printf(gettext(" (removing)"));
3119 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
3120 (void) printf(gettext(" (non-allocating)"));
3121 }
3122
3123 /* The root vdev has the scrub/resilver stats */
3124 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3125 ZPOOL_CONFIG_VDEV_TREE);
3126 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
3127 (uint64_t **)&ps, &c);
3128
3129 /*
3130 * If you force fault a drive that's resilvering, its scan stats can
3131 * get frozen in time, giving the false impression that it's
3132 * being resilvered. That's why we check the state to see if the vdev
3133 * is healthy before reporting "resilvering" or "repairing".
3134 */
3135 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
3136 vs->vs_state == VDEV_STATE_HEALTHY) {
3137 if (vs->vs_scan_processed != 0) {
3138 (void) printf(gettext(" (%s)"),
3139 (ps->pss_func == POOL_SCAN_RESILVER) ?
3140 "resilvering" : "repairing");
3141 } else if (vs->vs_resilver_deferred) {
3142 (void) printf(gettext(" (awaiting resilver)"));
3143 }
3144 }
3145
3146 /* The top-level vdevs have the rebuild stats */
3147 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
3148 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
3149 if (vs->vs_rebuild_processed != 0) {
3150 (void) printf(gettext(" (resilvering)"));
3151 }
3152 }
3153
3154 if (cb->vcdl != NULL) {
3155 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3156 printf(" ");
3157 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
3158 }
3159 }
3160
3161 /* Display vdev initialization and trim status for leaves. */
3162 if (children == 0) {
3163 print_status_initialize(vs, cb->cb_print_vdev_init);
3164 print_status_trim(vs, cb->cb_print_vdev_trim);
3165 }
3166
3167 (void) printf("\n");
3168
3169 for (c = 0; c < children; c++) {
3170 uint64_t islog = B_FALSE, ishole = B_FALSE;
3171
3172 /* Don't print logs or holes here */
3173 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3174 &islog);
3175 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3176 &ishole);
3177 if (islog || ishole)
3178 continue;
3179 /* Only print normal classes here */
3180 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
3181 continue;
3182
3183 /* Provide vdev_rebuild_stats to children if available */
3184 if (vrs == NULL) {
3185 (void) nvlist_lookup_uint64_array(nv,
3186 ZPOOL_CONFIG_REBUILD_STATS,
3187 (uint64_t **)&vrs, &i);
3188 }
3189
3190 vname = zpool_vdev_name(g_zfs, zhp, child[c],
3191 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3192 print_status_config(zhp, cb, vname, child[c], depth + 2,
3193 isspare, vrs);
3194 free(vname);
3195 }
3196 }
3197
3198 /*
3199 * Print the configuration of an exported pool. Iterate over all vdevs in the
3200 * pool, printing out the name and status for each one.
3201 */
3202 static void
3203 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
3204 int depth)
3205 {
3206 nvlist_t **child;
3207 uint_t c, children;
3208 vdev_stat_t *vs;
3209 const char *type;
3210 char *vname;
3211
3212 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3213 if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
3214 strcmp(type, VDEV_TYPE_HOLE) == 0)
3215 return;
3216
3217 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3218 (uint64_t **)&vs, &c) == 0);
3219
3220 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
3221 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
3222
3223 if (vs->vs_aux != 0) {
3224 (void) printf(" ");
3225
3226 switch (vs->vs_aux) {
3227 case VDEV_AUX_OPEN_FAILED:
3228 (void) printf(gettext("cannot open"));
3229 break;
3230
3231 case VDEV_AUX_BAD_GUID_SUM:
3232 (void) printf(gettext("missing device"));
3233 break;
3234
3235 case VDEV_AUX_NO_REPLICAS:
3236 (void) printf(gettext("insufficient replicas"));
3237 break;
3238
3239 case VDEV_AUX_VERSION_NEWER:
3240 (void) printf(gettext("newer version"));
3241 break;
3242
3243 case VDEV_AUX_UNSUP_FEAT:
3244 (void) printf(gettext("unsupported feature(s)"));
3245 break;
3246
3247 case VDEV_AUX_ERR_EXCEEDED:
3248 (void) printf(gettext("too many errors"));
3249 break;
3250
3251 case VDEV_AUX_ACTIVE:
3252 (void) printf(gettext("currently in use"));
3253 break;
3254
3255 case VDEV_AUX_CHILDREN_OFFLINE:
3256 (void) printf(gettext("all children offline"));
3257 break;
3258
3259 case VDEV_AUX_BAD_LABEL:
3260 (void) printf(gettext("invalid label"));
3261 break;
3262
3263 default:
3264 (void) printf(gettext("corrupted data"));
3265 break;
3266 }
3267 }
3268 (void) printf("\n");
3269
3270 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
3271 &child, &children) != 0)
3272 return;
3273
3274 for (c = 0; c < children; c++) {
3275 uint64_t is_log = B_FALSE;
3276
3277 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3278 &is_log);
3279 if (is_log)
3280 continue;
3281 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
3282 continue;
3283
3284 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3285 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3286 print_import_config(cb, vname, child[c], depth + 2);
3287 free(vname);
3288 }
3289
3290 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
3291 &child, &children) == 0) {
3292 (void) printf(gettext("\tcache\n"));
3293 for (c = 0; c < children; c++) {
3294 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3295 cb->cb_name_flags);
3296 (void) printf("\t %s\n", vname);
3297 free(vname);
3298 }
3299 }
3300
3301 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
3302 &child, &children) == 0) {
3303 (void) printf(gettext("\tspares\n"));
3304 for (c = 0; c < children; c++) {
3305 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3306 cb->cb_name_flags);
3307 (void) printf("\t %s\n", vname);
3308 free(vname);
3309 }
3310 }
3311 }
3312
3313 /*
3314 * Print specialized class vdevs.
3315 *
3316 * These are recorded as top level vdevs in the main pool child array
3317 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
3318 * print_status_config() or print_import_config() to print the top-level
3319 * class vdevs; any of their children (e.g. mirrored slogs) are then
3320 * printed recursively, which works because only the top-level vdev is marked.
3321 */
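/*
 * Illustrative 'zpool status' fragment printed here for the "dedup" class:
 *
 *	dedup
 *	  mirror-1  ONLINE       0     0     0
 *	    sdc     ONLINE       0     0     0
 *	    sdd     ONLINE       0     0     0
 */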
3322 static void
3323 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
3324 const char *class)
3325 {
3326 uint_t c, children;
3327 nvlist_t **child;
3328 boolean_t printed = B_FALSE;
3329
3330 assert(zhp != NULL || !cb->cb_verbose);
3331
3332 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
3333 &children) != 0)
3334 return;
3335
3336 for (c = 0; c < children; c++) {
3337 uint64_t is_log = B_FALSE;
3338 const char *bias = NULL;
3339 const char *type = NULL;
3340
3341 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3342 &is_log);
3343
3344 if (is_log) {
3345 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
3346 } else {
3347 (void) nvlist_lookup_string(child[c],
3348 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
3349 (void) nvlist_lookup_string(child[c],
3350 ZPOOL_CONFIG_TYPE, &type);
3351 }
3352
3353 if (bias == NULL || strcmp(bias, class) != 0)
3354 continue;
3355 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
3356 continue;
3357
3358 if (!printed) {
3359 (void) printf("\t%s\t\n", gettext(class));
3360 printed = B_TRUE;
3361 }
3362
3363 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
3364 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3365 if (cb->cb_print_status)
3366 print_status_config(zhp, cb, name, child[c], 2,
3367 B_FALSE, NULL);
3368 else
3369 print_import_config(cb, name, child[c], 2);
3370 free(name);
3371 }
3372 }
3373
3374 /*
3375 * Display the status for the given pool.
3376 */
3377 static int
3378 show_import(nvlist_t *config, boolean_t report_error)
3379 {
3380 uint64_t pool_state;
3381 vdev_stat_t *vs;
3382 const char *name;
3383 uint64_t guid;
3384 uint64_t hostid = 0;
3385 const char *msgid;
3386 const char *hostname = "unknown";
3387 nvlist_t *nvroot, *nvinfo;
3388 zpool_status_t reason;
3389 zpool_errata_t errata;
3390 const char *health;
3391 uint_t vsc;
3392 const char *comment;
3393 const char *indent;
3394 char buf[2048];
3395 status_cbdata_t cb = { 0 };
3396
3397 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3398 &name) == 0);
3399 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3400 &guid) == 0);
3401 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3402 &pool_state) == 0);
3403 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3404 &nvroot) == 0);
3405
3406 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
3407 (uint64_t **)&vs, &vsc) == 0);
3408 health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
3409
3410 reason = zpool_import_status(config, &msgid, &errata);
3411
3412 /*
3413 * If we're importing using a cachefile, then we won't report any
3414 * errors unless we are in the scan phase of the import.
3415 */
3416 if (reason != ZPOOL_STATUS_OK && !report_error)
3417 return (reason);
3418
3419 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) {
3420 indent = " ";
3421 } else {
3422 comment = NULL;
3423 indent = "";
3424 }
3425
3426 (void) printf(gettext("%s pool: %s\n"), indent, name);
3427 (void) printf(gettext("%s id: %llu\n"), indent, (u_longlong_t)guid);
3428 (void) printf(gettext("%s state: %s"), indent, health);
3429 if (pool_state == POOL_STATE_DESTROYED)
3430 (void) printf(gettext(" (DESTROYED)"));
3431 (void) printf("\n");
3432
3433 if (reason != ZPOOL_STATUS_OK) {
3434 (void) printf("%s", indent);
3435 printf_color(ANSI_BOLD, gettext("status: "));
3436 }
3437 switch (reason) {
3438 case ZPOOL_STATUS_MISSING_DEV_R:
3439 case ZPOOL_STATUS_MISSING_DEV_NR:
3440 case ZPOOL_STATUS_BAD_GUID_SUM:
3441 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3442 "missing from the system.\n"));
3443 break;
3444
3445 case ZPOOL_STATUS_CORRUPT_LABEL_R:
3446 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
3447 printf_color(ANSI_YELLOW, gettext("One or more devices "
3448 "contain corrupted data.\n"));
3449 break;
3450
3451 case ZPOOL_STATUS_CORRUPT_DATA:
3452 printf_color(ANSI_YELLOW, gettext("The pool data is "
3453 "corrupted.\n"));
3454 break;
3455
3456 case ZPOOL_STATUS_OFFLINE_DEV:
3457 printf_color(ANSI_YELLOW, gettext("One or more devices "
3458 "are offlined.\n"));
3459 break;
3460
3461 case ZPOOL_STATUS_CORRUPT_POOL:
3462 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
3463 "corrupted.\n"));
3464 break;
3465
3466 case ZPOOL_STATUS_VERSION_OLDER:
3467 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3468 "a legacy on-disk version.\n"));
3469 break;
3470
3471 case ZPOOL_STATUS_VERSION_NEWER:
3472 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3473 "an incompatible version.\n"));
3474 break;
3475
3476 case ZPOOL_STATUS_FEAT_DISABLED:
3477 printf_color(ANSI_YELLOW, gettext("Some supported "
3478 "features are not enabled on the pool.\n"
3479 "\t%s(Note that they may be intentionally disabled if the\n"
3480 "\t%s'compatibility' property is set.)\n"), indent, indent);
3481 break;
3482
3483 case ZPOOL_STATUS_COMPATIBILITY_ERR:
3484 printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
3485 "the file(s) indicated by the 'compatibility'\n"
3486 "\t%sproperty.\n"), indent);
3487 break;
3488
3489 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
3490 printf_color(ANSI_YELLOW, gettext("One or more features "
3491 "are enabled on the pool despite not being\n"
3492 "\t%srequested by the 'compatibility' property.\n"),
3493 indent);
3494 break;
3495
3496 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3497 printf_color(ANSI_YELLOW, gettext("The pool uses the following "
3498 "feature(s) not supported on this system:\n"));
3499 color_start(ANSI_YELLOW);
3500 zpool_collect_unsup_feat(config, buf, 2048);
3501 (void) printf("%s", buf);
3502 color_end();
3503 break;
3504
3505 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3506 printf_color(ANSI_YELLOW, gettext("The pool can only be "
3507 "accessed in read-only mode on this system. It\n"
3508 "\t%scannot be accessed in read-write mode because it uses "
3509 "the following\n"
3510 "\t%sfeature(s) not supported on this system:\n"),
3511 indent, indent);
3512 color_start(ANSI_YELLOW);
3513 zpool_collect_unsup_feat(config, buf, 2048);
3514 (void) printf("%s", buf);
3515 color_end();
3516 break;
3517
3518 case ZPOOL_STATUS_HOSTID_ACTIVE:
3519 printf_color(ANSI_YELLOW, gettext("The pool is currently "
3520 "imported by another system.\n"));
3521 break;
3522
3523 case ZPOOL_STATUS_HOSTID_REQUIRED:
3524 printf_color(ANSI_YELLOW, gettext("The pool has the "
3525 "multihost property on. It cannot\n"
3526 "\t%sbe safely imported when the system hostid is not "
3527 "set.\n"), indent);
3528 break;
3529
3530 case ZPOOL_STATUS_HOSTID_MISMATCH:
3531 printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
3532 "by another system.\n"));
3533 break;
3534
3535 case ZPOOL_STATUS_FAULTED_DEV_R:
3536 case ZPOOL_STATUS_FAULTED_DEV_NR:
3537 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3538 "faulted.\n"));
3539 break;
3540
3541 case ZPOOL_STATUS_BAD_LOG:
3542 printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
3543 "be read.\n"));
3544 break;
3545
3546 case ZPOOL_STATUS_RESILVERING:
3547 case ZPOOL_STATUS_REBUILDING:
3548 printf_color(ANSI_YELLOW, gettext("One or more devices were "
3549 "being resilvered.\n"));
3550 break;
3551
3552 case ZPOOL_STATUS_ERRATA:
3553 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
3554 errata);
3555 break;
3556
3557 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
3558 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3559 "configured to use a non-native block size.\n"
3560 "\t%sExpect reduced performance.\n"), indent);
3561 break;
3562
3563 default:
3564 /*
3565 * No other status can be seen when importing pools.
3566 */
3567 assert(reason == ZPOOL_STATUS_OK);
3568 }
3569
3570 /*
3571 * Print out an action according to the overall state of the pool.
3572 */
3573 if (vs->vs_state != VDEV_STATE_HEALTHY ||
3574 reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) {
3575 (void) printf("%s", indent);
3576 (void) printf(gettext("action: "));
3577 }
3578 if (vs->vs_state == VDEV_STATE_HEALTHY) {
3579 if (reason == ZPOOL_STATUS_VERSION_OLDER ||
3580 reason == ZPOOL_STATUS_FEAT_DISABLED) {
3581 (void) printf(gettext("The pool can be imported using "
3582 "its name or numeric identifier, though\n"
3583 "\t%ssome features will not be available without "
3584 "an explicit 'zpool upgrade'.\n"), indent);
3585 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
3586 (void) printf(gettext("The pool can be imported using "
3587 "its name or numeric\n"
3588 "\t%sidentifier, though the file(s) indicated by "
3589 "its 'compatibility'\n"
3590 "\t%sproperty cannot be parsed at this time.\n"),
3591 indent, indent);
3592 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
3593 (void) printf(gettext("The pool can be imported using "
3594 "its name or numeric identifier and\n"
3595 "\t%sthe '-f' flag.\n"), indent);
3596 } else if (reason == ZPOOL_STATUS_ERRATA) {
3597 switch (errata) {
3598 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
3599 (void) printf(gettext("The pool can be "
3600 "imported using its name or numeric "
3601 "identifier,\n"
3602 "\t%showever there is a compatibility "
3603 "issue which should be corrected\n"
3604 "\t%sby running 'zpool scrub'\n"),
3605 indent, indent);
3606 break;
3607
3608 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
3609 (void) printf(gettext("The pool cannot be "
3610 "imported with this version of ZFS due to\n"
3611 "\t%san active asynchronous destroy. "
3612 "Revert to an earlier version\n"
3613 "\t%sand allow the destroy to complete "
3614 "before updating.\n"), indent, indent);
3615 break;
3616
3617 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
3618 (void) printf(gettext("Existing encrypted "
3619 "datasets contain an on-disk "
3620 "incompatibility, which\n"
3621 "\t%sneeds to be corrected. Backup these "
3622 "datasets to new encrypted datasets\n"
3623 "\t%sand destroy the old ones.\n"),
3624 indent, indent);
3625 break;
3626
3627 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
3628 (void) printf(gettext("Existing encrypted "
3629 "snapshots and bookmarks contain an "
3630 "on-disk\n"
3631 "\t%sincompatibility. This may cause "
3632 "on-disk corruption if they are used\n"
3633 "\t%swith 'zfs recv'. To correct the "
3634 "issue, enable the bookmark_v2 feature.\n"
3635 "\t%sNo additional action is needed if "
3636 "there are no encrypted snapshots or\n"
3637 "\t%sbookmarks. If preserving the "
3638 "encrypted snapshots and bookmarks is\n"
3639 "\t%srequired, use a non-raw send to "
3640 "backup and restore them. Alternately,\n"
3641 "\t%sthey may be removed to resolve the "
3642 "incompatibility.\n"), indent, indent,
3643 indent, indent, indent, indent);
3644 break;
3645 default:
3646 /*
3647 * All errata must contain an action message.
3648 */
3649 assert(errata == ZPOOL_ERRATA_NONE);
3650 }
3651 } else {
3652 (void) printf(gettext("The pool can be imported using "
3653 "its name or numeric identifier.\n"));
3654 }
3655 } else if (vs->vs_state == VDEV_STATE_DEGRADED) {
3656 (void) printf(gettext("The pool can be imported despite "
3657 "missing or damaged devices. The\n"
3658 "\t%sfault tolerance of the pool may be compromised if "
3659 "imported.\n"), indent);
3660 } else {
3661 switch (reason) {
3662 case ZPOOL_STATUS_VERSION_NEWER:
3663 (void) printf(gettext("The pool cannot be imported. "
3664 "Access the pool on a system running newer\n"
3665 "\t%ssoftware, or recreate the pool from "
3666 "backup.\n"), indent);
3667 break;
3668 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3669 (void) printf(gettext("The pool cannot be imported. "
3670 "Access the pool on a system that supports\n"
3671 "\t%sthe required feature(s), or recreate the pool "
3672 "from backup.\n"), indent);
3673 break;
3674 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3675 (void) printf(gettext("The pool cannot be imported in "
3676 "read-write mode. Import the pool with\n"
3677 "\t%s'-o readonly=on', access the pool on a system "
3678 "that supports the\n"
3679 "\t%srequired feature(s), or recreate the pool "
3680 "from backup.\n"), indent, indent);
3681 break;
3682 case ZPOOL_STATUS_MISSING_DEV_R:
3683 case ZPOOL_STATUS_MISSING_DEV_NR:
3684 case ZPOOL_STATUS_BAD_GUID_SUM:
3685 (void) printf(gettext("The pool cannot be imported. "
3686 "Attach the missing\n"
3687 "\t%sdevices and try again.\n"), indent);
3688 break;
3689 case ZPOOL_STATUS_HOSTID_ACTIVE:
3690 VERIFY0(nvlist_lookup_nvlist(config,
3691 ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
3692
3693 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3694 hostname = fnvlist_lookup_string(nvinfo,
3695 ZPOOL_CONFIG_MMP_HOSTNAME);
3696
3697 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3698 hostid = fnvlist_lookup_uint64(nvinfo,
3699 ZPOOL_CONFIG_MMP_HOSTID);
3700
3701 (void) printf(gettext("The pool must be exported from "
3702 "%s (hostid=%"PRIx64")\n"
3703 "\t%sbefore it can be safely imported.\n"),
3704 hostname, hostid, indent);
3705 break;
3706 case ZPOOL_STATUS_HOSTID_REQUIRED:
3707 (void) printf(gettext("Set a unique system hostid with "
3708 "the zgenhostid(8) command.\n"));
3709 break;
3710 default:
3711 (void) printf(gettext("The pool cannot be imported due "
3712 "to damaged devices or data.\n"));
3713 }
3714 }
3715
3716 /* Print the comment attached to the pool. */
3717 if (comment != NULL)
3718 (void) printf(gettext("comment: %s\n"), comment);
3719
3720 /*
3721 * If the state is "closed" or "can't open", and the aux state
3722 * is "corrupt data":
3723 */
3724 if ((vs->vs_state == VDEV_STATE_CLOSED ||
3725 vs->vs_state == VDEV_STATE_CANT_OPEN) &&
3726 vs->vs_aux == VDEV_AUX_CORRUPT_DATA) {
3727 if (pool_state == POOL_STATE_DESTROYED)
3728 (void) printf(gettext("\t%sThe pool was destroyed, "
3729 "but can be imported using the '-Df' flags.\n"),
3730 indent);
3731 else if (pool_state != POOL_STATE_EXPORTED)
3732 (void) printf(gettext("\t%sThe pool may be active on "
3733 "another system, but can be imported using\n"
3734 "\t%sthe '-f' flag.\n"), indent, indent);
3735 }
3736
3737 if (msgid != NULL) {
3738 (void) printf(gettext("%s see: "
3739 "https://openzfs.github.io/openzfs-docs/msg/%s\n"),
3740 indent, msgid);
3741 }
3742
3743 (void) printf(gettext("%sconfig:\n\n"), indent);
3744
3745 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
3746 VDEV_NAME_TYPE_ID);
3747 if (cb.cb_namewidth < 10)
3748 cb.cb_namewidth = 10;
3749
3750 print_import_config(&cb, name, nvroot, 0);
3751
3752 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
3753 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
3754 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
3755
3756 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
3757 (void) printf(gettext("\n\t%sAdditional devices are known to "
3758 "be part of this pool, though their\n"
3759 "\t%sexact configuration cannot be determined.\n"),
3760 indent, indent);
3761 }
3762 return (0);
3763 }
3764
3765 static boolean_t
3766 zfs_force_import_required(nvlist_t *config)
3767 {
3768 uint64_t state;
3769 uint64_t hostid = 0;
3770 nvlist_t *nvinfo;
3771
3772 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3773 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3774
3775 /*
3776 * The hostid on LOAD_INFO comes from the MOS label via
3777 * spa_tryimport(). If it's not there then we're likely talking to an
3778 * older kernel, so use the top-level one, which will be from the label
3779 * discovered in zpool_find_import(), or if a cachefile is in use, the
3780 * local hostid.
3781 */
3782 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
3783 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
3784 &hostid);
3785
3786 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3787 return (B_TRUE);
3788
3789 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3790 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3791 ZPOOL_CONFIG_MMP_STATE);
3792
3793 if (mmp_state != MMP_STATE_INACTIVE)
3794 return (B_TRUE);
3795 }
3796
3797 return (B_FALSE);
3798 }
3799
3800 /*
3801 * Perform the import for the given configuration. This passes the heavy
3802 * lifting off to zpool_import_props(), and then mounts the datasets contained
3803 * within the pool.
3804 */
3805 static int
3806 do_import(nvlist_t *config, const char *newname, const char *mntopts,
3807 nvlist_t *props, int flags, uint_t mntthreads)
3808 {
3809 int ret = 0;
3810 int ms_status = 0;
3811 zpool_handle_t *zhp;
3812 const char *name;
3813 uint64_t version;
3814
3815 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
3816 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3817
3818 if (!SPA_VERSION_IS_SUPPORTED(version)) {
3819 (void) fprintf(stderr, gettext("cannot import '%s': pool "
3820 "is formatted using an unsupported ZFS version\n"), name);
3821 return (1);
3822 } else if (zfs_force_import_required(config) &&
3823 !(flags & ZFS_IMPORT_ANY_HOST)) {
3824 mmp_state_t mmp_state = MMP_STATE_INACTIVE;
3825 nvlist_t *nvinfo;
3826
3827 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3828 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
3829 mmp_state = fnvlist_lookup_uint64(nvinfo,
3830 ZPOOL_CONFIG_MMP_STATE);
3831
3832 if (mmp_state == MMP_STATE_ACTIVE) {
3833 const char *hostname = "<unknown>";
3834 uint64_t hostid = 0;
3835
3836 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3837 hostname = fnvlist_lookup_string(nvinfo,
3838 ZPOOL_CONFIG_MMP_HOSTNAME);
3839
3840 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3841 hostid = fnvlist_lookup_uint64(nvinfo,
3842 ZPOOL_CONFIG_MMP_HOSTID);
3843
3844 (void) fprintf(stderr, gettext("cannot import '%s': "
3845 "pool is imported on %s (hostid: "
3846 "0x%"PRIx64")\nExport the pool on the other "
3847 "system, then run 'zpool import'.\n"),
3848 name, hostname, hostid);
3849 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
3850 			(void) fprintf(stderr, gettext("cannot import '%s': "
3851 "pool has the multihost property on and the\n"
3852 "system's hostid is not set. Set a unique hostid "
3853 "with the zgenhostid(8) command.\n"), name);
3854 } else {
3855 const char *hostname = "<unknown>";
3856 time_t timestamp = 0;
3857 uint64_t hostid = 0;
3858
3859 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
3860 hostname = fnvlist_lookup_string(nvinfo,
3861 ZPOOL_CONFIG_HOSTNAME);
3862 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
3863 hostname = fnvlist_lookup_string(config,
3864 ZPOOL_CONFIG_HOSTNAME);
3865
3866 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
3867 timestamp = fnvlist_lookup_uint64(config,
3868 ZPOOL_CONFIG_TIMESTAMP);
3869
3870 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
3871 hostid = fnvlist_lookup_uint64(nvinfo,
3872 ZPOOL_CONFIG_HOSTID);
3873 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
3874 hostid = fnvlist_lookup_uint64(config,
3875 ZPOOL_CONFIG_HOSTID);
3876
3877 (void) fprintf(stderr, gettext("cannot import '%s': "
3878 "pool was previously in use from another system.\n"
3879 "Last accessed by %s (hostid=%"PRIx64") at %s"
3880 			    "The pool can be imported; use 'zpool import -f' "
3881 			    "to import it.\n"), name, hostname,
3882 			    hostid, ctime(&timestamp));
3883 }
3884
3885 return (1);
3886 }
3887
3888 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
3889 return (1);
3890
3891 if (newname != NULL)
3892 name = newname;
3893
3894 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
3895 return (1);
3896
3897 /*
3898 * Loading keys is best effort. We don't want to return immediately
3899 * if it fails but we do want to give the error to the caller.
3900 */
3901 if (flags & ZFS_IMPORT_LOAD_KEYS &&
3902 zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
3903 ret = 1;
3904
3905 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
3906 !(flags & ZFS_IMPORT_ONLY)) {
3907 ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads);
3908 if (ms_status == EZFS_SHAREFAILED) {
3909 (void) fprintf(stderr, gettext("Import was "
3910 "successful, but unable to share some datasets\n"));
3911 } else if (ms_status == EZFS_MOUNTFAILED) {
3912 (void) fprintf(stderr, gettext("Import was "
3913 "successful, but unable to mount some datasets\n"));
3914 }
3915 }
3916
3917 zpool_close(zhp);
3918 return (ret);
3919 }
3920
3921 typedef struct import_parameters {
3922 nvlist_t *ip_config;
3923 const char *ip_mntopts;
3924 nvlist_t *ip_props;
3925 int ip_flags;
3926 uint_t ip_mntthreads;
3927 int *ip_err;
3928 } import_parameters_t;
3929
3930 static void
3931 do_import_task(void *arg)
3932 {
3933 import_parameters_t *ip = arg;
3934 *ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts,
3935 ip->ip_props, ip->ip_flags, ip->ip_mntthreads);
3936 free(ip);
3937 }
3938
3939
3940 static int
3941 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
3942 char *orig_name, char *new_name, importargs_t *import)
3943 {
3944 nvlist_t *config = NULL;
3945 nvlist_t *found_config = NULL;
3946 uint64_t pool_state;
3947 boolean_t pool_specified = (import->poolname != NULL ||
3948 import->guid != 0);
3949 uint_t npools = 0;
3950
3951
3952 tpool_t *tp = NULL;
3953 if (import->do_all) {
3954 tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
3955 0, NULL);
3956 }
3957
3958 /*
3959 * At this point we have a list of import candidate configs. Even if
3960 * we were searching by pool name or guid, we still need to
3961 * post-process the list to deal with pool state and possible
3962 * duplicate names.
3963 */
3964 int err = 0;
3965 nvpair_t *elem = NULL;
3966 boolean_t first = B_TRUE;
3967 if (!pool_specified && import->do_all) {
3968 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
3969 npools++;
3970 }
3971 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3972
3973 verify(nvpair_value_nvlist(elem, &config) == 0);
3974
3975 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3976 &pool_state) == 0);
3977 if (!import->do_destroyed &&
3978 pool_state == POOL_STATE_DESTROYED)
3979 continue;
3980 if (import->do_destroyed &&
3981 pool_state != POOL_STATE_DESTROYED)
3982 continue;
3983
3984 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
3985 import->policy) == 0);
3986
3987 if (!pool_specified) {
3988 if (first)
3989 first = B_FALSE;
3990 else if (!import->do_all)
3991 (void) fputc('\n', stdout);
3992
3993 if (import->do_all) {
3994 import_parameters_t *ip = safe_malloc(
3995 sizeof (import_parameters_t));
3996
3997 ip->ip_config = config;
3998 ip->ip_mntopts = mntopts;
3999 ip->ip_props = props;
4000 ip->ip_flags = flags;
4001 ip->ip_mntthreads = mount_tp_nthr / npools;
4002 ip->ip_err = &err;
4003
4004 (void) tpool_dispatch(tp, do_import_task,
4005 (void *)ip);
4006 } else {
4007 /*
4008 * If we're importing from cachefile, then
4009 * we don't want to report errors until we
4010 * are in the scan phase of the import. If
4011 				 * we get an error here, we return it so the
4012 				 * caller can fall back to the scan phase.
4013 */
4014 if (import->cachefile && !import->scan)
4015 err = show_import(config, B_FALSE);
4016 else
4017 (void) show_import(config, B_TRUE);
4018 }
4019 } else if (import->poolname != NULL) {
4020 const char *name;
4021
4022 /*
4023 * We are searching for a pool based on name.
4024 */
4025 verify(nvlist_lookup_string(config,
4026 ZPOOL_CONFIG_POOL_NAME, &name) == 0);
4027
4028 if (strcmp(name, import->poolname) == 0) {
4029 if (found_config != NULL) {
4030 (void) fprintf(stderr, gettext(
4031 "cannot import '%s': more than "
4032 "one matching pool\n"),
4033 import->poolname);
4034 (void) fprintf(stderr, gettext(
4035 "import by numeric ID instead\n"));
4036 err = B_TRUE;
4037 }
4038 found_config = config;
4039 }
4040 } else {
4041 uint64_t guid;
4042
4043 /*
4044 * Search for a pool by guid.
4045 */
4046 verify(nvlist_lookup_uint64(config,
4047 ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
4048
4049 if (guid == import->guid)
4050 found_config = config;
4051 }
4052 }
4053 if (import->do_all) {
4054 tpool_wait(tp);
4055 tpool_destroy(tp);
4056 }
4057
4058 /*
4059 * If we were searching for a specific pool, verify that we found a
4060 * pool, and then do the import.
4061 */
4062 if (pool_specified && err == 0) {
4063 if (found_config == NULL) {
4064 (void) fprintf(stderr, gettext("cannot import '%s': "
4065 "no such pool available\n"), orig_name);
4066 err = B_TRUE;
4067 } else {
4068 err |= do_import(found_config, new_name,
4069 mntopts, props, flags, mount_tp_nthr);
4070 }
4071 }
4072
4073 /*
4074 * If we were just looking for pools, report an error if none were
4075 * found.
4076 */
4077 if (!pool_specified && first)
4078 (void) fprintf(stderr,
4079 gettext("no pools available to import\n"));
4080 return (err);
4081 }
4082
4083 typedef struct target_exists_args {
4084 const char *poolname;
4085 uint64_t poolguid;
4086 } target_exists_args_t;
4087
4088 static int
4089 name_or_guid_exists(zpool_handle_t *zhp, void *data)
4090 {
4091 target_exists_args_t *args = data;
4092 nvlist_t *config = zpool_get_config(zhp, NULL);
4093 int found = 0;
4094
4095 if (config == NULL)
4096 return (0);
4097
4098 if (args->poolname != NULL) {
4099 const char *pool_name;
4100
4101 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4102 &pool_name) == 0);
4103 if (strcmp(pool_name, args->poolname) == 0)
4104 found = 1;
4105 } else {
4106 uint64_t pool_guid;
4107
4108 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4109 &pool_guid) == 0);
4110 if (pool_guid == args->poolguid)
4111 found = 1;
4112 }
4113 zpool_close(zhp);
4114
4115 return (found);
4116 }
4117 /*
4118 * zpool checkpoint <pool>
4119 * checkpoint --discard <pool>
4120 *
4121 * -d Discard the checkpoint from a checkpointed
4122 * --discard pool.
4123 *
4124 * -w Wait for discarding a checkpoint to complete.
4125 * --wait
4126 *
4127 * Checkpoints the specified pool, by taking a "snapshot" of its
4128 * current state. A pool can only have one checkpoint at a time.
4129 */
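/*
 * For example (hypothetical pool name):
 *
 *	# zpool checkpoint tank
 *	# zpool checkpoint -d -w tank	(discard it and wait for completion)
 */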
4130 int
4131 zpool_do_checkpoint(int argc, char **argv)
4132 {
4133 boolean_t discard, wait;
4134 char *pool;
4135 zpool_handle_t *zhp;
4136 int c, err;
4137
4138 struct option long_options[] = {
4139 {"discard", no_argument, NULL, 'd'},
4140 {"wait", no_argument, NULL, 'w'},
4141 {0, 0, 0, 0}
4142 };
4143
4144 discard = B_FALSE;
4145 wait = B_FALSE;
4146 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
4147 switch (c) {
4148 case 'd':
4149 discard = B_TRUE;
4150 break;
4151 case 'w':
4152 wait = B_TRUE;
4153 break;
4154 case '?':
4155 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4156 optopt);
4157 usage(B_FALSE);
4158 }
4159 }
4160
4161 if (wait && !discard) {
4162 (void) fprintf(stderr, gettext("--wait only valid when "
4163 "--discard also specified\n"));
4164 usage(B_FALSE);
4165 }
4166
4167 argc -= optind;
4168 argv += optind;
4169
4170 if (argc < 1) {
4171 (void) fprintf(stderr, gettext("missing pool argument\n"));
4172 usage(B_FALSE);
4173 }
4174
4175 if (argc > 1) {
4176 (void) fprintf(stderr, gettext("too many arguments\n"));
4177 usage(B_FALSE);
4178 }
4179
4180 pool = argv[0];
4181
4182 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
4183 /* As a special case, check for use of '/' in the name */
4184 if (strchr(pool, '/') != NULL)
4185 (void) fprintf(stderr, gettext("'zpool checkpoint' "
4186 "doesn't work on datasets. To save the state "
4187 "of a dataset from a specific point in time "
4188 "please use 'zfs snapshot'\n"));
4189 return (1);
4190 }
4191
4192 if (discard) {
4193 err = (zpool_discard_checkpoint(zhp) != 0);
4194 if (err == 0 && wait)
4195 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
4196 } else {
4197 err = (zpool_checkpoint(zhp) != 0);
4198 }
4199
4200 zpool_close(zhp);
4201
4202 return (err);
4203 }
4204
4205 #define CHECKPOINT_OPT 1024
4206
4207 /*
4208 * zpool prefetch <type> [<type opts>] <pool>
4209 *
4210  * Prefetches a particular type of data in the specified pool.
4211 */
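/*
 * For example (hypothetical pool name), to prefetch the dedup table:
 *
 *	# zpool prefetch -t ddt tank
 */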
4212 int
4213 zpool_do_prefetch(int argc, char **argv)
4214 {
4215 int c;
4216 char *poolname;
4217 char *typestr = NULL;
4218 zpool_prefetch_type_t type;
4219 zpool_handle_t *zhp;
4220 int err = 0;
4221
4222 while ((c = getopt(argc, argv, "t:")) != -1) {
4223 switch (c) {
4224 case 't':
4225 typestr = optarg;
4226 break;
4227 case ':':
4228 (void) fprintf(stderr, gettext("missing argument for "
4229 "'%c' option\n"), optopt);
4230 usage(B_FALSE);
4231 break;
4232 case '?':
4233 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4234 optopt);
4235 usage(B_FALSE);
4236 }
4237 }
4238 argc -= optind;
4239 argv += optind;
4240
4241 if (argc < 1) {
4242 (void) fprintf(stderr, gettext("missing pool name argument\n"));
4243 usage(B_FALSE);
4244 }
4245
4246 if (argc > 1) {
4247 (void) fprintf(stderr, gettext("too many arguments\n"));
4248 usage(B_FALSE);
4249 }
4250
4251 poolname = argv[0];
4252
4253 argc--;
4254 argv++;
4255
	/* A prefetch type is required; avoid a NULL dereference below. */
	if (typestr == NULL) {
		(void) fprintf(stderr, gettext("missing prefetch type\n"));
		usage(B_FALSE);
	}

4256 	if (strcmp(typestr, "ddt") == 0) {
4257 type = ZPOOL_PREFETCH_DDT;
4258 } else {
4259 (void) fprintf(stderr, gettext("unsupported prefetch type\n"));
4260 usage(B_FALSE);
4261 }
4262
4263 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
4264 return (1);
4265
4266 err = zpool_prefetch(zhp, type);
4267
4268 zpool_close(zhp);
4269
4270 return (err);
4271 }
4272
4273 /*
4274 * zpool import [-d dir] [-D]
4275 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4276 * [-d dir | -c cachefile | -s] [-f] -a
4277 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4278 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
4279 * [newpool]
4280 *
4281 * -c Read pool information from a cachefile instead of searching
4282 * devices. If importing from a cachefile config fails, then
4283 * fallback to searching for devices only in the directories that
4284 * exist in the cachefile.
4285 *
4286 * -d Scan in a specific directory, other than /dev/. More than
4287 * one directory can be specified using multiple '-d' options.
4288 *
4289  *       -D	Scan for previously destroyed pools, and import all or only
4290  *              the specified destroyed pools.
4291 *
4292 * -R Temporarily import the pool, with all mountpoints relative to
4293 * the given root. The pool will remain exported when the machine
4294 * is rebooted.
4295 *
4296 * -V Import even in the presence of faulted vdevs. This is an
4297 * intentionally undocumented option for testing purposes, and
4298 * treats the pool configuration as complete, leaving any bad
4299 * vdevs in the FAULTED state. In other words, it does verbatim
4300 * import.
4301 *
4302 * -f Force import, even if it appears that the pool is active.
4303 *
4304 * -F Attempt rewind if necessary.
4305 *
4306 * -n See if rewind would work, but don't actually rewind.
4307 *
4308 * -N Import the pool but don't mount datasets.
4309 *
4310 * -T Specify a starting txg to use for import. This option is
4311  *              intentionally undocumented and exists for testing purposes.
4312 *
4313 * -a Import all pools found.
4314 *
4315 * -l Load encryption keys while importing.
4316 *
4317 * -o Set property=value and/or temporary mount options (without '=').
4318 *
4319  *       -s	Scan using the default search path; the libblkid cache will
4320 * not be consulted.
4321 *
4322 * --rewind-to-checkpoint
4323 * Import the pool and revert back to the checkpoint.
4324 *
4325  * The import command scans for pools to import, and imports pools based on
4326  * pool name or GUID. The pool can also be renamed as part of the import process.
4327 */
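/*
 * Illustrative examples (pool names, directory, and GUID are made up):
 *
 *	# zpool import					(list importable pools)
 *	# zpool import -d /dev/disk/by-id tank		(search a directory)
 *	# zpool import 9127866161408158806 tank2	(import by GUID, rename)
 */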
4328 int
4329 zpool_do_import(int argc, char **argv)
4330 {
4331 char **searchdirs = NULL;
4332 char *env, *envdup = NULL;
4333 int nsearch = 0;
4334 int c;
4335 int err = 0;
4336 nvlist_t *pools = NULL;
4337 boolean_t do_all = B_FALSE;
4338 boolean_t do_destroyed = B_FALSE;
4339 char *mntopts = NULL;
4340 uint64_t searchguid = 0;
4341 char *searchname = NULL;
4342 char *propval;
4343 nvlist_t *policy = NULL;
4344 nvlist_t *props = NULL;
4345 int flags = ZFS_IMPORT_NORMAL;
4346 uint32_t rewind_policy = ZPOOL_NO_REWIND;
4347 boolean_t dryrun = B_FALSE;
4348 boolean_t do_rewind = B_FALSE;
4349 boolean_t xtreme_rewind = B_FALSE;
4350 boolean_t do_scan = B_FALSE;
4351 boolean_t pool_exists = B_FALSE;
4352 uint64_t txg = -1ULL;
4353 char *cachefile = NULL;
4354 importargs_t idata = { 0 };
4355 char *endptr;
4356
4357 struct option long_options[] = {
4358 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
4359 {0, 0, 0, 0}
4360 };
4361
4362 /* check options */
4363 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
4364 long_options, NULL)) != -1) {
4365 switch (c) {
4366 case 'a':
4367 do_all = B_TRUE;
4368 break;
4369 case 'c':
4370 cachefile = optarg;
4371 break;
4372 case 'd':
4373 searchdirs = safe_realloc(searchdirs,
4374 (nsearch + 1) * sizeof (char *));
4375 searchdirs[nsearch++] = optarg;
4376 break;
4377 case 'D':
4378 do_destroyed = B_TRUE;
4379 break;
4380 case 'f':
4381 flags |= ZFS_IMPORT_ANY_HOST;
4382 break;
4383 case 'F':
4384 do_rewind = B_TRUE;
4385 break;
4386 case 'l':
4387 flags |= ZFS_IMPORT_LOAD_KEYS;
4388 break;
4389 case 'm':
4390 flags |= ZFS_IMPORT_MISSING_LOG;
4391 break;
4392 case 'n':
4393 dryrun = B_TRUE;
4394 break;
4395 case 'N':
4396 flags |= ZFS_IMPORT_ONLY;
4397 break;
4398 case 'o':
4399 if ((propval = strchr(optarg, '=')) != NULL) {
4400 *propval = '\0';
4401 propval++;
4402 if (add_prop_list(optarg, propval,
4403 &props, B_TRUE))
4404 goto error;
4405 } else {
4406 mntopts = optarg;
4407 }
4408 break;
4409 case 'R':
4410 if (add_prop_list(zpool_prop_to_name(
4411 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
4412 goto error;
4413 if (add_prop_list_default(zpool_prop_to_name(
4414 ZPOOL_PROP_CACHEFILE), "none", &props))
4415 goto error;
4416 break;
4417 case 's':
4418 do_scan = B_TRUE;
4419 break;
4420 case 't':
4421 flags |= ZFS_IMPORT_TEMP_NAME;
4422 if (add_prop_list_default(zpool_prop_to_name(
4423 ZPOOL_PROP_CACHEFILE), "none", &props))
4424 goto error;
4425 break;
4426
4427 case 'T':
4428 errno = 0;
4429 txg = strtoull(optarg, &endptr, 0);
4430 if (errno != 0 || *endptr != '\0') {
4431 (void) fprintf(stderr,
4432 gettext("invalid txg value\n"));
4433 usage(B_FALSE);
4434 }
4435 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
4436 break;
4437 case 'V':
4438 flags |= ZFS_IMPORT_VERBATIM;
4439 break;
4440 case 'X':
4441 xtreme_rewind = B_TRUE;
4442 break;
4443 case CHECKPOINT_OPT:
4444 flags |= ZFS_IMPORT_CHECKPOINT;
4445 break;
4446 case ':':
4447 (void) fprintf(stderr, gettext("missing argument for "
4448 "'%c' option\n"), optopt);
4449 usage(B_FALSE);
4450 break;
4451 case '?':
4452 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4453 optopt);
4454 usage(B_FALSE);
4455 }
4456 }
4457
4458 argc -= optind;
4459 argv += optind;
4460
4461 if (cachefile && nsearch != 0) {
4462 (void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
4463 usage(B_FALSE);
4464 }
4465
4466 if (cachefile && do_scan) {
4467 (void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
4468 usage(B_FALSE);
4469 }
4470
4471 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
4472 (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
4473 usage(B_FALSE);
4474 }
4475
4476 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
4477 (void) fprintf(stderr, gettext("-l is only meaningful during "
4478 "an import\n"));
4479 usage(B_FALSE);
4480 }
4481
4482 if ((dryrun || xtreme_rewind) && !do_rewind) {
4483 (void) fprintf(stderr,
4484 gettext("-n or -X only meaningful with -F\n"));
4485 usage(B_FALSE);
4486 }
4487 if (dryrun)
4488 rewind_policy = ZPOOL_TRY_REWIND;
4489 else if (do_rewind)
4490 rewind_policy = ZPOOL_DO_REWIND;
4491 if (xtreme_rewind)
4492 rewind_policy |= ZPOOL_EXTREME_REWIND;
4493
4494 /* In the future, we can capture further policy and include it here */
4495 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
4496 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
4497 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
4498 rewind_policy) != 0)
4499 goto error;
4500
4501 /* check argument count */
4502 if (do_all) {
4503 if (argc != 0) {
4504 (void) fprintf(stderr, gettext("too many arguments\n"));
4505 usage(B_FALSE);
4506 }
4507 } else {
4508 if (argc > 2) {
4509 (void) fprintf(stderr, gettext("too many arguments\n"));
4510 usage(B_FALSE);
4511 }
4512 }
4513
4514 /*
4515 * Check for the effective uid. We do this explicitly here because
4516 * otherwise any attempt to discover pools will silently fail.
4517 */
4518 if (argc == 0 && geteuid() != 0) {
4519 (void) fprintf(stderr, gettext("cannot "
4520 "discover pools: permission denied\n"));
4521
4522 free(searchdirs);
4523 nvlist_free(props);
4524 nvlist_free(policy);
4525 return (1);
4526 }
4527
4528 /*
4529 * Depending on the arguments given, we do one of the following:
4530 *
4531 * <none> Iterate through all pools and display information about
4532 * each one.
4533 *
4534 * -a Iterate through all pools and try to import each one.
4535 *
4536 * <id> Find the pool that corresponds to the given GUID/pool
4537 * name and import that one.
4538 *
4539 	 * -D		The above options apply only to destroyed pools.
4540 */
4541 if (argc != 0) {
4542 char *endptr;
4543
4544 errno = 0;
4545 searchguid = strtoull(argv[0], &endptr, 10);
4546 if (errno != 0 || *endptr != '\0') {
4547 searchname = argv[0];
4548 searchguid = 0;
4549 }
4550
4551 /*
4552 * User specified a name or guid. Ensure it's unique.
4553 */
4554 target_exists_args_t search = {searchname, searchguid};
4555 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
4556 }
4557
4558 /*
4559 * Check the environment for the preferred search path.
4560 */
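	/*
	 * For example (illustrative value), ZPOOL_IMPORT_PATH set to
	 * "/dev/disk/by-vdev:/dev/disk/by-id" yields two search
	 * directories, tried in that order.
	 */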
4561 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
4562 char *dir, *tmp = NULL;
4563
4564 envdup = strdup(env);
4565
4566 for (dir = strtok_r(envdup, ":", &tmp);
4567 dir != NULL;
4568 dir = strtok_r(NULL, ":", &tmp)) {
4569 searchdirs = safe_realloc(searchdirs,
4570 (nsearch + 1) * sizeof (char *));
4571 searchdirs[nsearch++] = dir;
4572 }
4573 }
4574
4575 idata.path = searchdirs;
4576 idata.paths = nsearch;
4577 idata.poolname = searchname;
4578 idata.guid = searchguid;
4579 idata.cachefile = cachefile;
4580 idata.scan = do_scan;
4581 idata.policy = policy;
4582 idata.do_destroyed = do_destroyed;
4583 idata.do_all = do_all;
4584
4585 libpc_handle_t lpch = {
4586 .lpc_lib_handle = g_zfs,
4587 .lpc_ops = &libzfs_config_ops,
4588 .lpc_printerr = B_TRUE
4589 };
4590 pools = zpool_search_import(&lpch, &idata);
4591
4592 if (pools != NULL && pool_exists &&
4593 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
4594 (void) fprintf(stderr, gettext("cannot import '%s': "
4595 "a pool with that name already exists\n"),
4596 argv[0]);
4597 (void) fprintf(stderr, gettext("use the form '%s "
4598 "<pool | id> <newpool>' to give it a new name\n"),
4599 "zpool import");
4600 err = 1;
4601 } else if (pools == NULL && pool_exists) {
4602 (void) fprintf(stderr, gettext("cannot import '%s': "
4603 "a pool with that name is already created/imported,\n"),
4604 argv[0]);
4605 (void) fprintf(stderr, gettext("and no additional pools "
4606 "with that name were found\n"));
4607 err = 1;
4608 } else if (pools == NULL) {
4609 if (argc != 0) {
4610 (void) fprintf(stderr, gettext("cannot import '%s': "
4611 "no such pool available\n"), argv[0]);
4612 }
4613 err = 1;
4614 }
4615
4616 if (err == 1) {
4617 free(searchdirs);
4618 free(envdup);
4619 nvlist_free(policy);
4620 nvlist_free(pools);
4621 nvlist_free(props);
4622 return (1);
4623 }
4624
4625 err = import_pools(pools, props, mntopts, flags,
4626 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata);
4627
4628 /*
4629 * If we're using the cachefile and we failed to import, then
4630 * fallback to scanning the directory for pools that match
4631 * those in the cachefile.
4632 */
4633 if (err != 0 && cachefile != NULL) {
4634 (void) printf(gettext("cachefile import failed, retrying\n"));
4635
4636 /*
4637 * We use the scan flag to gather the directories that exist
4638 * in the cachefile. If we need to fallback to searching for
4639 * the pool config, we will only search devices in these
4640 * directories.
4641 */
4642 idata.scan = B_TRUE;
4643 nvlist_free(pools);
4644 pools = zpool_search_import(&lpch, &idata);
4645
4646 err = import_pools(pools, props, mntopts, flags,
4647 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL,
4648 &idata);
4649 }
4650
4651 error:
4652 nvlist_free(props);
4653 nvlist_free(pools);
4654 nvlist_free(policy);
4655 free(searchdirs);
4656 free(envdup);
4657
4658 return (err ? 1 : 0);
4659 }
4660
4661 /*
4662 * zpool sync [-f] [pool] ...
4663 *
4664 * -f (undocumented) force uberblock (and config including zpool cache file)
4665 * update.
4666 *
4667 * Sync the specified pool(s).
4668 * Without arguments "zpool sync" will sync all pools.
4669 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
4670 *
4671 */
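/*
 * For example (hypothetical pool name):
 *
 *	# zpool sync		(sync every imported pool)
 *	# zpool sync tank	(sync only 'tank')
 */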
4672 static int
4673 zpool_do_sync(int argc, char **argv)
4674 {
4675 int ret;
4676 boolean_t force = B_FALSE;
4677
4678 /* check options */
4679 while ((ret = getopt(argc, argv, "f")) != -1) {
4680 switch (ret) {
4681 case 'f':
4682 force = B_TRUE;
4683 break;
4684 case '?':
4685 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4686 optopt);
4687 usage(B_FALSE);
4688 }
4689 }
4690
4691 argc -= optind;
4692 argv += optind;
4693
4694 /* if argc == 0 we will execute zpool_sync_one on all pools */
4695 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
4696 B_FALSE, zpool_sync_one, &force);
4697
4698 return (ret);
4699 }
4700
4701 typedef struct iostat_cbdata {
4702 uint64_t cb_flags;
4703 int cb_namewidth;
4704 int cb_iteration;
4705 boolean_t cb_verbose;
4706 boolean_t cb_literal;
4707 boolean_t cb_scripted;
4708 zpool_list_t *cb_list;
4709 vdev_cmd_data_list_t *vcdl;
4710 vdev_cbdata_t cb_vdevs;
4711 } iostat_cbdata_t;
4712
4713 /* iostat labels */
4714 typedef struct name_and_columns {
4715 const char *name; /* Column name */
4716 unsigned int columns; /* Center name to this number of columns */
4717 } name_and_columns_t;
4718
4719 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
4720
4721 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
4722 {
4723 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
4724 {NULL}},
4725 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4726 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
4727 {NULL}},
4728 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
4729 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
4730 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
4731 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4732 {"asyncq_wait", 2}, {NULL}},
4733 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
4734 {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
4735 {"trim", 2}, {"rebuild", 2}, {NULL}},
4736 };
4737
4738 /* Shorthand - if "columns" field not set, default to 1 column */
4739 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
4740 {
4741 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
4742 {"write"}, {NULL}},
4743 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4744 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
4745 {NULL}},
4746 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
4747 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
4748 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
4749 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4750 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
4751 {NULL}},
4752 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4753 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4754 {"ind"}, {"agg"}, {NULL}},
4755 };
4756
4757 static const char *histo_to_title[] = {
4758 [IOS_L_HISTO] = "latency",
4759 [IOS_RQ_HISTO] = "req_size",
4760 };
4761
4762 /*
4763 * Return the number of labels in a null-terminated name_and_columns_t
4764 * array.
4765 *
4766 */
4767 static unsigned int
4768 label_array_len(const name_and_columns_t *labels)
4769 {
4770 int i = 0;
4771
4772 while (labels[i].name)
4773 i++;
4774
4775 return (i);
4776 }
4777
4778 /*
4779 * Return the number of strings in a null-terminated string array.
4780 * For example:
4781 *
4782  *	const char *foo[] = {"bar", "baz", NULL};
4783 *
4784 * returns 2
4785 */
4786 static uint64_t
4787 str_array_len(const char *array[])
4788 {
4789 uint64_t i = 0;
4790 while (array[i])
4791 i++;
4792
4793 return (i);
4794 }
4795
4796
4797 /*
4798 * Return a default column width for default/latency/queue columns. This does
4799 * not include histograms, which have their columns autosized.
4800 */
4801 static unsigned int
4802 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
4803 {
4804 unsigned long column_width = 5; /* Normal niceprint */
4805 static unsigned long widths[] = {
4806 /*
4807 * Choose some sane default column sizes for printing the
4808 * raw numbers.
4809 */
4810 [IOS_DEFAULT] = 15, /* 1PB capacity */
4811 [IOS_LATENCY] = 10, /* 1B ns = 10sec */
4812 [IOS_QUEUES] = 6, /* 1M queue entries */
4813 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4814 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4815 };
4816
4817 if (cb->cb_literal)
4818 column_width = widths[type];
4819
4820 return (column_width);
4821 }
4822
4823 /*
4824 * Print the column labels, i.e:
4825 *
4826 * capacity operations bandwidth
4827 * alloc free read write read write ...
4828 *
4829 * If force_column_width is set, use it for the column width. If not set, use
4830 * the default column width.
4831 */
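/*
 * Worked example of the centering math below, assuming the default
 * column_width of 5: the two-column label "operations" gets
 * rw_column_width = 5 * 2 + 2 = 12 and text_start = 12/2 - 10/2 = 1,
 * so it prints with one leading and one trailing space, centered over
 * its "read write" columns.
 */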
4832 static void
4833 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4834 const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4835 {
4836 int i, idx, s;
4837 int text_start, rw_column_width, spaces_to_end;
4838 uint64_t flags = cb->cb_flags;
4839 uint64_t f;
4840 unsigned int column_width = force_column_width;
4841
4842 /* For each bit set in flags */
4843 for (f = flags; f; f &= ~(1ULL << idx)) {
4844 idx = lowbit64(f) - 1;
4845 if (!force_column_width)
4846 column_width = default_column_width(cb, idx);
4847 /* Print our top labels centered over "read write" label. */
4848 for (i = 0; i < label_array_len(labels[idx]); i++) {
4849 const char *name = labels[idx][i].name;
4850 /*
4851 * We treat labels[][].columns == 0 as shorthand
4852 * for one column. It makes writing out the label
4853 * tables more concise.
4854 */
4855 unsigned int columns = MAX(1, labels[idx][i].columns);
4856 unsigned int slen = strlen(name);
4857
4858 rw_column_width = (column_width * columns) +
4859 (2 * (columns - 1));
4860
4861 text_start = (int)((rw_column_width) / columns -
4862 slen / columns);
4863 if (text_start < 0)
4864 text_start = 0;
4865
4866 printf(" "); /* Two spaces between columns */
4867
4868 /* Space from beginning of column to label */
4869 for (s = 0; s < text_start; s++)
4870 printf(" ");
4871
4872 printf("%s", name);
4873
4874 /* Print space after label to end of column */
4875 spaces_to_end = rw_column_width - text_start - slen;
4876 if (spaces_to_end < 0)
4877 spaces_to_end = 0;
4878
4879 for (s = 0; s < spaces_to_end; s++)
4880 printf(" ");
4881 }
4882 }
4883 }
4884
4885
4886 /*
4887 * print_cmd_columns - Print custom column titles from -c
4888 *
4889 * If the user specified the "zpool status|iostat -c" then print their custom
4890 * column titles in the header. For example, print_cmd_columns() would print
4891 * the " col1 col2" part of this:
4892 *
4893 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4894 * ...
4895 * capacity operations bandwidth
4896 * pool alloc free read write read write col1 col2
4897 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4898 * mypool 269K 1008M 0 0 107 946
4899 * mirror 269K 1008M 0 0 107 946
4900 * sdb - - 0 0 102 473 val1 val2
4901 * sdc - - 0 0 5 473 val1 val2
4902 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4903 */
4904 static void
4905 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4906 {
4907 int i, j;
4908 vdev_cmd_data_t *data = &vcdl->data[0];
4909
4910 if (vcdl->count == 0 || data == NULL)
4911 return;
4912
4913 /*
4914 * Each vdev cmd should have the same column names unless the user did
4915 * something weird with their cmd. Just take the column names from the
4916 * first vdev and assume it works for all of them.
4917 */
4918 for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4919 printf(" ");
4920 if (use_dashes) {
4921 for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4922 printf("-");
4923 } else {
4924 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
4925 vcdl->uniq_cols[i]);
4926 }
4927 }
4928 }
4929
4930
4931 /*
4932 * Utility function to print out a line of dashes like:
4933 *
4934 * -------------------------------- ----- ----- ----- ----- -----
4935 *
4936 * ...or a dashed named-row line like:
4937 *
4938 * logs - - - - -
4939 *
4940 * @cb: iostat data
4941 *
4942 * @force_column_width If non-zero, use the value as the column width.
4943 * Otherwise use the default column widths.
4944 *
4945 * @name: Print a dashed named-row line starting
4946 * with @name. Otherwise, print a regular
4947 * dashed line.
4948 */
4949 static void
4950 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
4951 const char *name)
4952 {
4953 int i;
4954 unsigned int namewidth;
4955 uint64_t flags = cb->cb_flags;
4956 uint64_t f;
4957 int idx;
4958 const name_and_columns_t *labels;
4959 const char *title;
4960
4961
4962 if (cb->cb_flags & IOS_ANYHISTO_M) {
4963 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4964 } else if (cb->cb_vdevs.cb_names_count) {
4965 title = "vdev";
4966 } else {
4967 title = "pool";
4968 }
4969
4970 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4971 name ? strlen(name) : 0);
4972
4973
4974 if (name) {
4975 printf("%-*s", namewidth, name);
4976 } else {
4977 for (i = 0; i < namewidth; i++)
4978 (void) printf("-");
4979 }
4980
4981 /* For each bit in flags */
4982 for (f = flags; f; f &= ~(1ULL << idx)) {
4983 unsigned int column_width;
4984 idx = lowbit64(f) - 1;
4985 if (force_column_width)
4986 column_width = force_column_width;
4987 else
4988 column_width = default_column_width(cb, idx);
4989
4990 labels = iostat_bottom_labels[idx];
4991 for (i = 0; i < label_array_len(labels); i++) {
4992 if (name)
4993 printf(" %*s-", column_width - 1, " ");
4994 else
4995 printf(" %.*s", column_width,
4996 "--------------------");
4997 }
4998 }
4999 }
5000
5001
5002 static void
5003 print_iostat_separator_impl(iostat_cbdata_t *cb,
5004 unsigned int force_column_width)
5005 {
5006 print_iostat_dashes(cb, force_column_width, NULL);
5007 }
5008
5009 static void
5010 print_iostat_separator(iostat_cbdata_t *cb)
5011 {
5012 print_iostat_separator_impl(cb, 0);
5013 }
5014
5015 static void
5016 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
5017 const char *histo_vdev_name)
5018 {
5019 unsigned int namewidth;
5020 const char *title;
5021
5022 color_start(ANSI_BOLD);
5023
5024 if (cb->cb_flags & IOS_ANYHISTO_M) {
5025 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
5026 } else if (cb->cb_vdevs.cb_names_count) {
5027 title = "vdev";
5028 } else {
5029 title = "pool";
5030 }
5031
5032 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
5033 histo_vdev_name ? strlen(histo_vdev_name) : 0);
5034
5035 if (histo_vdev_name)
5036 printf("%-*s", namewidth, histo_vdev_name);
5037 else
5038 printf("%*s", namewidth, "");
5039
5040
5041 print_iostat_labels(cb, force_column_width, iostat_top_labels);
5042 printf("\n");
5043
5044 printf("%-*s", namewidth, title);
5045
5046 print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
5047 if (cb->vcdl != NULL)
5048 print_cmd_columns(cb->vcdl, 0);
5049
5050 printf("\n");
5051
5052 print_iostat_separator_impl(cb, force_column_width);
5053
5054 if (cb->vcdl != NULL)
5055 print_cmd_columns(cb->vcdl, 1);
5056
5057 color_end();
5058
5059 printf("\n");
5060 }
5061
5062 static void
5063 print_iostat_header(iostat_cbdata_t *cb)
5064 {
5065 print_iostat_header_impl(cb, 0, NULL);
5066 }
5067
5068 /*
5069  * Prints a size string (e.g. 120M) with the suffix ("M") colored
5070 * by order of magnitude. Uses column_size to add padding.
5071 */
5072 static void
5073 print_stat_color(const char *statbuf, unsigned int column_size)
5074 {
5075 fputs(" ", stdout);
5076 size_t len = strlen(statbuf);
5077 while (len < column_size) {
5078 fputc(' ', stdout);
5079 column_size--;
5080 }
5081 if (*statbuf == '0') {
5082 color_start(ANSI_GRAY);
5083 fputc('0', stdout);
5084 } else {
5085 for (; *statbuf; statbuf++) {
5086 if (*statbuf == 'K') color_start(ANSI_GREEN);
5087 else if (*statbuf == 'M') color_start(ANSI_YELLOW);
5088 else if (*statbuf == 'G') color_start(ANSI_RED);
5089 else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
5090 else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
5091 else if (*statbuf == 'E') color_start(ANSI_CYAN);
5092 fputc(*statbuf, stdout);
5093 if (--column_size <= 0)
5094 break;
5095 }
5096 }
5097 color_end();
5098 }
5099
5100 /*
5101 * Display a single statistic.
5102 */
5103 static void
5104 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
5105 unsigned int column_size, boolean_t scripted)
5106 {
5107 char buf[64];
5108
5109 zfs_nicenum_format(value, buf, sizeof (buf), format);
5110
5111 if (scripted)
5112 printf("\t%s", buf);
5113 else
5114 print_stat_color(buf, column_size);
5115 }
5116
5117 /*
5118 * Calculate the default vdev stats
5119 *
5120 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting
5121 * stats into calcvs.
5122 */
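/*
 * For example, if oldvs recorded 100 reads over the pool's lifetime and
 * newvs records 150, calcvs reports 50 reads for the sampling interval;
 * print_iostat_default() later multiplies by the time-based scaling
 * factor to print a per-second rate.
 */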
5123 static void
5124 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
5125 vdev_stat_t *calcvs)
5126 {
5127 int i;
5128
5129 memcpy(calcvs, newvs, sizeof (*calcvs));
5130 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
5131 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
5132
5133 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
5134 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
5135 }
5136
5137 /*
5138 * Internal representation of the extended iostats data.
5139 *
5140 * The extended iostat stats are exported in nvlists as either uint64_t arrays
5141 * or single uint64_t's. We make both look like arrays to make them easier
5142 * to process. In order to make single uint64_t's look like arrays, we set
5143  * __data to the stat value, point data at __data, and set count = 1. Then,
5144  * we can just use data and count.
5145 */
5146 struct stat_array {
5147 uint64_t *data;
5148 uint_t count; /* Number of entries in data[] */
5149 uint64_t __data; /* Only used when data is a single uint64_t */
5150 };
5151
5152 static uint64_t
5153 stat_histo_max(struct stat_array *nva, unsigned int len)
5154 {
5155 uint64_t max = 0;
5156 int i;
5157 for (i = 0; i < len; i++)
5158 max = MAX(max, array64_max(nva[i].data, nva[i].count));
5159
5160 return (max);
5161 }
5162
5163 /*
5164 * Helper function to lookup a uint64_t array or uint64_t value and store its
5165 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
5166 * it look like a one element array to make it easier to process.
5167 */
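/*
 * For example (hypothetical nvpairs): a DATA_TYPE_UINT64 pair {"x": 7}
 * comes back as nva->__data = 7, nva->data = &nva->__data, and
 * nva->count = 1, while a DATA_TYPE_UINT64_ARRAY pair {"y": [1, 2, 3]}
 * leaves nva->data pointing at the nvpair's own array with
 * nva->count = 3.
 */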
5168 static int
5169 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
5170 struct stat_array *nva)
5171 {
5172 nvpair_t *tmp;
5173 int ret;
5174
5175 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
5176 switch (nvpair_type(tmp)) {
5177 case DATA_TYPE_UINT64_ARRAY:
5178 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
5179 break;
5180 case DATA_TYPE_UINT64:
5181 ret = nvpair_value_uint64(tmp, &nva->__data);
5182 nva->data = &nva->__data;
5183 nva->count = 1;
5184 break;
5185 default:
5186 /* Not a uint64_t */
5187 ret = EINVAL;
5188 break;
5189 }
5190
5191 return (ret);
5192 }
5193
5194 /*
5195 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
5196 * subtract them, and return the results in a newly allocated stat_array.
5197 * You must free the returned array after you are done with it with
5198 * free_calc_stats().
5199 *
5200 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
5201 * values.
5202 */
5203 static struct stat_array *
5204 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
5205 nvlist_t *newnv)
5206 {
5207 nvlist_t *oldnvx = NULL, *newnvx;
5208 struct stat_array *oldnva, *newnva, *calcnva;
5209 int i, j;
5210 unsigned int alloc_size = (sizeof (struct stat_array)) * len;
5211
5212 /* Extract our extended stats nvlist from the main list */
5213 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
5214 &newnvx) == 0);
5215 if (oldnv) {
5216 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
5217 &oldnvx) == 0);
5218 }
5219
5220 newnva = safe_malloc(alloc_size);
5221 oldnva = safe_malloc(alloc_size);
5222 calcnva = safe_malloc(alloc_size);
5223
5224 for (j = 0; j < len; j++) {
5225 verify(nvpair64_to_stat_array(newnvx, names[j],
5226 &newnva[j]) == 0);
5227 calcnva[j].count = newnva[j].count;
5228 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
5229 calcnva[j].data = safe_malloc(alloc_size);
5230 memcpy(calcnva[j].data, newnva[j].data, alloc_size);
5231
5232 if (oldnvx) {
5233 verify(nvpair64_to_stat_array(oldnvx, names[j],
5234 &oldnva[j]) == 0);
5235 for (i = 0; i < oldnva[j].count; i++)
5236 calcnva[j].data[i] -= oldnva[j].data[i];
5237 }
5238 }
5239 free(newnva);
5240 free(oldnva);
5241 return (calcnva);
5242 }
5243
5244 static void
5245 free_calc_stats(struct stat_array *nva, unsigned int len)
5246 {
5247 int i;
5248 for (i = 0; i < len; i++)
5249 free(nva[i].data);
5250
5251 free(nva);
5252 }
5253
5254 static void
5255 print_iostat_histo(struct stat_array *nva, unsigned int len,
5256 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
5257 double scale)
5258 {
5259 int i, j;
5260 char buf[6];
5261 uint64_t val;
5262 enum zfs_nicenum_format format;
5263 unsigned int buckets;
5264 unsigned int start_bucket;
5265
5266 if (cb->cb_literal)
5267 format = ZFS_NICENUM_RAW;
5268 else
5269 format = ZFS_NICENUM_1024;
5270
5271 /* All these histos are the same size, so just use nva[0].count */
5272 buckets = nva[0].count;
5273
5274 if (cb->cb_flags & IOS_RQ_HISTO_M) {
5275 /* Start at 512 - req size should never be lower than this */
5276 start_bucket = 9;
5277 } else {
5278 start_bucket = 0;
5279 }
5280
5281 for (j = start_bucket; j < buckets; j++) {
5282 /* Print histogram bucket label */
5283 if (cb->cb_flags & IOS_L_HISTO_M) {
5284 /* Ending range of this bucket */
5285 val = (1UL << (j + 1)) - 1;
5286 zfs_nicetime(val, buf, sizeof (buf));
5287 } else {
5288 /* Request size (starting range of bucket) */
5289 val = (1UL << j);
5290 zfs_nicenum(val, buf, sizeof (buf));
5291 }
5292
5293 if (cb->cb_scripted)
5294 printf("%llu", (u_longlong_t)val);
5295 else
5296 printf("%-*s", namewidth, buf);
5297
5298 /* Print the values on the line */
5299 for (i = 0; i < len; i++) {
5300 print_one_stat(nva[i].data[j] * scale, format,
5301 column_width, cb->cb_scripted);
5302 }
5303 printf("\n");
5304 }
5305 }
5306
5307 static void
5308 print_solid_separator(unsigned int length)
5309 {
5310 while (length--)
5311 printf("-");
5312 printf("\n");
5313 }
5314
5315 static void
5316 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
5317 nvlist_t *newnv, double scale, const char *name)
5318 {
5319 unsigned int column_width;
5320 unsigned int namewidth;
5321 unsigned int entire_width;
5322 enum iostat_type type;
5323 struct stat_array *nva;
5324 const char **names;
5325 unsigned int names_len;
5326
5327 /* What type of histo are we? */
5328 type = IOS_HISTO_IDX(cb->cb_flags);
5329
5330 /* Get NULL-terminated array of nvlist names for our histo */
5331 names = vsx_type_to_nvlist[type];
5332 names_len = str_array_len(names); /* num of names */
5333
5334 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
5335
5336 if (cb->cb_literal) {
5337 column_width = MAX(5,
5338 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
5339 } else {
5340 column_width = 5;
5341 }
5342
5343 namewidth = MAX(cb->cb_namewidth,
5344 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
5345
5346 /*
5347 * Calculate the entire line width of what we're printing. The
5348 * +2 is for the two spaces between columns:
5349 */
5350 /* read write */
5351 /* ----- ----- */
5352 /* |___| <---------- column_width */
5353 /* */
5354 /* |__________| <--- entire_width */
5355 /* */
5356 entire_width = namewidth + (column_width + 2) *
5357 label_array_len(iostat_bottom_labels[type]);
5358
5359 if (cb->cb_scripted)
5360 printf("%s\n", name);
5361 else
5362 print_iostat_header_impl(cb, column_width, name);
5363
5364 print_iostat_histo(nva, names_len, cb, column_width,
5365 namewidth, scale);
5366
5367 free_calc_stats(nva, names_len);
5368 if (!cb->cb_scripted)
5369 print_solid_separator(entire_width);
5370 }
5371
5372 /*
5373 * Calculate the average latency of a power-of-two latency histogram
5374 */
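/*
 * Worked example (made-up counts): histo[3] = 2 and histo[4] = 1 give
 * bucket midpoints of 12ns and 24ns, so total = 2 * 12 + 1 * 24 = 48,
 * count = 3, and the function returns 48 / 3 = 16ns.
 */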
5375 static uint64_t
5376 single_histo_average(uint64_t *histo, unsigned int buckets)
5377 {
5378 int i;
5379 uint64_t count = 0, total = 0;
5380
5381 for (i = 0; i < buckets; i++) {
5382 /*
5383 * Our buckets are power-of-two latency ranges. Use the
5384 * midpoint latency of each bucket to calculate the average.
5385 * For example:
5386 *
5387 * Bucket Midpoint
5388 * 8ns-15ns: 12ns
5389 * 16ns-31ns: 24ns
5390 * ...
5391 */
5392 if (histo[i] != 0) {
5393 total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
5394 count += histo[i];
5395 }
5396 }
5397
5398 /* Prevent divide by zero */
5399 return (count == 0 ? 0 : total / count);
5400 }
5401
5402 static void
5403 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
5404 {
5405 const char *names[] = {
5406 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
5407 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
5408 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
5409 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
5410 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
5411 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
5412 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
5413 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
5414 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
5415 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
5416 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
5417 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
5418 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
5419 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
5420 };
5421
5422 struct stat_array *nva;
5423
5424 unsigned int column_width = default_column_width(cb, IOS_QUEUES);
5425 enum zfs_nicenum_format format;
5426
5427 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
5428
5429 if (cb->cb_literal)
5430 format = ZFS_NICENUM_RAW;
5431 else
5432 format = ZFS_NICENUM_1024;
5433
5434 for (int i = 0; i < ARRAY_SIZE(names); i++) {
5435 uint64_t val = nva[i].data[0];
5436 print_one_stat(val, format, column_width, cb->cb_scripted);
5437 }
5438
5439 free_calc_stats(nva, ARRAY_SIZE(names));
5440 }
5441
5442 static void
5443 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
5444 nvlist_t *newnv)
5445 {
5446 int i;
5447 uint64_t val;
5448 const char *names[] = {
5449 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
5450 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
5451 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
5452 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
5453 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
5454 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
5455 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
5456 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
5457 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
5458 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
5459 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
5460 };
5461 struct stat_array *nva;
5462
5463 unsigned int column_width = default_column_width(cb, IOS_LATENCY);
5464 enum zfs_nicenum_format format;
5465
5466 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
5467
5468 if (cb->cb_literal)
5469 format = ZFS_NICENUM_RAWTIME;
5470 else
5471 format = ZFS_NICENUM_TIME;
5472
5473 /* Print our avg latencies on the line */
5474 for (i = 0; i < ARRAY_SIZE(names); i++) {
5475 /* Compute average latency for a latency histo */
5476 val = single_histo_average(nva[i].data, nva[i].count);
5477 print_one_stat(val, format, column_width, cb->cb_scripted);
5478 }
5479 free_calc_stats(nva, ARRAY_SIZE(names));
5480 }
5481
5482 /*
5483 * Print default statistics (capacity/operations/bandwidth)
5484 */
5485 static void
5486 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
5487 {
5488 unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
5489 enum zfs_nicenum_format format;
5490 char na; /* char to print for "not applicable" values */
5491
5492 if (cb->cb_literal) {
5493 format = ZFS_NICENUM_RAW;
5494 na = '0';
5495 } else {
5496 format = ZFS_NICENUM_1024;
5497 na = '-';
5498 }
5499
5500 /* only toplevel vdevs have capacity stats */
5501 if (vs->vs_space == 0) {
5502 if (cb->cb_scripted)
5503 printf("\t%c\t%c", na, na);
5504 else
5505 printf(" %*c %*c", column_width, na, column_width,
5506 na);
5507 } else {
5508 print_one_stat(vs->vs_alloc, format, column_width,
5509 cb->cb_scripted);
5510 print_one_stat(vs->vs_space - vs->vs_alloc, format,
5511 column_width, cb->cb_scripted);
5512 }
5513
5514 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
5515 format, column_width, cb->cb_scripted);
5516 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
5517 format, column_width, cb->cb_scripted);
5518 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
5519 format, column_width, cb->cb_scripted);
5520 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
5521 format, column_width, cb->cb_scripted);
5522 }
5523
5524 static const char *const class_name[] = {
5525 VDEV_ALLOC_BIAS_DEDUP,
5526 VDEV_ALLOC_BIAS_SPECIAL,
5527 VDEV_ALLOC_CLASS_LOGS
5528 };
5529
5530 /*
5531 * Print out all the statistics for the given vdev. This can either be the
5532 * toplevel configuration, or called recursively. If 'name' is NULL, then this
5533 * is a verbose output, and we don't want to display the toplevel pool stats.
5534 *
5535 * Returns the number of stat lines printed.
5536 */
5537 static unsigned int
5538 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
5539 nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
5540 {
5541 nvlist_t **oldchild, **newchild;
5542 uint_t c, children, oldchildren;
5543 vdev_stat_t *oldvs, *newvs, *calcvs;
5544 vdev_stat_t zerovs = { 0 };
5545 char *vname;
5546 int i;
5547 int ret = 0;
5548 uint64_t tdelta;
5549 double scale;
5550
5551 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
5552 return (ret);
5553
5554 calcvs = safe_malloc(sizeof (*calcvs));
5555
5556 if (oldnv != NULL) {
5557 verify(nvlist_lookup_uint64_array(oldnv,
5558 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
5559 } else {
5560 oldvs = &zerovs;
5561 }
5562
5563 /* Do we only want to see a specific vdev? */
5564 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
5565 /* Yes we do. Is this the vdev? */
5566 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
5567 /*
5568 * This is our vdev. Since it is the only vdev we
5569 * will be displaying, make depth = 0 so that it
5570 * doesn't get indented.
5571 */
5572 depth = 0;
5573 break;
5574 }
5575 }
5576
5577 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
5578 /* Couldn't match the name */
5579 goto children;
5580 }
5581
5582
5583 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
5584 (uint64_t **)&newvs, &c) == 0);
5585
5586 /*
5587 	 * Print the vdev name unless it's a histogram. Histograms
5588 * display the vdev name in the header itself.
5589 */
5590 if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
5591 if (cb->cb_scripted) {
5592 printf("%s", name);
5593 } else {
5594 if (strlen(name) + depth > cb->cb_namewidth)
5595 (void) printf("%*s%s", depth, "", name);
5596 else
5597 (void) printf("%*s%s%*s", depth, "", name,
5598 (int)(cb->cb_namewidth - strlen(name) -
5599 depth), "");
5600 }
5601 }
5602
5603 /* Calculate our scaling factor */
5604 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
5605 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
5606 /*
5607 * If we specify printing histograms with no time interval, then
5608 * print the histogram numbers over the entire lifetime of the
5609 * vdev.
5610 */
5611 scale = 1;
5612 } else {
5613 if (tdelta == 0)
5614 scale = 1.0;
5615 else
5616 scale = (double)NANOSEC / tdelta;
5617 }
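
	/*
	 * For example, a 2-second interval gives tdelta ~2e9 ns and
	 * scale ~0.5, so a delta of 100 ops over the interval prints
	 * as 50 ops/s.
	 */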
5618
5619 if (cb->cb_flags & IOS_DEFAULT_M) {
5620 calc_default_iostats(oldvs, newvs, calcvs);
5621 print_iostat_default(calcvs, cb, scale);
5622 }
5623 if (cb->cb_flags & IOS_LATENCY_M)
5624 print_iostat_latency(cb, oldnv, newnv);
5625 if (cb->cb_flags & IOS_QUEUES_M)
5626 print_iostat_queues(cb, newnv);
5627 if (cb->cb_flags & IOS_ANYHISTO_M) {
5628 printf("\n");
5629 print_iostat_histos(cb, oldnv, newnv, scale, name);
5630 }
5631
5632 if (cb->vcdl != NULL) {
5633 const char *path;
5634 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
5635 &path) == 0) {
5636 printf(" ");
5637 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
5638 }
5639 }
5640
5641 if (!(cb->cb_flags & IOS_ANYHISTO_M))
5642 printf("\n");
5643
5644 ret++;
5645
5646 children:
5647
5648 free(calcvs);
5649
5650 if (!cb->cb_verbose)
5651 return (ret);
5652
5653 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
5654 &newchild, &children) != 0)
5655 return (ret);
5656
5657 if (oldnv) {
5658 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
5659 &oldchild, &oldchildren) != 0)
5660 return (ret);
5661
5662 children = MIN(oldchildren, children);
5663 }
5664
5665 /*
5666 * print normal top-level devices
5667 */
5668 for (c = 0; c < children; c++) {
5669 uint64_t ishole = B_FALSE, islog = B_FALSE;
5670
5671 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
5672 &ishole);
5673
5674 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
5675 &islog);
5676
5677 if (ishole || islog)
5678 continue;
5679
5680 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
5681 continue;
5682
5683 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5684 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5685 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
5686 newchild[c], cb, depth + 2);
5687 free(vname);
5688 }
5689
5690 /*
5691 * print all other top-level devices
5692 */
5693 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
5694 boolean_t printed = B_FALSE;
5695
5696 for (c = 0; c < children; c++) {
5697 uint64_t islog = B_FALSE;
5698 const char *bias = NULL;
5699 const char *type = NULL;
5700
5701 (void) nvlist_lookup_uint64(newchild[c],
5702 ZPOOL_CONFIG_IS_LOG, &islog);
5703 if (islog) {
5704 bias = VDEV_ALLOC_CLASS_LOGS;
5705 } else {
5706 (void) nvlist_lookup_string(newchild[c],
5707 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
5708 (void) nvlist_lookup_string(newchild[c],
5709 ZPOOL_CONFIG_TYPE, &type);
5710 }
5711 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
5712 continue;
5713 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
5714 continue;
5715
5716 if (!printed) {
5717 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
5718 !cb->cb_scripted &&
5719 !cb->cb_vdevs.cb_names) {
5720 print_iostat_dashes(cb, 0,
5721 class_name[n]);
5722 }
5723 printf("\n");
5724 printed = B_TRUE;
5725 }
5726
5727 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5728 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5729 ret += print_vdev_stats(zhp, vname, oldnv ?
5730 oldchild[c] : NULL, newchild[c], cb, depth + 2);
5731 free(vname);
5732 }
5733 }
5734
5735 /*
5736 * Include level 2 ARC devices in iostat output
5737 */
5738 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
5739 &newchild, &children) != 0)
5740 return (ret);
5741
5742 if (oldnv) {
5743 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
5744 &oldchild, &oldchildren) != 0)
5745 return (ret);
5746
5747 children = MIN(oldchildren, children);
5748 }
5749
5750 if (children > 0) {
5751 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
5752 !cb->cb_vdevs.cb_names) {
5753 print_iostat_dashes(cb, 0, "cache");
5754 }
5755 printf("\n");
5756
5757 for (c = 0; c < children; c++) {
5758 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5759 cb->cb_vdevs.cb_name_flags);
5760 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
5761 : NULL, newchild[c], cb, depth + 2);
5762 free(vname);
5763 }
5764 }
5765
5766 return (ret);
5767 }
5768
5769 static int
5770 refresh_iostat(zpool_handle_t *zhp, void *data)
5771 {
5772 iostat_cbdata_t *cb = data;
5773 boolean_t missing;
5774
5775 /*
5776 * If the pool has disappeared, remove it from the list and continue.
5777 */
5778 if (zpool_refresh_stats(zhp, &missing) != 0)
5779 return (-1);
5780
5781 if (missing)
5782 pool_list_remove(cb->cb_list, zhp);
5783
5784 return (0);
5785 }
5786
5787 /*
5788 * Callback to print out the iostats for the given pool.
5789 */
5790 static int
5791 print_iostat(zpool_handle_t *zhp, void *data)
5792 {
5793 iostat_cbdata_t *cb = data;
5794 nvlist_t *oldconfig, *newconfig;
5795 nvlist_t *oldnvroot, *newnvroot;
5796 int ret;
5797
5798 newconfig = zpool_get_config(zhp, &oldconfig);
5799
5800 if (cb->cb_iteration == 1)
5801 oldconfig = NULL;
5802
5803 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
5804 &newnvroot) == 0);
5805
5806 if (oldconfig == NULL)
5807 oldnvroot = NULL;
5808 else
5809 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
5810 &oldnvroot) == 0);
5811
5812 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
5813 cb, 0);
5814 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
5815 !cb->cb_scripted && cb->cb_verbose &&
5816 !cb->cb_vdevs.cb_names_count) {
5817 print_iostat_separator(cb);
5818 if (cb->vcdl != NULL) {
5819 print_cmd_columns(cb->vcdl, 1);
5820 }
5821 printf("\n");
5822 }
5823
5824 return (ret);
5825 }
5826
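/*
 * Return the current terminal width in columns.  Defaults to 80 when
 * stdout is a TTY whose size cannot be queried, and to an effectively
 * unlimited width when output is not a terminal.
 */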
5827 static int
5828 get_columns(void)
5829 {
5830 struct winsize ws;
5831 int columns = 80;
5832 int error;
5833
5834 if (isatty(STDOUT_FILENO)) {
5835 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
5836 if (error == 0)
5837 columns = ws.ws_col;
5838 } else {
5839 columns = 999;
5840 }
5841
5842 return (columns);
5843 }
5844
5845 /*
5846 * Return the required length of the pool/vdev name column. The minimum
5847 * allowed width and output formatting flags must be provided.
5848 */
5849 static int
5850 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5851 {
5852 nvlist_t *config, *nvroot;
5853 int width = min_width;
5854
5855 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5856 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5857 &nvroot) == 0);
5858 size_t poolname_len = strlen(zpool_get_name(zhp));
5859 if (verbose == B_FALSE) {
5860 width = MAX(poolname_len, min_width);
5861 } else {
5862 width = MAX(poolname_len,
5863 max_width(zhp, nvroot, 0, min_width, flags));
5864 }
5865 }
5866
5867 return (width);
5868 }
5869
5870 /*
5871  * Parse the input string and extract the 'interval' and 'count' values, if any.
5872 */
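/*
 * For example, "zpool iostat tank 5 10" parses to interval 5 and count
 * 10, while "zpool iostat 5" parses to interval 5 with count 0 (repeat
 * forever).
 */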
5873 static void
5874 get_interval_count(int *argcp, char **argv, float *iv,
5875 unsigned long *cnt)
5876 {
5877 float interval = 0;
5878 unsigned long count = 0;
5879 int argc = *argcp;
5880
5881 /*
5882 * Determine if the last argument is an integer or a pool name
5883 */
5884 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5885 char *end;
5886
5887 errno = 0;
5888 interval = strtof(argv[argc - 1], &end);
5889
5890 if (*end == '\0' && errno == 0) {
5891 if (interval == 0) {
5892 (void) fprintf(stderr, gettext(
5893 "interval cannot be zero\n"));
5894 usage(B_FALSE);
5895 }
5896 /*
5897 * Ignore the last parameter
5898 */
5899 argc--;
5900 } else {
5901 /*
5902 * If this is not a valid number, just plow on. The
5903 * user will get a more informative error message later
5904 * on.
5905 */
5906 interval = 0;
5907 }
5908 }
5909
5910 /*
5911 * If the last argument is also an integer, then we have both a count
5912 * and an interval.
5913 */
5914 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5915 char *end;
5916
5917 errno = 0;
5918 count = interval;
5919 interval = strtof(argv[argc - 1], &end);
5920
5921 if (*end == '\0' && errno == 0) {
5922 if (interval == 0) {
5923 (void) fprintf(stderr, gettext(
5924 "interval cannot be zero\n"));
5925 usage(B_FALSE);
5926 }
5927
5928 /*
5929 * Ignore the last parameter
5930 */
5931 argc--;
5932 } else {
5933 interval = 0;
5934 }
5935 }
5936
5937 *iv = interval;
5938 *cnt = count;
5939 *argcp = argc;
5940 }
5941
5942 static void
5943 get_timestamp_arg(char c)
5944 {
5945 if (c == 'u')
5946 timestamp_fmt = UDATE;
5947 else if (c == 'd')
5948 timestamp_fmt = DDATE;
5949 else
5950 usage(B_FALSE);
5951 }
5952
5953 /*
5954  * Return the stat flags that are supported on all pools by both the module and
5955 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5956 * It will get ANDed down until only the flags that are supported on all pools
5957 * remain.
5958 */
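/*
 * For example, if a single imported pool was created by a module that
 * lacks queue statistics, the IOS_QUEUES_M bit is cleared from "*data"
 * even though every other pool supports it.
 */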
5959 static int
5960 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5961 {
5962 uint64_t *mask = data;
5963 nvlist_t *config, *nvroot, *nvx;
5964 uint64_t flags = 0;
5965 int i, j;
5966
5967 config = zpool_get_config(zhp, NULL);
5968 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5969 &nvroot) == 0);
5970
5971 	/* Default stats are always supported, but check anyway for completeness. */
5972 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5973 flags |= IOS_DEFAULT_M;
5974
5975 /* Get our extended stats nvlist from the main list */
5976 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5977 &nvx) != 0) {
5978 /*
5979 * No extended stats; they're probably running an older
5980 * module. No big deal, we support that too.
5981 */
5982 goto end;
5983 }
5984
5985 /* For each extended stat, make sure all its nvpairs are supported */
5986 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5987 if (!vsx_type_to_nvlist[j][0])
5988 continue;
5989
5990 /* Start off by assuming the flag is supported, then check */
5991 flags |= (1ULL << j);
5992 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5993 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5994 /* flag isn't supported */
5995 flags = flags & ~(1ULL << j);
5996 break;
5997 }
5998 }
5999 }
6000 end:
6001 *mask = *mask & flags;
6002 return (0);
6003 }
6004
6005 /*
6006 * Return a bitmask of stats that are supported on all pools by both the module
6007 * and zpool iostat.
6008 */
6009 static uint64_t
6010 get_stat_flags(zpool_list_t *list)
6011 {
6012 uint64_t mask = -1;
6013
6014 /*
6015 * get_stat_flags_cb() will lop off bits from "mask" until only the
6016 * flags that are supported on all pools remain.
6017 */
6018 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
6019 return (mask);
6020 }
6021
6022 /*
6023 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
6024 */
6025 static int
6026 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
6027 {
6028 uint64_t guid;
6029 vdev_cbdata_t *cb = cb_data;
6030 zpool_handle_t *zhp = zhp_data;
6031
6032 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
6033 return (0);
6034
6035 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
6036 }
6037
6038 /*
6039 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
6040 */
6041 static int
6042 is_vdev(zpool_handle_t *zhp, void *cb_data)
6043 {
6044 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
6045 }
6046
6047 /*
6048 * Check if vdevs are in a pool
6049 *
6050 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
6051 * return 0. If pool_name is NULL, then search all pools.
6052 */
6053 static int
6054 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
6055 vdev_cbdata_t *cb)
6056 {
6057 char **tmp_name;
6058 int ret = 0;
6059 int i;
6060 int pool_count = 0;
6061
6062 if ((argc == 0) || !*argv)
6063 return (0);
6064
6065 if (pool_name)
6066 pool_count = 1;
6067
6068 /* Temporarily hijack cb_names for a second... */
6069 tmp_name = cb->cb_names;
6070
6071 	/* Go through our list of prospective vdev names */
6072 for (i = 0; i < argc; i++) {
6073 cb->cb_names = argv + i;
6074
6075 /* Is this name a vdev in our pools? */
6076 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
6077 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
6078 if (!ret) {
6079 /* No match */
6080 break;
6081 }
6082 }
6083
6084 cb->cb_names = tmp_name;
6085
6086 return (ret);
6087 }
6088
6089 static int
6090 is_pool_cb(zpool_handle_t *zhp, void *data)
6091 {
6092 char *name = data;
6093 if (strcmp(name, zpool_get_name(zhp)) == 0)
6094 return (1);
6095
6096 return (0);
6097 }
6098
6099 /*
6100 * Do we have a pool named *name? If so, return 1, otherwise 0.
6101 */
6102 static int
6103 is_pool(char *name)
6104 {
6105 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
6106 is_pool_cb, name));
6107 }
6108
6109 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
6110 static int
6111 are_all_pools(int argc, char **argv)
6112 {
6113 if ((argc == 0) || !*argv)
6114 return (0);
6115
6116 while (--argc >= 0)
6117 if (!is_pool(argv[argc]))
6118 return (0);
6119
6120 return (1);
6121 }
6122
6123 /*
6124 * Helper function to print out vdev/pool names we can't resolve. Used for an
6125 * error message.
6126 */
6127 static void
6128 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
6129 vdev_cbdata_t *cb)
6130 {
6131 int i;
6132 char *name;
6133 char *str;
6134 for (i = 0; i < argc; i++) {
6135 name = argv[i];
6136
6137 if (is_pool(name))
6138 str = gettext("pool");
6139 else if (are_vdevs_in_pool(1, &name, pool_name, cb))
6140 str = gettext("vdev in this pool");
6141 else if (are_vdevs_in_pool(1, &name, NULL, cb))
6142 str = gettext("vdev in another pool");
6143 else
6144 str = gettext("unknown");
6145
6146 fprintf(stderr, "\t%s (%s)\n", name, str);
6147 }
6148 }
6149
6150 /*
6151 * Same as get_interval_count(), but with additional checks to not misinterpret
6152 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
6153 * cb.cb_vdevs.cb_name_flags.
6154 */
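/*
 * For example, in "zpool iostat -g <guid> 5" the trailing "5" is an
 * interval, while the leading number is a vdev guid; only trailing args
 * that do not resolve to a vdev are treated as interval/count values.
 */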
6155 static void
6156 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
6157 unsigned long *count, iostat_cbdata_t *cb)
6158 {
6159 char **tmpargv = argv;
6160 int argc_for_interval = 0;
6161
6162 /* Is the last arg an interval value? Or a guid? */
6163 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
6164 &cb->cb_vdevs)) {
6165 /*
6166 * The last arg is not a guid, so it's probably an
6167 * interval value.
6168 */
6169 argc_for_interval++;
6170
6171 if (*argc >= 2 &&
6172 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
6173 &cb->cb_vdevs)) {
6174 /*
6175 * The 2nd to last arg is not a guid, so it's probably
6176 * an interval value.
6177 */
6178 argc_for_interval++;
6179 }
6180 }
6181
6182 /* Point to our list of possible intervals */
6183 tmpargv = &argv[*argc - argc_for_interval];
6184
6185 *argc = *argc - argc_for_interval;
6186 get_interval_count(&argc_for_interval, tmpargv,
6187 interval, count);
6188 }
6189
6190 /*
6191 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
6192 * if we were unable to determine its size.
6193 */
6194 static int
6195 terminal_height(void)
6196 {
6197 struct winsize win;
6198
6199 if (isatty(STDOUT_FILENO) == 0)
6200 return (-1);
6201
6202 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
6203 return (win.ws_row);
6204
6205 return (-1);
6206 }
6207
6208 /*
6209 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
6210 * print the result.
6211 *
6212 * name: Short name of the script ('iostat').
6213 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat');
6214 */
6215 static void
6216 print_zpool_script_help(char *name, char *path)
6217 {
6218 char *argv[] = {path, (char *)"-h", NULL};
6219 char **lines = NULL;
6220 int lines_cnt = 0;
6221 int rc;
6222
6223 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
6224 &lines_cnt);
6225 if (rc != 0 || lines == NULL || lines_cnt <= 0) {
6226 if (lines != NULL)
6227 libzfs_free_str_array(lines, lines_cnt);
6228 return;
6229 }
6230
6231 for (int i = 0; i < lines_cnt; i++)
6232 if (!is_blank_str(lines[i]))
6233 printf(" %-14s %s\n", name, lines[i]);
6234
6235 libzfs_free_str_array(lines, lines_cnt);
6236 }
6237
6238 /*
6239  * Go through the zpool status/iostat -c scripts in the user's path, run their
6240 * help option (-h), and print out the results.
6241 */
6242 static void
6243 print_zpool_dir_scripts(char *dirpath)
6244 {
6245 DIR *dir;
6246 struct dirent *ent;
6247 char fullpath[MAXPATHLEN];
6248 struct stat dir_stat;
6249
6250 if ((dir = opendir(dirpath)) != NULL) {
6251 		/* walk the directory entries and print help for each script */
6252 while ((ent = readdir(dir)) != NULL) {
6253 if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
6254 dirpath, ent->d_name) >= sizeof (fullpath)) {
6255 (void) fprintf(stderr,
6256 gettext("internal error: "
6257 "ZPOOL_SCRIPTS_PATH too large.\n"));
6258 exit(1);
6259 }
6260
6261 /* Print the scripts */
6262 if (stat(fullpath, &dir_stat) == 0)
6263 if (dir_stat.st_mode & S_IXUSR &&
6264 S_ISREG(dir_stat.st_mode))
6265 print_zpool_script_help(ent->d_name,
6266 fullpath);
6267 }
6268 closedir(dir);
6269 }
6270 }
6271
6272 /*
6273 * Print out help text for all zpool status/iostat -c scripts.
6274 */
6275 static void
6276 print_zpool_script_list(const char *subcommand)
6277 {
6278 char *dir, *sp, *tmp;
6279
6280 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
6281
6282 sp = zpool_get_cmd_search_path();
6283 if (sp == NULL)
6284 return;
6285
6286 for (dir = strtok_r(sp, ":", &tmp);
6287 dir != NULL;
6288 dir = strtok_r(NULL, ":", &tmp))
6289 print_zpool_dir_scripts(dir);
6290
6291 free(sp);
6292 }
6293
6294 /*
6295 * Set the minimum pool/vdev name column width. The width must be at least 10,
6296  * but may be as large as the terminal width - 42 so it still fits on one line.
6297 * NOTE: 42 is the width of the default capacity/operations/bandwidth output
6298 */
6299 static int
6300 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
6301 {
6302 iostat_cbdata_t *cb = data;
6303 int width, available_width;
6304
6305 /*
6306 * get_namewidth() returns the maximum width of any name in that column
6307 * for any pool/vdev/device line that will be output.
6308 */
6309 width = get_namewidth(zhp, cb->cb_namewidth,
6310 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
6311
6312 /*
6313 * The width we are calculating is the width of the header and also the
6314 * padding width for names that are less than maximum width. The stats
6315 * take up 42 characters, so the width available for names is:
6316 */
6317 available_width = get_columns() - 42;
6318
6319 /*
6320 * If the maximum width fits on a screen, then great! Make everything
6321 * line up by justifying all lines to the same width. If that max
6322 * width is larger than what's available, the name plus stats won't fit
6323 * on one line, and justifying to that width would cause every line to
6324 * wrap on the screen. We only want lines with long names to wrap.
6325 * Limit the padding to what won't wrap.
6326 */
6327 if (width > available_width)
6328 width = available_width;
6329
6330 /*
6331 * And regardless of whatever the screen width is (get_columns can
6332 * return 0 if the width is not known or less than 42 for a narrow
6333 * terminal) have the width be a minimum of 10.
6334 */
6335 if (width < 10)
6336 width = 10;
6337
6338 /* Save the calculated width */
6339 cb->cb_namewidth = width;
6340
6341 return (0);
6342 }
6343
6344 /*
6345  * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
6346 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
6347 * [interval [count]]
6348 *
6349 * -c CMD For each vdev, run command CMD
6350 * -g Display guid for individual vdev name.
6351 * -L Follow links when resolving vdev path name.
6352 * -P Display full path for vdev name.
6353 * -v Display statistics for individual vdevs
6354 * -h Display help
6355 * -p Display values in parsable (exact) format.
6356 * -H Scripted mode. Don't display headers, and separate properties
6357 * by a single tab.
6358 * -l Display average latency
6359 * -q Display queue depths
6360 * -w Display latency histograms
6361 * -r Display request size histogram
6362 * -T Display a timestamp in date(1) or Unix format
6363 * -n Only print headers once
6364 *
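 * Example invocations:
 *	zpool iostat -v tank 5		# per-vdev stats every 5 seconds
 *	zpool iostat -l 1 10		# average latencies, ten 1s samples
 *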
6365 * This command can be tricky because we want to be able to deal with pool
6366 * creation/destruction as well as vdev configuration changes. The bulk of this
6367 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
6368 * on pool_list_update() to detect the addition of new pools. Configuration
6369 * changes are all handled within libzfs.
6370 */
6371 int
6372 zpool_do_iostat(int argc, char **argv)
6373 {
6374 int c;
6375 int ret;
6376 int npools;
6377 float interval = 0;
6378 unsigned long count = 0;
6379 int winheight = 24;
6380 zpool_list_t *list;
6381 boolean_t verbose = B_FALSE;
6382 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
6383 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
6384 boolean_t omit_since_boot = B_FALSE;
6385 boolean_t guid = B_FALSE;
6386 boolean_t follow_links = B_FALSE;
6387 boolean_t full_name = B_FALSE;
6388 boolean_t headers_once = B_FALSE;
6389 iostat_cbdata_t cb = { 0 };
6390 char *cmd = NULL;
6391
6392 /* Used for printing error message */
6393 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
6394 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
6395
6396 uint64_t unsupported_flags;
6397
6398 /* check options */
6399 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
6400 switch (c) {
6401 case 'c':
6402 if (cmd != NULL) {
6403 fprintf(stderr,
6404 gettext("Can't set -c flag twice\n"));
6405 exit(1);
6406 }
6407
6408 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
6409 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
6410 fprintf(stderr, gettext(
6411 "Can't run -c, disabled by "
6412 "ZPOOL_SCRIPTS_ENABLED.\n"));
6413 exit(1);
6414 }
6415
6416 if ((getuid() <= 0 || geteuid() <= 0) &&
6417 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
6418 fprintf(stderr, gettext(
6419 "Can't run -c with root privileges "
6420 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
6421 exit(1);
6422 }
6423 cmd = optarg;
6424 verbose = B_TRUE;
6425 break;
6426 case 'g':
6427 guid = B_TRUE;
6428 break;
6429 case 'L':
6430 follow_links = B_TRUE;
6431 break;
6432 case 'P':
6433 full_name = B_TRUE;
6434 break;
6435 case 'T':
6436 get_timestamp_arg(*optarg);
6437 break;
6438 case 'v':
6439 verbose = B_TRUE;
6440 break;
6441 case 'p':
6442 parsable = B_TRUE;
6443 break;
6444 case 'l':
6445 latency = B_TRUE;
6446 break;
6447 case 'q':
6448 queues = B_TRUE;
6449 break;
6450 case 'H':
6451 scripted = B_TRUE;
6452 break;
6453 case 'w':
6454 l_histo = B_TRUE;
6455 break;
6456 case 'r':
6457 rq_histo = B_TRUE;
6458 break;
6459 case 'y':
6460 omit_since_boot = B_TRUE;
6461 break;
6462 case 'n':
6463 headers_once = B_TRUE;
6464 break;
6465 case 'h':
6466 usage(B_FALSE);
6467 break;
6468 case '?':
6469 if (optopt == 'c') {
6470 print_zpool_script_list("iostat");
6471 exit(0);
6472 } else {
6473 fprintf(stderr,
6474 gettext("invalid option '%c'\n"), optopt);
6475 }
6476 usage(B_FALSE);
6477 }
6478 }
6479
6480 argc -= optind;
6481 argv += optind;
6482
6483 cb.cb_literal = parsable;
6484 cb.cb_scripted = scripted;
6485
6486 if (guid)
6487 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
6488 if (follow_links)
6489 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6490 if (full_name)
6491 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
6492 cb.cb_iteration = 0;
6493 cb.cb_namewidth = 0;
6494 cb.cb_verbose = verbose;
6495
6496 /* Get our interval and count values (if any) */
6497 if (guid) {
6498 get_interval_count_filter_guids(&argc, argv, &interval,
6499 &count, &cb);
6500 } else {
6501 get_interval_count(&argc, argv, &interval, &count);
6502 }
6503
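	/*
	 * Classify the remaining arguments; e.g. in "zpool iostat tank sda",
	 * "tank" names a pool and "sda" restricts output to that vdev.
	 */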
6504 if (argc == 0) {
6505 /* No args, so just print the defaults. */
6506 } else if (are_all_pools(argc, argv)) {
6507 /* All the args are pool names */
6508 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
6509 /* All the args are vdevs */
6510 cb.cb_vdevs.cb_names = argv;
6511 cb.cb_vdevs.cb_names_count = argc;
6512 argc = 0; /* No pools to process */
6513 } else if (are_all_pools(1, argv)) {
6514 /* The first arg is a pool name */
6515 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
6516 &cb.cb_vdevs)) {
6517 /* ...and the rest are vdev names */
6518 cb.cb_vdevs.cb_names = argv + 1;
6519 cb.cb_vdevs.cb_names_count = argc - 1;
6520 argc = 1; /* One pool to process */
6521 } else {
6522 fprintf(stderr, gettext("Expected either a list of "));
6523 fprintf(stderr, gettext("pools, or list of vdevs in"));
6524 fprintf(stderr, " \"%s\", ", argv[0]);
6525 fprintf(stderr, gettext("but got:\n"));
6526 error_list_unresolved_vdevs(argc - 1, argv + 1,
6527 argv[0], &cb.cb_vdevs);
6528 fprintf(stderr, "\n");
6529 usage(B_FALSE);
6530 return (1);
6531 }
6532 } else {
6533 /*
6534 * The args don't make sense. The first arg isn't a pool name,
6535 * nor are all the args vdevs.
6536 */
6537 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
6538 fprintf(stderr, "\n");
6539 return (1);
6540 }
6541
6542 if (cb.cb_vdevs.cb_names_count != 0) {
6543 /*
6544 		 * If the user specified vdevs, it implies verbose.
6545 */
6546 cb.cb_verbose = B_TRUE;
6547 }
6548
6549 /*
6550 * Construct the list of all interesting pools.
6551 */
6552 ret = 0;
6553 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
6554 &ret)) == NULL)
6555 return (1);
6556
6557 if (pool_list_count(list) == 0 && argc != 0) {
6558 pool_list_free(list);
6559 return (1);
6560 }
6561
6562 if (pool_list_count(list) == 0 && interval == 0) {
6563 pool_list_free(list);
6564 (void) fprintf(stderr, gettext("no pools available\n"));
6565 return (1);
6566 }
6567
6568 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
6569 pool_list_free(list);
6570 (void) fprintf(stderr,
6571 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
6572 usage(B_FALSE);
6573 return (1);
6574 }
6575
6576 if (l_histo && rq_histo) {
6577 pool_list_free(list);
6578 (void) fprintf(stderr,
6579 gettext("Only one of [-r|-w] can be passed at a time\n"));
6580 usage(B_FALSE);
6581 return (1);
6582 }
6583
6584 /*
6585 * Enter the main iostat loop.
6586 */
6587 cb.cb_list = list;
6588
6589 if (l_histo) {
6590 /*
6591 * Histograms tables look out of place when you try to display
6592 * them with the other stats, so make a rule that you can only
6593 * print histograms by themselves.
6594 */
6595 cb.cb_flags = IOS_L_HISTO_M;
6596 } else if (rq_histo) {
6597 cb.cb_flags = IOS_RQ_HISTO_M;
6598 } else {
6599 cb.cb_flags = IOS_DEFAULT_M;
6600 if (latency)
6601 cb.cb_flags |= IOS_LATENCY_M;
6602 if (queues)
6603 cb.cb_flags |= IOS_QUEUES_M;
6604 }
6605
6606 /*
6607 * See if the module supports all the stats we want to display.
6608 */
6609 unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
6610 if (unsupported_flags) {
6611 uint64_t f;
6612 int idx;
6613 fprintf(stderr,
6614 gettext("The loaded zfs module doesn't support:"));
6615
6616 /* for each bit set in unsupported_flags */
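		/* lowbit64() is 1-based; subtract 1 for the 0-based bit index */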
6617 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
6618 idx = lowbit64(f) - 1;
6619 fprintf(stderr, " -%c", flag_to_arg[idx]);
6620 }
6621
6622 fprintf(stderr, ". Try running a newer module.\n");
6623 pool_list_free(list);
6624
6625 return (1);
6626 }
6627
6628 for (;;) {
6629 if ((npools = pool_list_count(list)) == 0)
6630 (void) fprintf(stderr, gettext("no pools available\n"));
6631 else {
6632 /*
6633 * If this is the first iteration and -y was supplied
6634 * we skip any printing.
6635 */
6636 boolean_t skip = (omit_since_boot &&
6637 cb.cb_iteration == 0);
6638
6639 /*
6640 * Refresh all statistics. This is done as an
6641 * explicit step before calculating the maximum name
6642 			 * width, so that any configuration changes are
6643 * properly accounted for.
6644 */
6645 (void) pool_list_iter(list, B_FALSE, refresh_iostat,
6646 &cb);
6647
6648 /*
6649 * Iterate over all pools to determine the maximum width
6650 * for the pool / device name column across all pools.
6651 */
6652 cb.cb_namewidth = 0;
6653 (void) pool_list_iter(list, B_FALSE,
6654 get_namewidth_iostat, &cb);
6655
6656 if (timestamp_fmt != NODATE)
6657 print_timestamp(timestamp_fmt);
6658
6659 if (cmd != NULL && cb.cb_verbose &&
6660 !(cb.cb_flags & IOS_ANYHISTO_M)) {
6661 cb.vcdl = all_pools_for_each_vdev_run(argc,
6662 argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
6663 cb.cb_vdevs.cb_names_count,
6664 cb.cb_vdevs.cb_name_flags);
6665 } else {
6666 cb.vcdl = NULL;
6667 }
6668
6669
6670 /*
6671 * Check terminal size so we can print headers
6672 * even when terminal window has its height
6673 * changed.
6674 */
6675 winheight = terminal_height();
6676 /*
6677 * Are we connected to TTY? If not, headers_once
6678 * should be true, to avoid breaking scripts.
6679 */
6680 if (winheight < 0)
6681 headers_once = B_TRUE;
6682
6683 /*
6684 * If it's the first time and we're not skipping it,
6685 			 * or if exactly one of skip and verbose mode is set, print the header.
6686 *
6687 * The histogram code explicitly prints its header on
6688 * every vdev, so skip this for histograms.
6689 */
6690 if (((++cb.cb_iteration == 1 && !skip) ||
6691 (skip != verbose) ||
6692 (!headers_once &&
6693 (cb.cb_iteration % winheight) == 0)) &&
6694 (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
6695 !cb.cb_scripted)
6696 print_iostat_header(&cb);
6697
6698 if (skip) {
6699 (void) fflush(stdout);
6700 (void) fsleep(interval);
6701 continue;
6702 }
6703
6704 pool_list_iter(list, B_FALSE, print_iostat, &cb);
6705
6706 /*
6707 * If there's more than one pool, and we're not in
6708 * verbose mode (which prints a separator for us),
6709 * then print a separator.
6710 *
6711 * In addition, if we're printing specific vdevs then
6712 * we also want an ending separator.
6713 */
6714 if (((npools > 1 && !verbose &&
6715 !(cb.cb_flags & IOS_ANYHISTO_M)) ||
6716 (!(cb.cb_flags & IOS_ANYHISTO_M) &&
6717 cb.cb_vdevs.cb_names_count)) &&
6718 !cb.cb_scripted) {
6719 print_iostat_separator(&cb);
6720 if (cb.vcdl != NULL)
6721 print_cmd_columns(cb.vcdl, 1);
6722 printf("\n");
6723 }
6724
6725 if (cb.vcdl != NULL)
6726 free_vdev_cmd_data_list(cb.vcdl);
6727
6728 }
6729
6730 if (interval == 0)
6731 break;
6732
6733 if (count != 0 && --count == 0)
6734 break;
6735
6736 (void) fflush(stdout);
6737 (void) fsleep(interval);
6738 }
6739
6740 pool_list_free(list);
6741
6742 return (ret);
6743 }
6744
6745 typedef struct list_cbdata {
6746 boolean_t cb_verbose;
6747 int cb_name_flags;
6748 int cb_namewidth;
6749 boolean_t cb_json;
6750 boolean_t cb_scripted;
6751 zprop_list_t *cb_proplist;
6752 boolean_t cb_literal;
6753 nvlist_t *cb_jsobj;
6754 boolean_t cb_json_as_int;
6755 boolean_t cb_json_pool_key_guid;
6756 } list_cbdata_t;
6757
6758
6759 /*
6760 * Given a list of columns to display, output appropriate headers for each one.
6761 */
6762 static void
6763 print_header(list_cbdata_t *cb)
6764 {
6765 zprop_list_t *pl = cb->cb_proplist;
6766 char headerbuf[ZPOOL_MAXPROPLEN];
6767 const char *header;
6768 boolean_t first = B_TRUE;
6769 boolean_t right_justify;
6770 size_t width = 0;
6771
6772 for (; pl != NULL; pl = pl->pl_next) {
6773 width = pl->pl_width;
6774 if (first && cb->cb_verbose) {
6775 /*
6776 * Reset the width to accommodate the verbose listing
6777 * of devices.
6778 */
6779 width = cb->cb_namewidth;
6780 }
6781
6782 if (!first)
6783 (void) fputs(" ", stdout);
6784 else
6785 first = B_FALSE;
6786
6787 right_justify = B_FALSE;
6788 if (pl->pl_prop != ZPROP_USERPROP) {
6789 header = zpool_prop_column_name(pl->pl_prop);
6790 right_justify = zpool_prop_align_right(pl->pl_prop);
6791 } else {
6792 int i;
6793
6794 for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
6795 headerbuf[i] = toupper(pl->pl_user_prop[i]);
6796 headerbuf[i] = '\0';
6797 header = headerbuf;
6798 }
6799
6800 if (pl->pl_next == NULL && !right_justify)
6801 (void) fputs(header, stdout);
6802 else if (right_justify)
6803 (void) printf("%*s", (int)width, header);
6804 else
6805 (void) printf("%-*s", (int)width, header);
6806 }
6807
6808 (void) fputc('\n', stdout);
6809 }
6810
6811 /*
6812 * Given a pool and a list of properties, print out all the properties according
6813 * to the described layout. Used by zpool_do_list().
6814 */
6815 static void
6816 collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
6817 {
6818 zprop_list_t *pl = cb->cb_proplist;
6819 boolean_t first = B_TRUE;
6820 char property[ZPOOL_MAXPROPLEN];
6821 const char *propstr;
6822 boolean_t right_justify;
6823 size_t width;
6824 zprop_source_t sourcetype = ZPROP_SRC_NONE;
6825 nvlist_t *item, *d, *props;
6826 item = d = props = NULL;
6827
6828 if (cb->cb_json) {
6829 item = fnvlist_alloc();
6830 props = fnvlist_alloc();
6831 d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools");
6832 if (d == NULL) {
6833 fprintf(stderr, "pools obj not found.\n");
6834 exit(1);
6835 }
6836 fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int);
6837 }
6838
6839 for (; pl != NULL; pl = pl->pl_next) {
6840
6841 width = pl->pl_width;
6842 if (first && cb->cb_verbose) {
6843 /*
6844 * Reset the width to accommodate the verbose listing
6845 * of devices.
6846 */
6847 width = cb->cb_namewidth;
6848 }
6849
6850 if (!cb->cb_json && !first) {
6851 if (cb->cb_scripted)
6852 (void) fputc('\t', stdout);
6853 else
6854 (void) fputs(" ", stdout);
6855 } else {
6856 first = B_FALSE;
6857 }
6858
6859 right_justify = B_FALSE;
6860 if (pl->pl_prop != ZPROP_USERPROP) {
6861 if (zpool_get_prop(zhp, pl->pl_prop, property,
6862 sizeof (property), &sourcetype,
6863 cb->cb_literal) != 0)
6864 propstr = "-";
6865 else
6866 propstr = property;
6867
6868 right_justify = zpool_prop_align_right(pl->pl_prop);
6869 } else if ((zpool_prop_feature(pl->pl_user_prop) ||
6870 zpool_prop_unsupported(pl->pl_user_prop)) &&
6871 zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
6872 sizeof (property)) == 0) {
6873 propstr = property;
6874 sourcetype = ZPROP_SRC_LOCAL;
6875 } else if (zfs_prop_user(pl->pl_user_prop) &&
6876 zpool_get_userprop(zhp, pl->pl_user_prop, property,
6877 sizeof (property), &sourcetype) == 0) {
6878 propstr = property;
6879 } else {
6880 propstr = "-";
6881 }
6882
6883 if (cb->cb_json) {
6884 if (pl->pl_prop == ZPOOL_PROP_NAME)
6885 continue;
6886 const char *prop_name;
6887 if (pl->pl_prop != ZPROP_USERPROP)
6888 prop_name = zpool_prop_to_name(pl->pl_prop);
6889 else
6890 prop_name = pl->pl_user_prop;
6891 (void) zprop_nvlist_one_property(
6892 prop_name, propstr,
6893 sourcetype, NULL, NULL, props, cb->cb_json_as_int);
6894 } else {
6895 /*
6896 * If this is being called in scripted mode, or if this
6897 * is the last column and it is left-justified, don't
6898 * include a width format specifier.
6899 */
6900 if (cb->cb_scripted || (pl->pl_next == NULL &&
6901 !right_justify))
6902 (void) fputs(propstr, stdout);
6903 else if (right_justify)
6904 (void) printf("%*s", (int)width, propstr);
6905 else
6906 (void) printf("%-*s", (int)width, propstr);
6907 }
6908 }
6909
6910 if (cb->cb_json) {
6911 fnvlist_add_nvlist(item, "properties", props);
6912 if (cb->cb_json_pool_key_guid) {
6913 char pool_guid[256];
6914 uint64_t guid = fnvlist_lookup_uint64(
6915 zpool_get_config(zhp, NULL),
6916 ZPOOL_CONFIG_POOL_GUID);
6917 snprintf(pool_guid, 256, "%llu",
6918 (u_longlong_t)guid);
6919 fnvlist_add_nvlist(d, pool_guid, item);
6920 } else {
6921 fnvlist_add_nvlist(d, zpool_get_name(zhp),
6922 item);
6923 }
6924 fnvlist_free(props);
6925 fnvlist_free(item);
6926 } else
6927 (void) fputc('\n', stdout);
6928 }
6929
6930 static void
6931 collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,
6932 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format,
6933 boolean_t json, nvlist_t *nvl, boolean_t as_int)
6934 {
6935 char propval[64];
6936 boolean_t fixed;
6937 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6938
6939 switch (prop) {
6940 case ZPOOL_PROP_SIZE:
6941 case ZPOOL_PROP_EXPANDSZ:
6942 case ZPOOL_PROP_CHECKPOINT:
6943 case ZPOOL_PROP_DEDUPRATIO:
6944 case ZPOOL_PROP_DEDUPCACHED:
6945 if (value == 0)
6946 (void) strlcpy(propval, "-", sizeof (propval));
6947 else
6948 zfs_nicenum_format(value, propval, sizeof (propval),
6949 format);
6950 break;
6951 case ZPOOL_PROP_FRAGMENTATION:
6952 if (value == ZFS_FRAG_INVALID) {
6953 (void) strlcpy(propval, "-", sizeof (propval));
6954 } else if (format == ZFS_NICENUM_RAW) {
6955 (void) snprintf(propval, sizeof (propval), "%llu",
6956 (unsigned long long)value);
6957 } else {
6958 (void) snprintf(propval, sizeof (propval), "%llu%%",
6959 (unsigned long long)value);
6960 }
6961 break;
6962 case ZPOOL_PROP_CAPACITY:
6963 /* capacity value is in parts-per-10,000 (aka permyriad) */
6964 if (format == ZFS_NICENUM_RAW)
6965 (void) snprintf(propval, sizeof (propval), "%llu",
6966 (unsigned long long)value / 100);
6967 else
6968 (void) snprintf(propval, sizeof (propval),
6969 value < 1000 ? "%1.2f%%" : value < 10000 ?
6970 "%2.1f%%" : "%3.0f%%", value / 100.0);
6971 break;
6972 case ZPOOL_PROP_HEALTH:
6973 width = 8;
6974 (void) strlcpy(propval, str, sizeof (propval));
6975 break;
6976 default:
6977 zfs_nicenum_format(value, propval, sizeof (propval), format);
6978 }
6979
6980 if (!valid)
6981 (void) strlcpy(propval, "-", sizeof (propval));
6982
6983 if (json) {
6984 zprop_nvlist_one_property(zpool_prop_to_name(prop), propval,
6985 ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);
6986 } else {
6987 if (scripted)
6988 (void) printf("\t%s", propval);
6989 else
6990 (void) printf(" %*s", (int)width, propval);
6991 }
6992 }
6993
6994 /*
6995  * Print one static default line per vdev.
6996  * Not compatible with the '-o' <proplist> option.
6997 */
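/*
 * In JSON mode nothing is printed directly; stats are instead
 * accumulated into "item", producing nested objects roughly like:
 *	{"mirror-0": {"properties": {...}, "vdevs": {...}}}
 */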
6998 static void
6999 collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
7000 list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)
7001 {
7002 nvlist_t **child;
7003 vdev_stat_t *vs;
7004 uint_t c, children = 0;
7005 char *vname;
7006 boolean_t scripted = cb->cb_scripted;
7007 uint64_t islog = B_FALSE;
7008 nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;
7009 props = ent = ch = obj = sp = l2c = NULL;
7010 const char *dashes = "%-*s - - - - "
7011 "- - - - -\n";
7012
7013 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
7014 (uint64_t **)&vs, &c) == 0);
7015
7016 if (name != NULL) {
7017 boolean_t toplevel = (vs->vs_space != 0);
7018 uint64_t cap;
7019 enum zfs_nicenum_format format;
7020 const char *state;
7021
7022 if (cb->cb_literal)
7023 format = ZFS_NICENUM_RAW;
7024 else
7025 format = ZFS_NICENUM_1024;
7026
7027 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
7028 return;
7029
7030 if (cb->cb_json) {
7031 props = fnvlist_alloc();
7032 ent = fnvlist_alloc();
7033 fill_vdev_info(ent, zhp, (char *)name, B_FALSE,
7034 cb->cb_json_as_int);
7035 } else {
7036 if (scripted)
7037 (void) printf("\t%s", name);
7038 else if (strlen(name) + depth > cb->cb_namewidth)
7039 (void) printf("%*s%s", depth, "", name);
7040 else
7041 (void) printf("%*s%s%*s", depth, "", name,
7042 (int)(cb->cb_namewidth - strlen(name) -
7043 depth), "");
7044 }
7045
7046 /*
7047 * Print the properties for the individual vdevs. Some
7048 * properties are only applicable to toplevel vdevs. The
7049 		 * 'toplevel' boolean value is passed to collect_vdev_prop() to
7050 		 * indicate that the value is valid.
7051 */
7052 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) {
7053 collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
7054 scripted, B_TRUE, format, cb->cb_json, props,
7055 cb->cb_json_as_int);
7056 } else {
7057 collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
7058 scripted, toplevel, format, cb->cb_json, props,
7059 cb->cb_json_as_int);
7060 }
7061 collect_vdev_prop(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
7062 scripted, toplevel, format, cb->cb_json, props,
7063 cb->cb_json_as_int);
7064 collect_vdev_prop(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
7065 NULL, scripted, toplevel, format, cb->cb_json, props,
7066 cb->cb_json_as_int);
7067 collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
7068 vs->vs_checkpoint_space, NULL, scripted, toplevel, format,
7069 cb->cb_json, props, cb->cb_json_as_int);
7070 collect_vdev_prop(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
7071 scripted, B_TRUE, format, cb->cb_json, props,
7072 cb->cb_json_as_int);
7073 collect_vdev_prop(ZPOOL_PROP_FRAGMENTATION,
7074 vs->vs_fragmentation, NULL, scripted,
7075 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
7076 format, cb->cb_json, props, cb->cb_json_as_int);
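		/* cap is in parts-per-10,000 and is rendered as a percentage */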
7077 cap = (vs->vs_space == 0) ? 0 :
7078 (vs->vs_alloc * 10000 / vs->vs_space);
7079 collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap, NULL,
7080 scripted, toplevel, format, cb->cb_json, props,
7081 cb->cb_json_as_int);
7082 collect_vdev_prop(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
7083 scripted, toplevel, format, cb->cb_json, props,
7084 cb->cb_json_as_int);
7085 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
7086 if (isspare) {
7087 if (vs->vs_aux == VDEV_AUX_SPARED)
7088 state = "INUSE";
7089 else if (vs->vs_state == VDEV_STATE_HEALTHY)
7090 state = "AVAIL";
7091 }
7092 collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state, scripted,
7093 B_TRUE, format, cb->cb_json, props, cb->cb_json_as_int);
7094
7095 if (cb->cb_json) {
7096 fnvlist_add_nvlist(ent, "properties", props);
7097 fnvlist_free(props);
7098 } else
7099 (void) fputc('\n', stdout);
7100 }
7101
7102 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
7103 &child, &children) != 0) {
7104 if (cb->cb_json) {
7105 fnvlist_add_nvlist(item, name, ent);
7106 fnvlist_free(ent);
7107 }
7108 return;
7109 }
7110
7111 if (cb->cb_json) {
7112 ch = fnvlist_alloc();
7113 }
7114
7115 /* list the normal vdevs first */
7116 for (c = 0; c < children; c++) {
7117 uint64_t ishole = B_FALSE;
7118
7119 if (nvlist_lookup_uint64(child[c],
7120 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
7121 continue;
7122
7123 if (nvlist_lookup_uint64(child[c],
7124 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
7125 continue;
7126
7127 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
7128 continue;
7129
7130 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7131 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
7132
7133 if (name == NULL || cb->cb_json != B_TRUE)
7134 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7135 B_FALSE, item);
7136 else if (cb->cb_json) {
7137 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7138 B_FALSE, ch);
7139 }
7140 free(vname);
7141 }
7142
7143 if (cb->cb_json) {
7144 if (!nvlist_empty(ch))
7145 fnvlist_add_nvlist(ent, "vdevs", ch);
7146 fnvlist_free(ch);
7147 }
7148
7149 /* list the classes: 'logs', 'dedup', and 'special' */
7150 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
7151 boolean_t printed = B_FALSE;
7152 if (cb->cb_json)
7153 obj = fnvlist_alloc();
7154 for (c = 0; c < children; c++) {
7155 const char *bias = NULL;
7156 const char *type = NULL;
7157
7158 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
7159 &islog) == 0 && islog) {
7160 bias = VDEV_ALLOC_CLASS_LOGS;
7161 } else {
7162 (void) nvlist_lookup_string(child[c],
7163 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
7164 (void) nvlist_lookup_string(child[c],
7165 ZPOOL_CONFIG_TYPE, &type);
7166 }
7167 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
7168 continue;
7169 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
7170 continue;
7171
7172 if (!printed && !cb->cb_json) {
7173 /* LINTED E_SEC_PRINTF_VAR_FMT */
7174 (void) printf(dashes, cb->cb_namewidth,
7175 class_name[n]);
7176 printed = B_TRUE;
7177 }
7178 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7179 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
7180 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7181 B_FALSE, obj);
7182 free(vname);
7183 }
7184 if (cb->cb_json) {
7185 if (!nvlist_empty(obj))
7186 fnvlist_add_nvlist(item, class_name[n], obj);
7187 fnvlist_free(obj);
7188 }
7189 }
7190
7191 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
7192 &child, &children) == 0 && children > 0) {
7193 if (cb->cb_json) {
7194 l2c = fnvlist_alloc();
7195 } else {
7196 /* LINTED E_SEC_PRINTF_VAR_FMT */
7197 (void) printf(dashes, cb->cb_namewidth, "cache");
7198 }
7199 for (c = 0; c < children; c++) {
7200 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7201 cb->cb_name_flags);
7202 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7203 B_FALSE, l2c);
7204 free(vname);
7205 }
7206 if (cb->cb_json) {
7207 if (!nvlist_empty(l2c))
7208 fnvlist_add_nvlist(item, "l2cache", l2c);
7209 fnvlist_free(l2c);
7210 }
7211 }
7212
7213 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
7214 &children) == 0 && children > 0) {
7215 if (cb->cb_json) {
7216 sp = fnvlist_alloc();
7217 } else {
7218 /* LINTED E_SEC_PRINTF_VAR_FMT */
7219 (void) printf(dashes, cb->cb_namewidth, "spare");
7220 }
7221 for (c = 0; c < children; c++) {
7222 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7223 cb->cb_name_flags);
7224 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7225 B_TRUE, sp);
7226 free(vname);
7227 }
7228 if (cb->cb_json) {
7229 if (!nvlist_empty(sp))
7230 fnvlist_add_nvlist(item, "spares", sp);
7231 fnvlist_free(sp);
7232 }
7233 }
7234
7235 if (name != NULL && cb->cb_json) {
7236 fnvlist_add_nvlist(item, name, ent);
7237 fnvlist_free(ent);
7238 }
7239 }
7240
7241 /*
7242 * Generic callback function to list a pool.
7243 */
7244 static int
7245 list_callback(zpool_handle_t *zhp, void *data)
7246 {
7247 nvlist_t *p, *d, *nvdevs;
7248 uint64_t guid;
7249 char pool_guid[256];
7250 const char *pool_name = zpool_get_name(zhp);
7251 list_cbdata_t *cbp = data;
7252 p = d = nvdevs = NULL;
7253
7254 collect_pool(zhp, cbp);
7255
7256 if (cbp->cb_verbose) {
7257 nvlist_t *config, *nvroot;
7258 config = zpool_get_config(zhp, NULL);
7259 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
7260 &nvroot) == 0);
7261 if (cbp->cb_json) {
7262 d = fnvlist_lookup_nvlist(cbp->cb_jsobj,
7263 "pools");
7264 if (cbp->cb_json_pool_key_guid) {
7265 guid = fnvlist_lookup_uint64(config,
7266 ZPOOL_CONFIG_POOL_GUID);
7267 snprintf(pool_guid, 256, "%llu",
7268 (u_longlong_t)guid);
7269 p = fnvlist_lookup_nvlist(d, pool_guid);
7270 } else {
7271 p = fnvlist_lookup_nvlist(d, pool_name);
7272 }
7273 nvdevs = fnvlist_alloc();
7274 }
7275 collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs);
7276 if (cbp->cb_json) {
7277 fnvlist_add_nvlist(p, "vdevs", nvdevs);
7278 if (cbp->cb_json_pool_key_guid)
7279 fnvlist_add_nvlist(d, pool_guid, p);
7280 else
7281 fnvlist_add_nvlist(d, pool_name, p);
7282 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
7283 fnvlist_free(nvdevs);
7284 }
7285 }
7286
7287 return (0);
7288 }
7289
7290 /*
7291 * Set the minimum pool/vdev name column width. The width must be at least 9,
7292 * but may be as large as needed.
7293 */
7294 static int
7295 get_namewidth_list(zpool_handle_t *zhp, void *data)
7296 {
7297 list_cbdata_t *cb = data;
7298 int width;
7299
7300 width = get_namewidth(zhp, cb->cb_namewidth,
7301 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
7302
7303 if (width < 9)
7304 width = 9;
7305
7306 cb->cb_namewidth = width;
7307
7308 return (0);
7309 }
7310
7311 /*
7312 * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
7313 *
7314 * -g Display guid for individual vdev name.
7315 * -H Scripted mode. Don't display headers, and separate properties
7316 * by a single tab.
7317 * -L Follow links when resolving vdev path name.
7318 * -o List of properties to display. Defaults to
7319 * "name,size,allocated,free,expandsize,fragmentation,capacity,"
7320 * "dedupratio,health,altroot"
7321 * -p Display values in parsable (exact) format.
7322 * -P Display full path for vdev name.
7323 * -T Display a timestamp in date(1) or Unix format
7324 * -j Display the output in JSON format
7325 * --json-int Display the numbers as integer instead of strings.
7326 * --json-pool-key-guid Set pool GUID as key for pool objects.
7327 *
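 * Example: "zpool list -o name,size,health 5" reprints those three
 * columns every 5 seconds until interrupted.
 *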
7328 * List all pools in the system, whether or not they're healthy. Output space
7329  * statistics for each one, as well as a health status summary.
7330 */
7331 int
7332 zpool_do_list(int argc, char **argv)
7333 {
7334 int c;
7335 int ret = 0;
7336 list_cbdata_t cb = { 0 };
7337 static char default_props[] =
7338 "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
7339 "capacity,dedupratio,health,altroot";
7340 char *props = default_props;
7341 float interval = 0;
7342 unsigned long count = 0;
7343 zpool_list_t *list;
7344 boolean_t first = B_TRUE;
7345 nvlist_t *data = NULL;
7346 current_prop_type = ZFS_TYPE_POOL;
7347
7348 struct option long_options[] = {
7349 {"json", no_argument, NULL, 'j'},
7350 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
7351 {"json-pool-key-guid", no_argument, NULL,
7352 ZPOOL_OPTION_POOL_KEY_GUID},
7353 {0, 0, 0, 0}
7354 };
7355
7356 /* check options */
7357 while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options,
7358 NULL)) != -1) {
7359 switch (c) {
7360 case 'g':
7361 cb.cb_name_flags |= VDEV_NAME_GUID;
7362 break;
7363 case 'H':
7364 cb.cb_scripted = B_TRUE;
7365 break;
7366 case 'L':
7367 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
7368 break;
7369 case 'o':
7370 props = optarg;
7371 break;
7372 case 'P':
7373 cb.cb_name_flags |= VDEV_NAME_PATH;
7374 break;
7375 case 'p':
7376 cb.cb_literal = B_TRUE;
7377 break;
7378 case 'j':
7379 cb.cb_json = B_TRUE;
7380 break;
7381 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
7382 cb.cb_json_as_int = B_TRUE;
7383 cb.cb_literal = B_TRUE;
7384 break;
7385 case ZPOOL_OPTION_POOL_KEY_GUID:
7386 cb.cb_json_pool_key_guid = B_TRUE;
7387 break;
7388 case 'T':
7389 get_timestamp_arg(*optarg);
7390 break;
7391 case 'v':
7392 cb.cb_verbose = B_TRUE;
7393 cb.cb_namewidth = 8; /* 8 until precalc is avail */
7394 break;
7395 case ':':
7396 (void) fprintf(stderr, gettext("missing argument for "
7397 "'%c' option\n"), optopt);
7398 usage(B_FALSE);
7399 break;
7400 case '?':
7401 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7402 optopt);
7403 usage(B_FALSE);
7404 }
7405 }
7406
7407 argc -= optind;
7408 argv += optind;
7409
7410 if (!cb.cb_json && cb.cb_json_as_int) {
7411 (void) fprintf(stderr, gettext("'--json-int' only works with"
7412 " '-j' option\n"));
7413 usage(B_FALSE);
7414 }
7415
7416 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
7417 (void) fprintf(stderr, gettext("'json-pool-key-guid' only"
7418 " works with '-j' option\n"));
7419 usage(B_FALSE);
7420 }
7421
7422 get_interval_count(&argc, argv, &interval, &count);
7423
7424 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
7425 usage(B_FALSE);
7426
7427 for (;;) {
7428 if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
7429 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
7430 return (1);
7431
7432 if (pool_list_count(list) == 0)
7433 break;
7434
7435 if (cb.cb_json) {
7436 cb.cb_jsobj = zpool_json_schema(0, 1);
7437 data = fnvlist_alloc();
7438 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
7439 fnvlist_free(data);
7440 }
7441
7442 cb.cb_namewidth = 0;
7443 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
7444
7445 if (timestamp_fmt != NODATE) {
7446 if (cb.cb_json) {
7447 if (cb.cb_json_as_int) {
7448 fnvlist_add_uint64(cb.cb_jsobj, "time",
7449 time(NULL));
7450 } else {
7451 char ts[128];
7452 get_timestamp(timestamp_fmt, ts, 128);
7453 fnvlist_add_string(cb.cb_jsobj, "time",
7454 ts);
7455 }
7456 } else
7457 print_timestamp(timestamp_fmt);
7458 }
7459
7460 if (!cb.cb_scripted && (first || cb.cb_verbose) &&
7461 !cb.cb_json) {
7462 print_header(&cb);
7463 first = B_FALSE;
7464 }
7465 ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
7466
7467 if (ret == 0 && cb.cb_json)
7468 zcmd_print_json(cb.cb_jsobj);
7469 else if (ret != 0 && cb.cb_json)
7470 nvlist_free(cb.cb_jsobj);
7471
7472 if (interval == 0)
7473 break;
7474
7475 if (count != 0 && --count == 0)
7476 break;
7477
7478 pool_list_free(list);
7479
7480 (void) fflush(stdout);
7481 (void) fsleep(interval);
7482 }
7483
7484 if (argc == 0 && !cb.cb_scripted && !cb.cb_json &&
7485 pool_list_count(list) == 0) {
7486 (void) printf(gettext("no pools available\n"));
7487 ret = 0;
7488 }
7489
7490 pool_list_free(list);
7491 zprop_free_list(cb.cb_proplist);
7492 return (ret);
7493 }
7494
7495 static int
7496 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
7497 {
7498 boolean_t force = B_FALSE;
7499 boolean_t rebuild = B_FALSE;
7500 boolean_t wait = B_FALSE;
7501 int c;
7502 nvlist_t *nvroot;
7503 char *poolname, *old_disk, *new_disk;
7504 zpool_handle_t *zhp;
7505 nvlist_t *props = NULL;
7506 char *propval;
7507 int ret;
7508
7509 /* check options */
7510 while ((c = getopt(argc, argv, "fo:sw")) != -1) {
7511 switch (c) {
7512 case 'f':
7513 force = B_TRUE;
7514 break;
7515 case 'o':
7516 if ((propval = strchr(optarg, '=')) == NULL) {
7517 (void) fprintf(stderr, gettext("missing "
7518 "'=' for -o option\n"));
7519 usage(B_FALSE);
7520 }
7521 *propval = '\0';
7522 propval++;
7523
7524 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
7525 (add_prop_list(optarg, propval, &props, B_TRUE)))
7526 usage(B_FALSE);
7527 break;
7528 case 's':
7529 rebuild = B_TRUE;
7530 break;
7531 case 'w':
7532 wait = B_TRUE;
7533 break;
7534 case '?':
7535 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7536 optopt);
7537 usage(B_FALSE);
7538 }
7539 }
7540
7541 argc -= optind;
7542 argv += optind;
7543
7544 /* get pool name and check number of arguments */
7545 if (argc < 1) {
7546 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7547 usage(B_FALSE);
7548 }
7549
7550 poolname = argv[0];
7551
7552 if (argc < 2) {
7553 (void) fprintf(stderr,
7554 gettext("missing <device> specification\n"));
7555 usage(B_FALSE);
7556 }
7557
7558 old_disk = argv[1];
7559
7560 if (argc < 3) {
7561 if (!replacing) {
7562 (void) fprintf(stderr,
7563 gettext("missing <new_device> specification\n"));
7564 usage(B_FALSE);
7565 }
7566 new_disk = old_disk;
7567 argc -= 1;
7568 argv += 1;
7569 } else {
7570 new_disk = argv[2];
7571 argc -= 2;
7572 argv += 2;
7573 }
7574
7575 if (argc > 1) {
7576 (void) fprintf(stderr, gettext("too many arguments\n"));
7577 usage(B_FALSE);
7578 }
7579
7580 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
7581 nvlist_free(props);
7582 return (1);
7583 }
7584
7585 if (zpool_get_config(zhp, NULL) == NULL) {
7586 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
7587 poolname);
7588 zpool_close(zhp);
7589 nvlist_free(props);
7590 return (1);
7591 }
7592
7593 /* unless manually specified use "ashift" pool property (if set) */
7594 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
7595 int intval;
7596 zprop_source_t src;
7597 char strval[ZPOOL_MAXPROPLEN];
7598
7599 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
7600 if (src != ZPROP_SRC_DEFAULT) {
7601 (void) sprintf(strval, "%" PRId32, intval);
7602 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
7603 &props, B_TRUE) == 0);
7604 }
7605 }
7606
7607 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
7608 argc, argv);
7609 if (nvroot == NULL) {
7610 zpool_close(zhp);
7611 nvlist_free(props);
7612 return (1);
7613 }
7614
7615 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
7616 rebuild);
7617
7618 if (ret == 0 && wait) {
7619 zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
7620 char raidz_prefix[] = "raidz";
7621 if (replacing) {
7622 activity = ZPOOL_WAIT_REPLACE;
7623 } else if (strncmp(old_disk,
7624 raidz_prefix, strlen(raidz_prefix)) == 0) {
7625 activity = ZPOOL_WAIT_RAIDZ_EXPAND;
7626 }
7627 ret = zpool_wait(zhp, activity);
7628 }
7629
7630 nvlist_free(props);
7631 nvlist_free(nvroot);
7632 zpool_close(zhp);
7633
7634 return (ret);
7635 }
7636
7637 /*
7638 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
7639 *
7640 * -f Force attach, even if <new_device> appears to be in use.
7641 * -s Use sequential instead of healing reconstruction for resilver.
7642 * -o Set property=value.
7643 * -w Wait for replacing to complete before returning
7644 *
7645 * Replace <device> with <new_device>.
7646 */
7647 int
7648 zpool_do_replace(int argc, char **argv)
7649 {
7650 return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
7651 }
7652
7653 /*
7654 * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
7655 *
7656 * -f Force attach, even if <new_device> appears to be in use.
7657 * -s Use sequential instead of healing reconstruction for resilver.
7658 * -o Set property=value.
7659 * -w Wait for resilvering (mirror) or expansion (raidz) to complete
7660 * before returning.
7661 *
7662 * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
7663 * mirror or raidz. If <device> is not part of a mirror, then <device> will
7664 * be transformed into a mirror of <device> and <new_device>. When a mirror
7665 * is involved, <new_device> will begin life with a DTL of [0, now], and will
7666  * immediately begin to resilver itself. For the raidz case, an expansion will
7667 * commence and reflow the raidz data across all the disks including the
7668 * <new_device>.
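 *
 * For example, "zpool attach tank sda sdb" converts the standalone disk
 * sda into a two-way mirror of sda and sdb.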
7669 */
7670 int
7671 zpool_do_attach(int argc, char **argv)
7672 {
7673 return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
7674 }
7675
7676 /*
7677 * zpool detach [-f] <pool> <device>
7678 *
7679 * -f Force detach of <device>, even if DTLs argue against it
7680 * (not supported yet)
7681 *
7682 * Detach a device from a mirror. The operation will be refused if <device>
7683 * is the last device in the mirror, or if the DTLs indicate that this device
7684 * has the only valid copy of some data.
7685 */
7686 int
7687 zpool_do_detach(int argc, char **argv)
7688 {
7689 int c;
7690 char *poolname, *path;
7691 zpool_handle_t *zhp;
7692 int ret;
7693
7694 /* check options */
7695 while ((c = getopt(argc, argv, "")) != -1) {
7696 switch (c) {
7697 case '?':
7698 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7699 optopt);
7700 usage(B_FALSE);
7701 }
7702 }
7703
7704 argc -= optind;
7705 argv += optind;
7706
7707 /* get pool name and check number of arguments */
7708 if (argc < 1) {
7709 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7710 usage(B_FALSE);
7711 }
7712
7713 if (argc < 2) {
7714 (void) fprintf(stderr,
7715 gettext("missing <device> specification\n"));
7716 usage(B_FALSE);
7717 }
7718
7719 poolname = argv[0];
7720 path = argv[1];
7721
7722 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7723 return (1);
7724
7725 ret = zpool_vdev_detach(zhp, path);
7726
7727 zpool_close(zhp);
7728
7729 return (ret);
7730 }
7731
7732 /*
7733 * zpool split [-gLnP] [-o prop=val] ...
7734 * [-o mntopt] ...
7735 * [-R altroot] <pool> <newpool> [<device> ...]
7736 *
7737 * -g Display guid for individual vdev name.
7738 * -L Follow links when resolving vdev path name.
7739 * -n Do not split the pool, but display the resulting layout if
7740 * it were to be split.
7741 * -o Set property=value, or set mount options.
7742 * -P Display full path for vdev name.
7743 * -R Mount the split-off pool under an alternate root.
7744 * -l Load encryption keys while importing.
7745 *
7746 * Splits the named pool and gives it the new pool name. Devices to be split
7747 * off may be listed, provided that no more than one device is specified
7748 * per top-level vdev mirror. The newly split pool is left in an exported
7749 * state unless -R is specified.
7750 *
7751  * Restrictions: the top level of the pool must only be made up of
7752 * mirrors; all devices in the pool must be healthy; no device may be
7753 * undergoing a resilvering operation.
7754 */
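/*
 * Illustrative usage (names are hypothetical):
 *
 *	# preview the layout the new pool would receive
 *	zpool split -n tank tank2
 *
 *	# split and immediately import the new pool under an alternate root
 *	zpool split -R /mnt tank tank2
 */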
7755 int
7756 zpool_do_split(int argc, char **argv)
7757 {
7758 char *srcpool, *newpool, *propval;
7759 char *mntopts = NULL;
7760 splitflags_t flags;
7761 int c, ret = 0;
7762 int ms_status = 0;
7763 boolean_t loadkeys = B_FALSE;
7764 zpool_handle_t *zhp;
7765 nvlist_t *config, *props = NULL;
7766
7767 flags.dryrun = B_FALSE;
7768 flags.import = B_FALSE;
7769 flags.name_flags = 0;
7770
7771 /* check options */
7772 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
7773 switch (c) {
7774 case 'g':
7775 flags.name_flags |= VDEV_NAME_GUID;
7776 break;
7777 case 'L':
7778 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
7779 break;
7780 case 'R':
7781 flags.import = B_TRUE;
7782 if (add_prop_list(
7783 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
7784 &props, B_TRUE) != 0) {
7785 nvlist_free(props);
7786 usage(B_FALSE);
7787 }
7788 break;
7789 case 'l':
7790 loadkeys = B_TRUE;
7791 break;
7792 case 'n':
7793 flags.dryrun = B_TRUE;
7794 break;
7795 case 'o':
7796 if ((propval = strchr(optarg, '=')) != NULL) {
7797 *propval = '\0';
7798 propval++;
7799 if (add_prop_list(optarg, propval,
7800 &props, B_TRUE) != 0) {
7801 nvlist_free(props);
7802 usage(B_FALSE);
7803 }
7804 } else {
7805 mntopts = optarg;
7806 }
7807 break;
7808 case 'P':
7809 flags.name_flags |= VDEV_NAME_PATH;
7810 break;
7811 case ':':
7812 (void) fprintf(stderr, gettext("missing argument for "
7813 "'%c' option\n"), optopt);
7814 usage(B_FALSE);
7815 break;
7816 case '?':
7817 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7818 optopt);
7819 usage(B_FALSE);
7820 break;
7821 }
7822 }
7823
7824 if (!flags.import && mntopts != NULL) {
7825 (void) fprintf(stderr, gettext("setting mntopts is only "
7826 "valid when importing the pool\n"));
7827 usage(B_FALSE);
7828 }
7829
7830 if (!flags.import && loadkeys) {
7831 (void) fprintf(stderr, gettext("loading keys is only "
7832 "valid when importing the pool\n"));
7833 usage(B_FALSE);
7834 }
7835
7836 argc -= optind;
7837 argv += optind;
7838
7839 if (argc < 1) {
7840 (void) fprintf(stderr, gettext("Missing pool name\n"));
7841 usage(B_FALSE);
7842 }
7843 if (argc < 2) {
7844 (void) fprintf(stderr, gettext("Missing new pool name\n"));
7845 usage(B_FALSE);
7846 }
7847
7848 srcpool = argv[0];
7849 newpool = argv[1];
7850
7851 argc -= 2;
7852 argv += 2;
7853
7854 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
7855 nvlist_free(props);
7856 return (1);
7857 }
7858
7859 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
7860 if (config == NULL) {
7861 ret = 1;
7862 } else {
7863 if (flags.dryrun) {
7864 (void) printf(gettext("would create '%s' with the "
7865 "following layout:\n\n"), newpool);
7866 print_vdev_tree(NULL, newpool, config, 0, "",
7867 flags.name_flags);
7868 print_vdev_tree(NULL, "dedup", config, 0,
7869 VDEV_ALLOC_BIAS_DEDUP, 0);
7870 print_vdev_tree(NULL, "special", config, 0,
7871 VDEV_ALLOC_BIAS_SPECIAL, 0);
7872 }
7873 }
7874
7875 zpool_close(zhp);
7876
7877 if (ret != 0 || flags.dryrun || !flags.import) {
7878 nvlist_free(config);
7879 nvlist_free(props);
7880 return (ret);
7881 }
7882
7883 /*
7884 * The split was successful. Now we need to open the new
7885 * pool and import it.
7886 */
7887 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
7888 nvlist_free(config);
7889 nvlist_free(props);
7890 return (1);
7891 }
7892
7893 if (loadkeys) {
7894 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
7895 if (ret != 0)
7896 ret = 1;
7897 }
7898
7899 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
7900 ms_status = zpool_enable_datasets(zhp, mntopts, 0,
7901 mount_tp_nthr);
7902 if (ms_status == EZFS_SHAREFAILED) {
7903 (void) fprintf(stderr, gettext("Split was successful, "
7904 "datasets are mounted but sharing of some datasets "
7905 "has failed\n"));
7906 } else if (ms_status == EZFS_MOUNTFAILED) {
7907 (void) fprintf(stderr, gettext("Split was successful"
7908 ", but some datasets could not be mounted\n"));
7909 (void) fprintf(stderr, gettext("Try doing '%s' with a "
7910 "different altroot\n"), "zpool import");
7911 }
7912 }
7913 zpool_close(zhp);
7914 nvlist_free(config);
7915 nvlist_free(props);
7916
7917 return (ret);
7918 }
7919
7921 /*
7922  * zpool online [-e] [--power] <pool> <device> ...
7923  *
7924  * -e	Expand the device to use all available space.
 * --power	Power on the enclosure slot to the drive (if possible)
7925 */
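/*
 * Illustrative usage (names are hypothetical):
 *
 *	# bring a device back online and expand it to its full size
 *	zpool online -e tank sda
 */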
7926 int
7927 zpool_do_online(int argc, char **argv)
7928 {
7929 int c, i;
7930 char *poolname;
7931 zpool_handle_t *zhp;
7932 int ret = 0;
7933 vdev_state_t newstate;
7934 int flags = 0;
7935 boolean_t is_power_on = B_FALSE;
7936 struct option long_options[] = {
7937 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7938 {0, 0, 0, 0}
7939 };
7940
7941 /* check options */
7942 while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
7943 switch (c) {
7944 case 'e':
7945 flags |= ZFS_ONLINE_EXPAND;
7946 break;
7947 case ZPOOL_OPTION_POWER:
7948 is_power_on = B_TRUE;
7949 break;
7950 case '?':
7951 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7952 optopt);
7953 usage(B_FALSE);
7954 }
7955 }
7956
7957 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7958 is_power_on = B_TRUE;
7959
7960 argc -= optind;
7961 argv += optind;
7962
7963 /* get pool name and check number of arguments */
7964 if (argc < 1) {
7965 (void) fprintf(stderr, gettext("missing pool name\n"));
7966 usage(B_FALSE);
7967 }
7968 if (argc < 2) {
7969 (void) fprintf(stderr, gettext("missing device name\n"));
7970 usage(B_FALSE);
7971 }
7972
7973 poolname = argv[0];
7974
7975 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
7976 (void) fprintf(stderr, gettext("failed to open pool "
7977 		    "\"%s\"\n"), poolname);
7978 return (1);
7979 }
7980
7981 for (i = 1; i < argc; i++) {
7982 vdev_state_t oldstate;
7983 boolean_t avail_spare, l2cache;
7984 int rc;
7985
7986 if (is_power_on) {
7987 rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
7988 if (rc == ENOTSUP) {
7989 (void) fprintf(stderr,
7990 gettext("Power control not supported\n"));
7991 }
7992 			if (rc != 0) {
				zpool_close(zhp);
7993 				return (rc);
			}
7994 }
7995
7996 nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
7997 &l2cache, NULL);
7998 if (tgt == NULL) {
7999 ret = 1;
8000 (void) fprintf(stderr, gettext("couldn't find device "
8001 "\"%s\" in pool \"%s\"\n"), argv[i], poolname);
8002 continue;
8003 }
8004 uint_t vsc;
8005 oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
8006 ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
8007 if ((rc = zpool_vdev_online(zhp, argv[i], flags,
8008 &newstate)) == 0) {
8009 if (newstate != VDEV_STATE_HEALTHY) {
8010 (void) printf(gettext("warning: device '%s' "
8011 "onlined, but remains in faulted state\n"),
8012 argv[i]);
8013 if (newstate == VDEV_STATE_FAULTED)
8014 (void) printf(gettext("use 'zpool "
8015 "clear' to restore a faulted "
8016 "device\n"));
8017 else
8018 (void) printf(gettext("use 'zpool "
8019 "replace' to replace devices "
8020 "that are no longer present\n"));
8021 if ((flags & ZFS_ONLINE_EXPAND)) {
8022 (void) printf(gettext("%s: failed "
8023 "to expand usable space on "
8024 "unhealthy device '%s'\n"),
8025 (oldstate >= VDEV_STATE_DEGRADED ?
8026 "error" : "warning"), argv[i]);
8027 if (oldstate >= VDEV_STATE_DEGRADED) {
8028 ret = 1;
8029 break;
8030 }
8031 }
8032 }
8033 } else {
8034 (void) fprintf(stderr, gettext("Failed to online "
8035 "\"%s\" in pool \"%s\": %d\n"),
8036 argv[i], poolname, rc);
8037 ret = 1;
8038 }
8039 }
8040
8041 zpool_close(zhp);
8042
8043 return (ret);
8044 }
8045
8046 /*
8047 * zpool offline [-ft]|[--power] <pool> <device> ...
8048 *
8050 * -f Force the device into a faulted state.
8051 *
8052 * -t Only take the device off-line temporarily. The offline/faulted
8053 * state will not be persistent across reboots.
8054 *
8055 * --power Power off the enclosure slot to the drive (if possible)
8056 */
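/*
 * Illustrative usage (names are hypothetical):
 *
 *	# take a device offline until the next import
 *	zpool offline -t tank sda
 *
 *	# cut power to the device's enclosure slot
 *	zpool offline --power tank sda
 */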
8057 int
8058 zpool_do_offline(int argc, char **argv)
8059 {
8060 int c, i;
8061 char *poolname;
8062 zpool_handle_t *zhp;
8063 int ret = 0;
8064 boolean_t istmp = B_FALSE;
8065 boolean_t fault = B_FALSE;
8066 boolean_t is_power_off = B_FALSE;
8067
8068 struct option long_options[] = {
8069 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
8070 {0, 0, 0, 0}
8071 };
8072
8073 /* check options */
8074 while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
8075 switch (c) {
8076 case 'f':
8077 fault = B_TRUE;
8078 break;
8079 case 't':
8080 istmp = B_TRUE;
8081 break;
8082 case ZPOOL_OPTION_POWER:
8083 is_power_off = B_TRUE;
8084 break;
8085 case '?':
8086 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8087 optopt);
8088 usage(B_FALSE);
8089 }
8090 }
8091
8092 if (is_power_off && fault) {
8093 (void) fprintf(stderr,
8094 		    gettext("--power and -f cannot be used together\n"));
8095 usage(B_FALSE);
8096 return (1);
8097 }
8098
8099 if (is_power_off && istmp) {
8100 (void) fprintf(stderr,
8101 		    gettext("--power and -t cannot be used together\n"));
8102 usage(B_FALSE);
8103 return (1);
8104 }
8105
8106 argc -= optind;
8107 argv += optind;
8108
8109 /* get pool name and check number of arguments */
8110 if (argc < 1) {
8111 (void) fprintf(stderr, gettext("missing pool name\n"));
8112 usage(B_FALSE);
8113 }
8114 if (argc < 2) {
8115 (void) fprintf(stderr, gettext("missing device name\n"));
8116 usage(B_FALSE);
8117 }
8118
8119 poolname = argv[0];
8120
8121 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
8122 (void) fprintf(stderr, gettext("failed to open pool "
8123 		    "\"%s\"\n"), poolname);
8124 return (1);
8125 }
8126
8127 for (i = 1; i < argc; i++) {
8128 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
8129 if (is_power_off) {
8130 /*
8131 * Note: we have to power off first, then set REMOVED,
8132 * or else zpool_vdev_set_removed_state() returns
8133 * EAGAIN.
8134 */
8135 ret = zpool_power_off(zhp, argv[i]);
8136 if (ret != 0) {
8137 (void) fprintf(stderr, "%s %s %d\n",
8138 gettext("unable to power off slot for"),
8139 argv[i], ret);
8140 }
8141 zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);
8142
8143 } else if (fault) {
8144 vdev_aux_t aux;
8145 if (istmp == B_FALSE) {
8146 /* Force the fault to persist across imports */
8147 aux = VDEV_AUX_EXTERNAL_PERSIST;
8148 } else {
8149 aux = VDEV_AUX_EXTERNAL;
8150 }
8151
8152 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
8153 ret = 1;
8154 } else {
8155 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
8156 ret = 1;
8157 }
8158 }
8159
8160 zpool_close(zhp);
8161
8162 return (ret);
8163 }
8164
8165 /*
8166  * zpool clear [-nFX]|[--power] <pool> [device]
8167 *
8168 * Clear all errors associated with a pool or a particular device.
8169 */
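/*
 * Illustrative usage (names are hypothetical):
 *
 *	# clear error counts on the whole pool
 *	zpool clear tank
 *
 *	# preview (-n) an extreme (-X) rewind-based recovery (-F)
 *	zpool clear -nFX tank
 */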
8170 int
8171 zpool_do_clear(int argc, char **argv)
8172 {
8173 int c;
8174 int ret = 0;
8175 boolean_t dryrun = B_FALSE;
8176 boolean_t do_rewind = B_FALSE;
8177 boolean_t xtreme_rewind = B_FALSE;
8178 boolean_t is_power_on = B_FALSE;
8179 uint32_t rewind_policy = ZPOOL_NO_REWIND;
8180 nvlist_t *policy = NULL;
8181 zpool_handle_t *zhp;
8182 char *pool, *device;
8183
8184 struct option long_options[] = {
8185 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
8186 {0, 0, 0, 0}
8187 };
8188
8189 /* check options */
8190 while ((c = getopt_long(argc, argv, "FnX", long_options,
8191 NULL)) != -1) {
8192 switch (c) {
8193 case 'F':
8194 do_rewind = B_TRUE;
8195 break;
8196 case 'n':
8197 dryrun = B_TRUE;
8198 break;
8199 case 'X':
8200 xtreme_rewind = B_TRUE;
8201 break;
8202 case ZPOOL_OPTION_POWER:
8203 is_power_on = B_TRUE;
8204 break;
8205 case '?':
8206 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8207 optopt);
8208 usage(B_FALSE);
8209 }
8210 }
8211
8212 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
8213 is_power_on = B_TRUE;
8214
8215 argc -= optind;
8216 argv += optind;
8217
8218 if (argc < 1) {
8219 (void) fprintf(stderr, gettext("missing pool name\n"));
8220 usage(B_FALSE);
8221 }
8222
8223 if (argc > 2) {
8224 (void) fprintf(stderr, gettext("too many arguments\n"));
8225 usage(B_FALSE);
8226 }
8227
8228 if ((dryrun || xtreme_rewind) && !do_rewind) {
8229 (void) fprintf(stderr,
8230 gettext("-n or -X only meaningful with -F\n"));
8231 usage(B_FALSE);
8232 }
8233 if (dryrun)
8234 rewind_policy = ZPOOL_TRY_REWIND;
8235 else if (do_rewind)
8236 rewind_policy = ZPOOL_DO_REWIND;
8237 if (xtreme_rewind)
8238 rewind_policy |= ZPOOL_EXTREME_REWIND;
8239
8240 	/* In the future, further rewind policy choices can be passed along here */
8241 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
8242 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
8243 rewind_policy) != 0) {
8244 return (1);
8245 }
8246
8247 pool = argv[0];
8248 device = argc == 2 ? argv[1] : NULL;
8249
8250 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
8251 nvlist_free(policy);
8252 return (1);
8253 }
8254
8255 if (is_power_on) {
8256 if (device == NULL) {
8257 zpool_power_on_pool_and_wait_for_devices(zhp);
8258 } else {
8259 zpool_power_on_and_disk_wait(zhp, device);
8260 }
8261 }
8262
8263 if (zpool_clear(zhp, device, policy) != 0)
8264 ret = 1;
8265
8266 zpool_close(zhp);
8267
8268 nvlist_free(policy);
8269
8270 return (ret);
8271 }
8272
8273 /*
8274 * zpool reguid [-g <guid>] <pool>
8275 */
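/*
 * Illustrative usage (the pool name and GUID are hypothetical):
 *
 *	# assign a new random GUID
 *	zpool reguid tank
 *
 *	# assign a specific GUID
 *	zpool reguid -g 4242424242 tank
 */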
8276 int
8277 zpool_do_reguid(int argc, char **argv)
8278 {
8279 uint64_t guid;
8280 uint64_t *guidp = NULL;
8281 int c;
8282 char *endptr;
8283 char *poolname;
8284 zpool_handle_t *zhp;
8285 int ret = 0;
8286
8287 /* check options */
8288 while ((c = getopt(argc, argv, "g:")) != -1) {
8289 switch (c) {
8290 case 'g':
8291 errno = 0;
8292 guid = strtoull(optarg, &endptr, 10);
8293 if (errno != 0 || *endptr != '\0') {
8294 (void) fprintf(stderr,
8295 gettext("invalid GUID: %s\n"), optarg);
8296 usage(B_FALSE);
8297 }
8298 guidp = &guid;
8299 break;
8300 case '?':
8301 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8302 optopt);
8303 usage(B_FALSE);
8304 }
8305 }
8306
8307 argc -= optind;
8308 argv += optind;
8309
8310 /* get pool name and check number of arguments */
8311 if (argc < 1) {
8312 (void) fprintf(stderr, gettext("missing pool name\n"));
8313 usage(B_FALSE);
8314 }
8315
8316 if (argc > 1) {
8317 (void) fprintf(stderr, gettext("too many arguments\n"));
8318 usage(B_FALSE);
8319 }
8320
8321 poolname = argv[0];
8322 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
8323 return (1);
8324
8325 ret = zpool_set_guid(zhp, guidp);
8326
8327 zpool_close(zhp);
8328 return (ret);
8329 }
8330
8331
8332 /*
8333  * zpool reopen [-n] <pool>
8334  *
8335  * Reopen the pool so that the kernel can update the sizes of all vdevs.
 *
 * -n	Do not restart an in-progress scrub after the reopen.
8336 */
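/*
 * Illustrative usage (the pool name is hypothetical):
 *
 *	# reopen one pool's vdevs without restarting an active scrub
 *	zpool reopen -n tank
 */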
8337 int
8338 zpool_do_reopen(int argc, char **argv)
8339 {
8340 int c;
8341 int ret = 0;
8342 boolean_t scrub_restart = B_TRUE;
8343
8344 /* check options */
8345 while ((c = getopt(argc, argv, "n")) != -1) {
8346 switch (c) {
8347 case 'n':
8348 scrub_restart = B_FALSE;
8349 break;
8350 case '?':
8351 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8352 optopt);
8353 usage(B_FALSE);
8354 }
8355 }
8356
8357 argc -= optind;
8358 argv += optind;
8359
8360 /* if argc == 0 we will execute zpool_reopen_one on all pools */
8361 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8362 B_FALSE, zpool_reopen_one, &scrub_restart);
8363
8364 return (ret);
8365 }
8366
8367 typedef struct scrub_cbdata {
8368 int cb_type;
8369 pool_scrub_cmd_t cb_scrub_cmd;
8370 } scrub_cbdata_t;
8371
8372 static boolean_t
8373 zpool_has_checkpoint(zpool_handle_t *zhp)
8374 {
8375 nvlist_t *config, *nvroot;
8376
8377 config = zpool_get_config(zhp, NULL);
8378
8379 if (config != NULL) {
8380 pool_checkpoint_stat_t *pcs = NULL;
8381 uint_t c;
8382
8383 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
8384 (void) nvlist_lookup_uint64_array(nvroot,
8385 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
8386
8387 if (pcs == NULL || pcs->pcs_state == CS_NONE)
8388 return (B_FALSE);
8389
8390 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
8391 pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
8392 return (B_TRUE);
8393 }
8394
8395 return (B_FALSE);
8396 }
8397
8398 static int
8399 scrub_callback(zpool_handle_t *zhp, void *data)
8400 {
8401 scrub_cbdata_t *cb = data;
8402 int err;
8403
8404 /*
8405 * Ignore faulted pools.
8406 */
8407 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
8408 (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
8409 "currently unavailable\n"), zpool_get_name(zhp));
8410 return (1);
8411 }
8412
8413 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
8414
8415 if (err == 0 && zpool_has_checkpoint(zhp) &&
8416 cb->cb_type == POOL_SCAN_SCRUB) {
8417 (void) printf(gettext("warning: will not scrub state that "
8418 "belongs to the checkpoint of pool '%s'\n"),
8419 zpool_get_name(zhp));
8420 }
8421
8422 return (err != 0);
8423 }
8424
8425 static int
8426 wait_callback(zpool_handle_t *zhp, void *data)
8427 {
8428 zpool_wait_activity_t *act = data;
8429 return (zpool_wait(zhp, *act));
8430 }
8431
8432 /*
8433 * zpool scrub [-e | -s | -p | -C] [-w] <pool> ...
8434 *
8435 * -e Only scrub blocks in the error log.
8436 * -s Stop. Stops any in-progress scrub.
8437 * -p Pause. Pause in-progress scrub.
8438 * -w Wait. Blocks until scrub has completed.
8439 * -C Scrub from last saved txg.
8440 */
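/*
 * Illustrative usage (the pool name is hypothetical):
 *
 *	# start a scrub and block until it completes
 *	zpool scrub -w tank
 *
 *	# pause an in-progress scrub, then resume it later
 *	zpool scrub -p tank
 *	zpool scrub tank
 */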
8441 int
8442 zpool_do_scrub(int argc, char **argv)
8443 {
8444 int c;
8445 scrub_cbdata_t cb;
8446 boolean_t wait = B_FALSE;
8447 int error;
8448
8449 cb.cb_type = POOL_SCAN_SCRUB;
8450 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8451
8452 boolean_t is_error_scrub = B_FALSE;
8453 boolean_t is_pause = B_FALSE;
8454 boolean_t is_stop = B_FALSE;
8455 boolean_t is_txg_continue = B_FALSE;
8456
8457 /* check options */
8458 while ((c = getopt(argc, argv, "spweC")) != -1) {
8459 switch (c) {
8460 case 'e':
8461 is_error_scrub = B_TRUE;
8462 break;
8463 case 's':
8464 is_stop = B_TRUE;
8465 break;
8466 case 'p':
8467 is_pause = B_TRUE;
8468 break;
8469 case 'w':
8470 wait = B_TRUE;
8471 break;
8472 case 'C':
8473 is_txg_continue = B_TRUE;
8474 break;
8475 case '?':
8476 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8477 optopt);
8478 usage(B_FALSE);
8479 }
8480 }
8481
8482 if (is_pause && is_stop) {
8483 (void) fprintf(stderr, gettext("invalid option "
8484 		    "combination: -s and -p are mutually exclusive\n"));
8485 usage(B_FALSE);
8486 } else if (is_pause && is_txg_continue) {
8487 (void) fprintf(stderr, gettext("invalid option "
8488 		    "combination: -p and -C are mutually exclusive\n"));
8489 usage(B_FALSE);
8490 } else if (is_stop && is_txg_continue) {
8491 (void) fprintf(stderr, gettext("invalid option "
8492 		    "combination: -s and -C are mutually exclusive\n"));
8493 usage(B_FALSE);
8494 } else if (is_error_scrub && is_txg_continue) {
8495 (void) fprintf(stderr, gettext("invalid option "
8496 		    "combination: -e and -C are mutually exclusive\n"));
8497 usage(B_FALSE);
8498 } else {
8499 if (is_error_scrub)
8500 cb.cb_type = POOL_SCAN_ERRORSCRUB;
8501
8502 if (is_pause) {
8503 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
8504 } else if (is_stop) {
8505 cb.cb_type = POOL_SCAN_NONE;
8506 } else if (is_txg_continue) {
8507 cb.cb_scrub_cmd = POOL_SCRUB_FROM_LAST_TXG;
8508 } else {
8509 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8510 }
8511 }
8512
8513 if (wait && (cb.cb_type == POOL_SCAN_NONE ||
8514 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
8515 (void) fprintf(stderr, gettext("invalid option combination: "
8516 "-w cannot be used with -p or -s\n"));
8517 usage(B_FALSE);
8518 }
8519
8520 argc -= optind;
8521 argv += optind;
8522
8523 if (argc < 1) {
8524 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8525 usage(B_FALSE);
8526 }
8527
8528 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8529 B_FALSE, scrub_callback, &cb);
8530
8531 if (wait && !error) {
8532 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
8533 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8534 B_FALSE, wait_callback, &act);
8535 }
8536
8537 return (error);
8538 }
8539
8540 /*
8541 * zpool resilver <pool> ...
8542 *
8543 * Restarts any in-progress resilver
8544 */
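/*
 * Illustrative usage (pool names are hypothetical):
 *
 *	# restart any in-progress resilvers on both pools
 *	zpool resilver tank dozer
 */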
8545 int
8546 zpool_do_resilver(int argc, char **argv)
8547 {
8548 int c;
8549 scrub_cbdata_t cb;
8550
8551 cb.cb_type = POOL_SCAN_RESILVER;
8552 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8553
8554 /* check options */
8555 while ((c = getopt(argc, argv, "")) != -1) {
8556 switch (c) {
8557 case '?':
8558 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8559 optopt);
8560 usage(B_FALSE);
8561 }
8562 }
8563
8564 argc -= optind;
8565 argv += optind;
8566
8567 if (argc < 1) {
8568 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8569 usage(B_FALSE);
8570 }
8571
8572 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8573 B_FALSE, scrub_callback, &cb));
8574 }
8575
8576 /*
8577  * zpool trim [-dw] [-r <rate>] [-c | -s] <pool> [<device> ...]
8578 *
8579 * -c Cancel. Ends any in-progress trim.
8580 * -d Secure trim. Requires kernel and device support.
8581 * -r <rate> Sets the TRIM rate in bytes (per second). Supports
8582 * adding a multiplier suffix such as 'k' or 'm'.
8583 * -s Suspend. TRIM can then be restarted with no flags.
8584 * -w Wait. Blocks until trimming has completed.
8585 */
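/*
 * Illustrative usage (names are hypothetical):
 *
 *	# securely trim one device, rate-limited to 100 MiB/s
 *	zpool trim -d -r 100M tank sda
 *
 *	# suspend trimming of the whole pool, then resume it later
 *	zpool trim -s tank
 *	zpool trim tank
 */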
8586 int
8587 zpool_do_trim(int argc, char **argv)
8588 {
8589 struct option long_options[] = {
8590 {"cancel", no_argument, NULL, 'c'},
8591 {"secure", no_argument, NULL, 'd'},
8592 {"rate", required_argument, NULL, 'r'},
8593 {"suspend", no_argument, NULL, 's'},
8594 {"wait", no_argument, NULL, 'w'},
8595 {0, 0, 0, 0}
8596 };
8597
8598 pool_trim_func_t cmd_type = POOL_TRIM_START;
8599 uint64_t rate = 0;
8600 boolean_t secure = B_FALSE;
8601 boolean_t wait = B_FALSE;
8602
8603 int c;
8604 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
8605 != -1) {
8606 switch (c) {
8607 case 'c':
8608 if (cmd_type != POOL_TRIM_START &&
8609 cmd_type != POOL_TRIM_CANCEL) {
8610 (void) fprintf(stderr, gettext("-c cannot be "
8611 "combined with other options\n"));
8612 usage(B_FALSE);
8613 }
8614 cmd_type = POOL_TRIM_CANCEL;
8615 break;
8616 case 'd':
8617 if (cmd_type != POOL_TRIM_START) {
8618 (void) fprintf(stderr, gettext("-d cannot be "
8619 "combined with the -c or -s options\n"));
8620 usage(B_FALSE);
8621 }
8622 secure = B_TRUE;
8623 break;
8624 case 'r':
8625 if (cmd_type != POOL_TRIM_START) {
8626 (void) fprintf(stderr, gettext("-r cannot be "
8627 "combined with the -c or -s options\n"));
8628 usage(B_FALSE);
8629 }
8630 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
8631 (void) fprintf(stderr, "%s: %s\n",
8632 gettext("invalid value for rate"),
8633 libzfs_error_description(g_zfs));
8634 usage(B_FALSE);
8635 }
8636 break;
8637 case 's':
8638 if (cmd_type != POOL_TRIM_START &&
8639 cmd_type != POOL_TRIM_SUSPEND) {
8640 (void) fprintf(stderr, gettext("-s cannot be "
8641 "combined with other options\n"));
8642 usage(B_FALSE);
8643 }
8644 cmd_type = POOL_TRIM_SUSPEND;
8645 break;
8646 case 'w':
8647 wait = B_TRUE;
8648 break;
8649 case '?':
8650 if (optopt != 0) {
8651 (void) fprintf(stderr,
8652 gettext("invalid option '%c'\n"), optopt);
8653 } else {
8654 (void) fprintf(stderr,
8655 gettext("invalid option '%s'\n"),
8656 argv[optind - 1]);
8657 }
8658 usage(B_FALSE);
8659 }
8660 }
8661
8662 argc -= optind;
8663 argv += optind;
8664
8665 if (argc < 1) {
8666 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8667 usage(B_FALSE);
8668 return (-1);
8669 }
8670
8671 if (wait && (cmd_type != POOL_TRIM_START)) {
8672 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
8673 "-s\n"));
8674 usage(B_FALSE);
8675 }
8676
8677 char *poolname = argv[0];
8678 zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
8679 if (zhp == NULL)
8680 return (-1);
8681
8682 trimflags_t trim_flags = {
8683 .secure = secure,
8684 .rate = rate,
8685 .wait = wait,
8686 };
8687
8688 nvlist_t *vdevs = fnvlist_alloc();
8689 if (argc == 1) {
8690 /* no individual leaf vdevs specified, so add them all */
8691 nvlist_t *config = zpool_get_config(zhp, NULL);
8692 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
8693 ZPOOL_CONFIG_VDEV_TREE);
8694 zpool_collect_leaves(zhp, nvroot, vdevs);
8695 trim_flags.fullpool = B_TRUE;
8696 } else {
8697 trim_flags.fullpool = B_FALSE;
8698 for (int i = 1; i < argc; i++) {
8699 fnvlist_add_boolean(vdevs, argv[i]);
8700 }
8701 }
8702
8703 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
8704
8705 fnvlist_free(vdevs);
8706 zpool_close(zhp);
8707
8708 return (error);
8709 }
8710
8711 /*
8712  * Converts a total number of seconds to a human-readable string broken
8713  * down into days/hours/minutes/seconds.
8714 */
8715 static void
8716 secs_to_dhms(uint64_t total, char *buf)
8717 {
8718 uint64_t days = total / 60 / 60 / 24;
8719 uint64_t hours = (total / 60 / 60) % 24;
8720 uint64_t mins = (total / 60) % 60;
8721 uint64_t secs = (total % 60);
8722
8723 if (days > 0) {
8724 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
8725 (u_longlong_t)days, (u_longlong_t)hours,
8726 (u_longlong_t)mins, (u_longlong_t)secs);
8727 } else {
8728 (void) sprintf(buf, "%02llu:%02llu:%02llu",
8729 (u_longlong_t)hours, (u_longlong_t)mins,
8730 (u_longlong_t)secs);
8731 }
8732 }
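/*
 * For example, secs_to_dhms(90061, buf) yields "1 days 01:01:01", and
 * secs_to_dhms(3932, buf) yields "01:05:32".
 */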
8733
8734 /*
8735 * Print out detailed error scrub status.
8736 */
8737 static void
8738 print_err_scrub_status(pool_scan_stat_t *ps)
8739 {
8740 time_t start, end, pause;
8741 uint64_t total_secs_left;
8742 uint64_t secs_left, mins_left, hours_left, days_left;
8743 uint64_t examined, to_be_examined;
8744
8745 if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
8746 return;
8747 }
8748
8749 (void) printf(gettext(" scrub: "));
8750
8751 start = ps->pss_error_scrub_start;
8752 end = ps->pss_error_scrub_end;
8753 pause = ps->pss_pass_error_scrub_pause;
8754 examined = ps->pss_error_scrub_examined;
8755 to_be_examined = ps->pss_error_scrub_to_be_examined;
8756
8757 assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);
8758
8759 if (ps->pss_error_scrub_state == DSS_FINISHED) {
8760 total_secs_left = end - start;
8761 days_left = total_secs_left / 60 / 60 / 24;
8762 hours_left = (total_secs_left / 60 / 60) % 24;
8763 mins_left = (total_secs_left / 60) % 60;
8764 secs_left = (total_secs_left % 60);
8765
8766 (void) printf(gettext("scrubbed %llu error blocks in %llu days "
8767 "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
8768 (u_longlong_t)days_left, (u_longlong_t)hours_left,
8769 (u_longlong_t)mins_left, (u_longlong_t)secs_left,
8770 ctime(&end));
8771
8772 return;
8773 } else if (ps->pss_error_scrub_state == DSS_CANCELED) {
8774 (void) printf(gettext("error scrub canceled on %s"),
8775 ctime(&end));
8776 return;
8777 }
8778 assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);
8779
8780 /* Error scrub is in progress. */
8781 if (pause == 0) {
8782 (void) printf(gettext("error scrub in progress since %s"),
8783 ctime(&start));
8784 } else {
8785 (void) printf(gettext("error scrub paused since %s"),
8786 ctime(&pause));
8787 (void) printf(gettext("\terror scrub started on %s"),
8788 ctime(&start));
8789 }
8790
8791 double fraction_done = (double)examined / (to_be_examined + examined);
8792 (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
8793 " blocks"), 100 * fraction_done, (u_longlong_t)examined);
8794
8795 (void) printf("\n");
8796 }
8797
8798 /*
8799 * Print out detailed scrub status.
8800 */
8801 static void
8802 print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
8803 {
8804 time_t start, end, pause;
8805 uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
8806 uint64_t elapsed, scan_rate, issue_rate;
8807 double fraction_done;
8808 char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
8809 char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];
8810
8811 printf(" ");
8812 printf_color(ANSI_BOLD, gettext("scan:"));
8813 printf(" ");
8814
8815 /* If there's never been a scan, there's not much to say. */
8816 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
8817 ps->pss_func >= POOL_SCAN_FUNCS) {
8818 (void) printf(gettext("none requested\n"));
8819 return;
8820 }
8821
8822 start = ps->pss_start_time;
8823 end = ps->pss_end_time;
8824 pause = ps->pss_pass_scrub_pause;
8825
8826 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
8827
8828 int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
8829 int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
8830 assert(is_resilver || is_scrub);
8831
8832 /* Scan is finished or canceled. */
8833 if (ps->pss_state == DSS_FINISHED) {
8834 secs_to_dhms(end - start, time_buf);
8835
8836 if (is_scrub) {
8837 (void) printf(gettext("scrub repaired %s "
8838 "in %s with %llu errors on %s"), processed_buf,
8839 time_buf, (u_longlong_t)ps->pss_errors,
8840 ctime(&end));
8841 } else if (is_resilver) {
8842 (void) printf(gettext("resilvered %s "
8843 "in %s with %llu errors on %s"), processed_buf,
8844 time_buf, (u_longlong_t)ps->pss_errors,
8845 ctime(&end));
8846 }
8847 return;
8848 } else if (ps->pss_state == DSS_CANCELED) {
8849 if (is_scrub) {
8850 (void) printf(gettext("scrub canceled on %s"),
8851 ctime(&end));
8852 } else if (is_resilver) {
8853 (void) printf(gettext("resilver canceled on %s"),
8854 ctime(&end));
8855 }
8856 return;
8857 }
8858
8859 assert(ps->pss_state == DSS_SCANNING);
8860
8861 /* Scan is in progress. Resilvers can't be paused. */
8862 if (is_scrub) {
8863 if (pause == 0) {
8864 (void) printf(gettext("scrub in progress since %s"),
8865 ctime(&start));
8866 } else {
8867 (void) printf(gettext("scrub paused since %s"),
8868 ctime(&pause));
8869 (void) printf(gettext("\tscrub started on %s"),
8870 ctime(&start));
8871 }
8872 } else if (is_resilver) {
8873 (void) printf(gettext("resilver in progress since %s"),
8874 ctime(&start));
8875 }
8876
8877 scanned = ps->pss_examined;
8878 pass_scanned = ps->pss_pass_exam;
8879 issued = ps->pss_issued;
8880 pass_issued = ps->pss_pass_issued;
8881 total_s = ps->pss_to_examine;
8882 total_i = ps->pss_to_examine - ps->pss_skipped;
8883
8884 /* we are only done with a block once we have issued the IO for it */
8885 fraction_done = (double)issued / total_i;
8886
8887 /* elapsed time for this pass, rounding up to 1 if it's 0 */
8888 elapsed = time(NULL) - ps->pss_pass_start;
8889 elapsed -= ps->pss_pass_scrub_spent_paused;
8890 elapsed = (elapsed != 0) ? elapsed : 1;
8891
8892 scan_rate = pass_scanned / elapsed;
8893 issue_rate = pass_issued / elapsed;
8894
8895 /* format all of the numbers we will be reporting */
8896 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
8897 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
8898 zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
8899 zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));
8900
8901 /* do not print estimated time if we have a paused scrub */
8902 (void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
8903 if (pause == 0 && scan_rate > 0) {
8904 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
8905 (void) printf(gettext(" at %s/s"), srate_buf);
8906 }
8907 (void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
8908 if (pause == 0 && issue_rate > 0) {
8909 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
8910 (void) printf(gettext(" at %s/s"), irate_buf);
8911 }
8912 (void) printf(gettext("\n"));
8913
8914 if (is_resilver) {
8915 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
8916 processed_buf, 100 * fraction_done);
8917 } else if (is_scrub) {
8918 (void) printf(gettext("\t%s repaired, %.2f%% done"),
8919 processed_buf, 100 * fraction_done);
8920 }
8921
8922 if (pause == 0) {
8923 /*
8924 * Only provide an estimate iff:
8925 * 1) we haven't yet issued all we expected, and
8926 * 2) the issue rate exceeds 10 MB/s, and
8927 * 3) it's either:
8928 * a) a resilver which has started repairs, or
8929 * b) a scrub which has entered the issue phase.
8930 */
8931 if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
8932 ((is_resilver && ps->pss_processed > 0) ||
8933 (is_scrub && issued > 0))) {
8934 secs_to_dhms((total_i - issued) / issue_rate, time_buf);
8935 (void) printf(gettext(", %s to go\n"), time_buf);
8936 } else {
8937 (void) printf(gettext(", no estimated "
8938 "completion time\n"));
8939 }
8940 } else {
8941 (void) printf(gettext("\n"));
8942 }
8943 }
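/*
 * Worked example of the estimate above (the numbers are illustrative):
 * with total_i = 1 TiB, issued = 256 GiB, and a sustained issue_rate of
 * 192 MiB/s, the remaining 768 GiB take 786432 / 192 = 4096 seconds,
 * which secs_to_dhms() renders as "01:08:16 to go".
 */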
8944
8945 static void
8946 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
8947 {
8948 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
8949 return;
8950
8951 printf(" ");
8952 printf_color(ANSI_BOLD, gettext("scan:"));
8953 printf(" ");
8954
8955 uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
8956 uint64_t bytes_issued = vrs->vrs_bytes_issued;
8957 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
8958 uint64_t bytes_est_s = vrs->vrs_bytes_est;
8959 uint64_t bytes_est_i = vrs->vrs_bytes_est;
8960 if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
8961 bytes_est_i -= vrs->vrs_pass_bytes_skipped;
8962 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
8963 (vrs->vrs_pass_time_ms + 1)) * 1000;
8964 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
8965 (vrs->vrs_pass_time_ms + 1)) * 1000;
8966 double scan_pct = MIN((double)bytes_scanned * 100 /
8967 (bytes_est_s + 1), 100);
8968
8969 /* Format all of the numbers we will be reporting */
8970 char bytes_scanned_buf[7], bytes_issued_buf[7];
8971 char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
8972 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
8973 zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
8974 sizeof (bytes_scanned_buf));
8975 zfs_nicebytes(bytes_issued, bytes_issued_buf,
8976 sizeof (bytes_issued_buf));
8977 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
8978 sizeof (bytes_rebuilt_buf));
8979 zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
8980 zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));
8981
8982 time_t start = vrs->vrs_start_time;
8983 time_t end = vrs->vrs_end_time;
8984
8985 /* Rebuild is finished or canceled. */
8986 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
8987 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
8988 (void) printf(gettext("resilvered (%s) %s in %s "
8989 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
8990 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
8991 return;
8992 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
8993 (void) printf(gettext("resilver (%s) canceled on %s"),
8994 vdev_name, ctime(&end));
8995 return;
8996 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8997 (void) printf(gettext("resilver (%s) in progress since %s"),
8998 vdev_name, ctime(&start));
8999 }
9000
9001 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
9002
9003 (void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
9004 bytes_est_s_buf);
9005 if (scan_rate > 0) {
9006 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
9007 (void) printf(gettext(" at %s/s"), scan_rate_buf);
9008 }
9009 (void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
9010 bytes_est_i_buf);
9011 if (issue_rate > 0) {
9012 zfs_nicebytes(issue_rate, issue_rate_buf,
9013 sizeof (issue_rate_buf));
9014 (void) printf(gettext(" at %s/s"), issue_rate_buf);
9015 }
9016 (void) printf(gettext("\n"));
9017
9018 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
9019 bytes_rebuilt_buf, scan_pct);
9020
9021 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
9022 if (bytes_est_s >= bytes_scanned &&
9023 scan_rate >= 10 * 1024 * 1024) {
9024 secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
9025 time_buf);
9026 (void) printf(gettext(", %s to go\n"), time_buf);
9027 } else {
9028 (void) printf(gettext(", no estimated "
9029 "completion time\n"));
9030 }
9031 } else {
9032 (void) printf(gettext("\n"));
9033 }
9034 }
9035
9036 /*
9037 * Print rebuild status for top-level vdevs.
9038 */
9039 static void
9040 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
9041 {
9042 nvlist_t **child;
9043 uint_t children;
9044
9045 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9046 &child, &children) != 0)
9047 children = 0;
9048
9049 for (uint_t c = 0; c < children; c++) {
9050 vdev_rebuild_stat_t *vrs;
9051 uint_t i;
9052
9053 if (nvlist_lookup_uint64_array(child[c],
9054 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
9055 char *name = zpool_vdev_name(g_zfs, zhp,
9056 child[c], VDEV_NAME_TYPE_ID);
9057 print_rebuild_status_impl(vrs, i, name);
9058 free(name);
9059 }
9060 }
9061 }
9062
9063 /*
9064 * As we don't scrub checkpointed blocks, we want to warn the user that we
9065 * skipped scanning some blocks if a checkpoint exists or existed at any
9066  * time during the scan. If a sequential reconstruction was performed
9067  * instead of a healing one, the blocks were reconstructed; however, their
9068  * checksums have not been verified, so we still print the warning.
9069 */
9070 static void
9071 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
9072 {
9073 if (ps == NULL || pcs == NULL)
9074 return;
9075
9076 if (pcs->pcs_state == CS_NONE ||
9077 pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
9078 return;
9079
9080 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
9081
9082 if (ps->pss_state == DSS_NONE)
9083 return;
9084
9085 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
9086 ps->pss_end_time < pcs->pcs_start_time)
9087 return;
9088
9089 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
9090 (void) printf(gettext(" scan warning: skipped blocks "
9091 "that are only referenced by the checkpoint.\n"));
9092 } else {
9093 assert(ps->pss_state == DSS_SCANNING);
9094 (void) printf(gettext(" scan warning: skipping blocks "
9095 "that are only referenced by the checkpoint.\n"));
9096 }
9097 }
9098
9099 /*
9100 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
9101 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
9102 * the last completed (or cancelled) rebuild.
9103 */
9104 static boolean_t
9105 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
9106 {
9107 nvlist_t **child;
9108 uint_t children;
9109 boolean_t rebuilding = B_FALSE;
9110 uint64_t end_time = 0;
9111
9112 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9113 &child, &children) != 0)
9114 children = 0;
9115
9116 for (uint_t c = 0; c < children; c++) {
9117 vdev_rebuild_stat_t *vrs;
9118 uint_t i;
9119
9120 if (nvlist_lookup_uint64_array(child[c],
9121 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
9122
9123 if (vrs->vrs_end_time > end_time)
9124 end_time = vrs->vrs_end_time;
9125
9126 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
9127 rebuilding = B_TRUE;
9128 end_time = 0;
9129 break;
9130 }
9131 }
9132 }
9133
9134 if (rebuild_end_time != NULL)
9135 *rebuild_end_time = end_time;
9136
9137 return (rebuilding);
9138 }
9139
9140 static void
9141 vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9142 int depth, boolean_t isspare, char *parent, nvlist_t *item)
9143 {
9144 nvlist_t *vds, **child, *ch = NULL;
9145 uint_t vsc, children;
9146 vdev_stat_t *vs;
9147 char *vname;
9148 uint64_t notpresent;
9149 const char *type, *path;
9150
9151 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
9152 &child, &children) != 0)
9153 children = 0;
9154 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
9155 (uint64_t **)&vs, &vsc) == 0);
9156 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
9157 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
9158 return;
9159
9160 if (cb->cb_print_unhealthy && depth > 0 &&
9161 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
9162 return;
9163 }
9164 vname = zpool_vdev_name(g_zfs, zhp, nv,
9165 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
9166 vds = fnvlist_alloc();
9167 fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int);
9168 if (cb->cb_flat_vdevs && parent != NULL) {
9169 fnvlist_add_string(vds, "parent", parent);
9170 }
9171
9172 if (isspare) {
9173 if (vs->vs_aux == VDEV_AUX_SPARED) {
9174 fnvlist_add_string(vds, "state", "INUSE");
9175 used_by_other(zhp, nv, vds);
9176 } else if (vs->vs_state == VDEV_STATE_HEALTHY)
9177 fnvlist_add_string(vds, "state", "AVAIL");
9178 } else {
9179 if (vs->vs_alloc) {
9180 nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc,
9181 cb->cb_literal, cb->cb_json_as_int,
9182 ZFS_NICENUM_BYTES);
9183 }
9184 if (vs->vs_space) {
9185 nice_num_str_nvlist(vds, "total_space", vs->vs_space,
9186 cb->cb_literal, cb->cb_json_as_int,
9187 ZFS_NICENUM_BYTES);
9188 }
9189 if (vs->vs_dspace) {
9190 nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,
9191 cb->cb_literal, cb->cb_json_as_int,
9192 ZFS_NICENUM_BYTES);
9193 }
9194 if (vs->vs_rsize) {
9195 nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,
9196 cb->cb_literal, cb->cb_json_as_int,
9197 ZFS_NICENUM_BYTES);
9198 }
9199 if (vs->vs_esize) {
9200 nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,
9201 cb->cb_literal, cb->cb_json_as_int,
9202 ZFS_NICENUM_BYTES);
9203 }
9204 if (vs->vs_self_healed) {
9205 nice_num_str_nvlist(vds, "self_healed",
9206 vs->vs_self_healed, cb->cb_literal,
9207 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9208 }
9209 if (vs->vs_pspace) {
9210 nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,
9211 cb->cb_literal, cb->cb_json_as_int,
9212 ZFS_NICENUM_BYTES);
9213 }
9214 nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,
9215 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9216 nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,
9217 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9218 nice_num_str_nvlist(vds, "checksum_errors",
9219 vs->vs_checksum_errors, cb->cb_literal,
9220 cb->cb_json_as_int, ZFS_NICENUM_1024);
9221 if (vs->vs_scan_processed) {
9222 nice_num_str_nvlist(vds, "scan_processed",
9223 vs->vs_scan_processed, cb->cb_literal,
9224 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9225 }
9226 if (vs->vs_checkpoint_space) {
9227 nice_num_str_nvlist(vds, "checkpoint_space",
9228 vs->vs_checkpoint_space, cb->cb_literal,
9229 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9230 }
9231 if (vs->vs_resilver_deferred) {
9232 nice_num_str_nvlist(vds, "resilver_deferred",
9233 vs->vs_resilver_deferred, B_TRUE,
9234 cb->cb_json_as_int, ZFS_NICENUM_1024);
9235 }
9236 if (children == 0) {
9237 nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,
9238 cb->cb_literal, cb->cb_json_as_int,
9239 ZFS_NICENUM_1024);
9240 }
9241 if (cb->cb_print_power) {
9242 if (children == 0) {
9243 /* Only leaf vdevs have physical slots */
9244 switch (zpool_power_current_state(zhp, (char *)
9245 fnvlist_lookup_string(nv,
9246 ZPOOL_CONFIG_PATH))) {
9247 case 0:
9248 fnvlist_add_string(vds, "power_state",
9249 "off");
9250 break;
9251 case 1:
9252 fnvlist_add_string(vds, "power_state",
9253 "on");
9254 break;
9255 default:
9256 fnvlist_add_string(vds, "power_state",
9257 "-");
9258 }
9259 } else {
9260 fnvlist_add_string(vds, "power_state", "-");
9261 }
9262 }
9263 }
9264
9265 if (cb->cb_print_dio_verify) {
9266 nice_num_str_nvlist(vds, "dio_verify_errors",
9267 vs->vs_dio_verify_errors, cb->cb_literal,
9268 cb->cb_json_as_int, ZFS_NICENUM_1024);
9269 }
9270
9271 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
9272 	    &notpresent) == 0) {
9273 nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
9274 1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9275 fnvlist_add_string(vds, "was",
9276 fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));
9277 } else if (vs->vs_aux != VDEV_AUX_NONE) {
9278 fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);
9279 } else if (children == 0 && !isspare &&
9280 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
9281 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
9282 vs->vs_configured_ashift < vs->vs_physical_ashift) {
9283 nice_num_str_nvlist(vds, "configured_ashift",
9284 vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,
9285 ZFS_NICENUM_1024);
9286 nice_num_str_nvlist(vds, "physical_ashift",
9287 vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,
9288 ZFS_NICENUM_1024);
9289 }
9290 if (vs->vs_scan_removing != 0) {
9291 nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing,
9292 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9293 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
9294 nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc,
9295 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9296 }
9297
9298 if (cb->vcdl != NULL) {
9299 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
9300 zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp),
9301 path, vds);
9302 }
9303 }
9304
9305 if (children == 0) {
9306 if (cb->cb_print_vdev_init) {
9307 if (vs->vs_initialize_state != 0) {
9308 uint64_t st = vs->vs_initialize_state;
9309 fnvlist_add_string(vds, "init_state",
9310 vdev_init_state_str[st]);
9311 nice_num_str_nvlist(vds, "initialized",
9312 vs->vs_initialize_bytes_done,
9313 cb->cb_literal, cb->cb_json_as_int,
9314 ZFS_NICENUM_BYTES);
9315 nice_num_str_nvlist(vds, "to_initialize",
9316 vs->vs_initialize_bytes_est,
9317 cb->cb_literal, cb->cb_json_as_int,
9318 ZFS_NICENUM_BYTES);
9319 nice_num_str_nvlist(vds, "init_time",
9320 vs->vs_initialize_action_time,
9321 cb->cb_literal, cb->cb_json_as_int,
9322 ZFS_NICE_TIMESTAMP);
9323 nice_num_str_nvlist(vds, "init_errors",
9324 vs->vs_initialize_errors,
9325 cb->cb_literal, cb->cb_json_as_int,
9326 ZFS_NICENUM_1024);
9327 } else {
9328 fnvlist_add_string(vds, "init_state",
9329 "UNINITIALIZED");
9330 }
9331 }
9332 if (cb->cb_print_vdev_trim) {
9333 if (vs->vs_trim_notsup == 0) {
9334 if (vs->vs_trim_state != 0) {
9335 uint64_t st = vs->vs_trim_state;
9336 fnvlist_add_string(vds, "trim_state",
9337 vdev_trim_state_str[st]);
9338 nice_num_str_nvlist(vds, "trimmed",
9339 vs->vs_trim_bytes_done,
9340 cb->cb_literal, cb->cb_json_as_int,
9341 ZFS_NICENUM_BYTES);
9342 nice_num_str_nvlist(vds, "to_trim",
9343 vs->vs_trim_bytes_est,
9344 cb->cb_literal, cb->cb_json_as_int,
9345 ZFS_NICENUM_BYTES);
9346 nice_num_str_nvlist(vds, "trim_time",
9347 vs->vs_trim_action_time,
9348 cb->cb_literal, cb->cb_json_as_int,
9349 ZFS_NICE_TIMESTAMP);
9350 nice_num_str_nvlist(vds, "trim_errors",
9351 vs->vs_trim_errors,
9352 cb->cb_literal, cb->cb_json_as_int,
9353 ZFS_NICENUM_1024);
9354 } else
9355 fnvlist_add_string(vds, "trim_state",
9356 "UNTRIMMED");
9357 }
9358 nice_num_str_nvlist(vds, "trim_notsup",
9359 vs->vs_trim_notsup, B_TRUE,
9360 cb->cb_json_as_int, ZFS_NICENUM_1024);
9361 }
9362 } else {
9363 ch = fnvlist_alloc();
9364 }
9365
9366 if (cb->cb_flat_vdevs && children == 0) {
9367 fnvlist_add_nvlist(item, vname, vds);
9368 }
9369
9370 for (int c = 0; c < children; c++) {
9371 uint64_t islog = B_FALSE, ishole = B_FALSE;
9372 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
9373 &islog);
9374 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
9375 &ishole);
9376 if (islog || ishole)
9377 continue;
9378 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
9379 continue;
9380 if (cb->cb_flat_vdevs) {
9381 vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
9382 vname, item);
9383 }
9384 vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
9385 vname, ch);
9386 }
9387
9388 if (ch != NULL) {
9389 if (!nvlist_empty(ch))
9390 fnvlist_add_nvlist(vds, "vdevs", ch);
9391 fnvlist_free(ch);
9392 }
9393 fnvlist_add_nvlist(item, vname, vds);
9394 fnvlist_free(vds);
9395 free(vname);
9396 }
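/*
 * Sketch of the per-vdev nvlist built above for a healthy leaf (the vdev
 * name is illustrative; the keys match those added by the code):
 *
 *	"sda" => { "alloc_space", "total_space", "def_space",
 *	    "read_errors", "write_errors", "checksum_errors",
 *	    "slow_ios", ... }
 */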
9397
9398 static void
9399 class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9400 const char *class, nvlist_t *item)
9401 {
9402 uint_t c, children;
9403 nvlist_t **child;
9404 nvlist_t *class_obj = NULL;
9405
9406 if (!cb->cb_flat_vdevs)
9407 class_obj = fnvlist_alloc();
9408
9409 assert(zhp != NULL || !cb->cb_verbose);
9410
9411 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
9412 &children) != 0)
9413 return;
9414
9415 for (c = 0; c < children; c++) {
9416 uint64_t is_log = B_FALSE;
9417 const char *bias = NULL;
9418 const char *type = NULL;
9419 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
9420 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
9421
9422 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
9423 &is_log);
9424
9425 if (is_log) {
9426 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
9427 } else {
9428 (void) nvlist_lookup_string(child[c],
9429 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
9430 (void) nvlist_lookup_string(child[c],
9431 ZPOOL_CONFIG_TYPE, &type);
9432 }
9433
9434 if (bias == NULL || strcmp(bias, class) != 0)
9435 continue;
9436 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
9437 continue;
9438
9439 if (cb->cb_flat_vdevs) {
9440 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
9441 NULL, item);
9442 } else {
9443 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
9444 NULL, class_obj);
9445 }
9446 free(name);
9447 }
9448 if (!cb->cb_flat_vdevs) {
9449 if (!nvlist_empty(class_obj))
9450 fnvlist_add_nvlist(item, class, class_obj);
9451 fnvlist_free(class_obj);
9452 }
9453 }
9454
9455 static void
9456 l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9457 nvlist_t *item)
9458 {
9459 nvlist_t *l2c = NULL, **l2cache;
9460 uint_t nl2cache;
9461 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
9462 &l2cache, &nl2cache) == 0) {
9463 if (nl2cache == 0)
9464 return;
9465 if (!cb->cb_flat_vdevs)
9466 l2c = fnvlist_alloc();
9467 for (int i = 0; i < nl2cache; i++) {
9468 if (cb->cb_flat_vdevs) {
9469 vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
9470 B_FALSE, NULL, item);
9471 } else {
9472 vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
9473 B_FALSE, NULL, l2c);
9474 }
9475 }
9476 }
9477 if (!cb->cb_flat_vdevs) {
9478 if (!nvlist_empty(l2c))
9479 fnvlist_add_nvlist(item, "l2cache", l2c);
9480 fnvlist_free(l2c);
9481 }
9482 }
9483
9484 static void
9485 spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9486 nvlist_t *item)
9487 {
9488 nvlist_t *sp = NULL, **spares;
9489 uint_t nspares;
9490 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
9491 &spares, &nspares) == 0) {
9492 if (nspares == 0)
9493 return;
9494 if (!cb->cb_flat_vdevs)
9495 sp = fnvlist_alloc();
9496 for (int i = 0; i < nspares; i++) {
9497 if (cb->cb_flat_vdevs) {
9498 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
9499 NULL, item);
9500 } else {
9501 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
9502 NULL, sp);
9503 }
9504 }
9505 }
9506 if (!cb->cb_flat_vdevs) {
9507 if (!nvlist_empty(sp))
9508 fnvlist_add_nvlist(item, "spares", sp);
9509 fnvlist_free(sp);
9510 }
9511 }
9512
9513 static void
9514 errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
9515 {
9516 uint64_t nerr;
9517 nvlist_t *config = zpool_get_config(zhp, NULL);
9518 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
9519 &nerr) == 0) {
9520 nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr,
9521 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9522 if (nerr != 0 && cb->cb_verbose) {
9523 nvlist_t *nverrlist = NULL;
9524 if (zpool_get_errlog(zhp, &nverrlist) == 0) {
9525 int i = 0;
9526 int count = 0;
9527 size_t len = MAXPATHLEN * 2;
9528 nvpair_t *elem = NULL;
9529
9530 for (nvpair_t *pair =
9531 nvlist_next_nvpair(nverrlist, NULL);
9532 pair != NULL;
9533 pair = nvlist_next_nvpair(nverrlist, pair))
9534 count++;
9535 				char **errl = safe_malloc(
9536 				    count * sizeof (char *));
9537
9538 while ((elem = nvlist_next_nvpair(nverrlist,
9539 elem)) != NULL) {
9540 nvlist_t *nv;
9541 uint64_t dsobj, obj;
9542
9543 verify(nvpair_value_nvlist(elem,
9544 &nv) == 0);
9545 verify(nvlist_lookup_uint64(nv,
9546 ZPOOL_ERR_DATASET, &dsobj) == 0);
9547 verify(nvlist_lookup_uint64(nv,
9548 ZPOOL_ERR_OBJECT, &obj) == 0);
9549 errl[i] = safe_malloc(len);
9550 zpool_obj_to_path(zhp, dsobj, obj,
9551 errl[i++], len);
9552 }
9553 nvlist_free(nverrlist);
9554 fnvlist_add_string_array(item, "errlist",
9555 (const char **)errl, count);
9556 for (int i = 0; i < count; ++i)
9557 free(errl[i]);
9558 free(errl);
9559 } else
9560 fnvlist_add_string(item, "errlist",
9561 strerror(errno));
9562 }
9563 }
9564 }
9565
9566 static void
9567 ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item)
9568 {
9569 nice_num_str_nvlist(item, "blocks", dds->dds_blocks,
9570 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9571 nice_num_str_nvlist(item, "logical_size", dds->dds_lsize,
9572 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9573 nice_num_str_nvlist(item, "physical_size", dds->dds_psize,
9574 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9575 nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize,
9576 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9577 nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks,
9578 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9579 nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize,
9580 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9581 nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize,
9582 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9583 nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize,
9584 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9585 }
9586
9587 static void
9588 dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
9589 {
9590 nvlist_t *config;
9591 if (cb->cb_dedup_stats) {
9592 ddt_histogram_t *ddh;
9593 ddt_stat_t *dds;
9594 ddt_object_t *ddo;
9595 nvlist_t *ddt_stat, *ddt_obj, *dedup;
9596 uint_t c;
9597 uint64_t cspace_prop;
9598
9599 config = zpool_get_config(zhp, NULL);
9600 if (nvlist_lookup_uint64_array(config,
9601 ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0)
9602 return;
9603
9604 dedup = fnvlist_alloc();
9605 ddt_obj = fnvlist_alloc();
9606 nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count,
9607 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9608 if (ddo->ddo_count == 0) {
9609 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
9610 ddt_obj);
9611 fnvlist_add_nvlist(item, "dedup_stats", dedup);
9612 fnvlist_free(ddt_obj);
9613 fnvlist_free(dedup);
9614 return;
9615 } else {
9616 nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace,
9617 cb->cb_literal, cb->cb_json_as_int,
9618 ZFS_NICENUM_1024);
9619 nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace,
9620 cb->cb_literal, cb->cb_json_as_int,
9621 ZFS_NICENUM_1024);
9622 /*
9623 * Squash cached size into in-core size to handle race.
9624 * Only include cached size if it is available.
9625 */
9626 cspace_prop = zpool_get_prop_int(zhp,
9627 ZPOOL_PROP_DEDUPCACHED, NULL);
9628 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
9629 nice_num_str_nvlist(dedup, "cspace", cspace_prop,
9630 cb->cb_literal, cb->cb_json_as_int,
9631 ZFS_NICENUM_1024);
9632 }
9633
9634 ddt_stat = fnvlist_alloc();
9635 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
9636 (uint64_t **)&dds, &c) == 0) {
9637 nvlist_t *total = fnvlist_alloc();
9638 if (dds->dds_blocks == 0)
9639 fnvlist_add_string(total, "blocks", "0");
9640 else
9641 ddt_stats_nvlist(dds, cb, total);
9642 fnvlist_add_nvlist(ddt_stat, "total", total);
9643 fnvlist_free(total);
9644 }
9645 if (nvlist_lookup_uint64_array(config,
9646 ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) {
9647 nvlist_t *hist = fnvlist_alloc();
9648 nvlist_t *entry = NULL;
9649 char buf[16];
9650 for (int h = 0; h < 64; h++) {
9651 if (ddh->ddh_stat[h].dds_blocks != 0) {
9652 entry = fnvlist_alloc();
9653 ddt_stats_nvlist(&ddh->ddh_stat[h], cb,
9654 entry);
9655 snprintf(buf, 16, "%d", h);
9656 fnvlist_add_nvlist(hist, buf, entry);
9657 fnvlist_free(entry);
9658 }
9659 }
9660 if (!nvlist_empty(hist))
9661 fnvlist_add_nvlist(ddt_stat, "histogram", hist);
9662 fnvlist_free(hist);
9663 }
9664
9665 if (!nvlist_empty(ddt_obj)) {
9666 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
9667 ddt_obj);
9668 }
9669 fnvlist_free(ddt_obj);
9670 if (!nvlist_empty(ddt_stat)) {
9671 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS,
9672 ddt_stat);
9673 }
9674 fnvlist_free(ddt_stat);
9675 if (!nvlist_empty(dedup))
9676 fnvlist_add_nvlist(item, "dedup_stats", dedup);
9677 fnvlist_free(dedup);
9678 }
9679 }
9680
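/*
 * Collect raidz expansion progress, if any, into the supplied nvlist.
 */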
static void
raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
    nvlist_t *nvroot, nvlist_t *item)
{
	uint_t c;
	pool_raidz_expand_stat_t *pres = NULL;
	if (nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) {
		nvlist_t **child;
		uint_t children;
		nvlist_t *nv = fnvlist_alloc();
		verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    &child, &children) == 0);
		assert(pres->pres_expanding_vdev < children);
		char *name =
		    zpool_vdev_name(g_zfs, zhp,
		    child[pres->pres_expanding_vdev], 0);
		fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int);
		fnvlist_add_string(nv, "state",
		    pool_scan_state_str[pres->pres_state]);
		nice_num_str_nvlist(nv, "expanding_vdev",
		    pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int,
		    ZFS_NICENUM_1024);
		nice_num_str_nvlist(nv, "start_time", pres->pres_start_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(nv, "end_time", pres->pres_end_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(nv, "waiting_for_resilver",
		    pres->pres_waiting_for_resilver, B_TRUE,
		    cb->cb_json_as_int, ZFS_NICENUM_1024);
		fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv);
		fnvlist_free(nv);
		free(name);
	}
}

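/*
 * Collect pool checkpoint state and space usage into the supplied nvlist.
 */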
static void
checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb,
    nvlist_t *item)
{
	uint_t c;
	pool_checkpoint_stat_t *pcs = NULL;
	if (nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) {
		nvlist_t *nv = fnvlist_alloc();
		fnvlist_add_string(nv, "state",
		    checkpoint_state_str[pcs->pcs_state]);
		nice_num_str_nvlist(nv, "start_time",
		    pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int,
		    ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(nv, "space",
		    pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int,
		    ZFS_NICENUM_BYTES);
		fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv);
		fnvlist_free(nv);
	}
}

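/*
 * Collect device removal progress, if a removal is or was in progress,
 * into the supplied nvlist.
 */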
static void
removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
    nvlist_t *nvroot, nvlist_t *item)
{
	uint_t c;
	pool_removal_stat_t *prs = NULL;
	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS,
	    (uint64_t **)&prs, &c) == 0) {
		if (prs->prs_state != DSS_NONE) {
			nvlist_t **child;
			uint_t children;
			verify(nvlist_lookup_nvlist_array(nvroot,
			    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
			assert(prs->prs_removing_vdev < children);
			char *vdev_name = zpool_vdev_name(g_zfs, zhp,
			    child[prs->prs_removing_vdev], B_TRUE);
			nvlist_t *nv = fnvlist_alloc();
			fill_vdev_info(nv, zhp, vdev_name, B_FALSE,
			    cb->cb_json_as_int);
			fnvlist_add_string(nv, "state",
			    pool_scan_state_str[prs->prs_state]);
			nice_num_str_nvlist(nv, "removing_vdev",
			    prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(nv, "start_time",
			    prs->prs_start_time, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(nv, "end_time", prs->prs_end_time,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
			nice_num_str_nvlist(nv, "copied", prs->prs_copied,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
			nice_num_str_nvlist(nv, "mapping_memory",
			    prs->prs_mapping_memory, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
			fnvlist_add_nvlist(item,
			    ZPOOL_CONFIG_REMOVAL_STATS, nv);
			fnvlist_free(nv);
			free(vdev_name);
		}
	}
}

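/*
 * Collect scrub/resilver/error-scrub progress and any per-vdev rebuild
 * statistics into a ZPOOL_CONFIG_SCAN_STATS nvlist for JSON output.
 */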
static void
scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
    nvlist_t *nvroot, nvlist_t *item)
{
	pool_scan_stat_t *ps = NULL;
	uint_t c;
	nvlist_t *scan = fnvlist_alloc();
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &c) == 0) {
		fnvlist_add_string(scan, "function",
		    pool_scan_func_str[ps->pss_func]);
		fnvlist_add_string(scan, "state",
		    pool_scan_state_str[ps->pss_state]);
		nice_num_str_nvlist(scan, "start_time", ps->pss_start_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "end_time", ps->pss_end_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "examined", ps->pss_examined,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "skipped", ps->pss_skipped,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "processed", ps->pss_processed,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "errors", ps->pss_errors,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "scrub_pause",
		    ps->pss_pass_scrub_pause, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "scrub_spent_paused",
		    ps->pss_pass_scrub_spent_paused,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "issued_bytes_per_scan",
		    ps->pss_pass_issued, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "issued", ps->pss_issued,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
		    ps->pss_error_scrub_start > ps->pss_start_time) {
			fnvlist_add_string(scan, "err_scrub_func",
			    pool_scan_func_str[ps->pss_error_scrub_func]);
			fnvlist_add_string(scan, "err_scrub_state",
			    pool_scan_state_str[ps->pss_error_scrub_state]);
			nice_num_str_nvlist(scan, "err_scrub_start_time",
			    ps->pss_error_scrub_start,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(scan, "err_scrub_end_time",
			    ps->pss_error_scrub_end,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(scan, "err_scrub_examined",
			    ps->pss_error_scrub_examined,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(scan, "err_scrub_to_examine",
			    ps->pss_error_scrub_to_be_examined,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(scan, "err_scrub_pause",
			    ps->pss_pass_error_scrub_pause,
			    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		}
	}

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		vdev_rebuild_stat_t *vrs;
		uint_t i;
		char *name;
		nvlist_t *nv;
		nvlist_t *rebuild = fnvlist_alloc();
		uint64_t st;
		for (uint_t c = 0; c < children; c++) {
			if (nvlist_lookup_uint64_array(child[c],
			    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs,
			    &i) == 0) {
				if (vrs->vrs_state != VDEV_REBUILD_NONE) {
					nv = fnvlist_alloc();
					name = zpool_vdev_name(g_zfs, zhp,
					    child[c], VDEV_NAME_TYPE_ID);
					fill_vdev_info(nv, zhp, name, B_FALSE,
					    cb->cb_json_as_int);
					st = vrs->vrs_state;
					fnvlist_add_string(nv, "state",
					    vdev_rebuild_state_str[st]);
					nice_num_str_nvlist(nv, "start_time",
					    vrs->vrs_start_time, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					nice_num_str_nvlist(nv, "end_time",
					    vrs->vrs_end_time, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					nice_num_str_nvlist(nv, "scan_time",
					    vrs->vrs_scan_time_ms * 1000000,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_TIME);
					nice_num_str_nvlist(nv, "scanned",
					    vrs->vrs_bytes_scanned,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "issued",
					    vrs->vrs_bytes_issued,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "rebuilt",
					    vrs->vrs_bytes_rebuilt,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "to_scan",
					    vrs->vrs_bytes_est, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "errors",
					    vrs->vrs_errors, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICENUM_1024);
					nice_num_str_nvlist(nv, "pass_time",
					    vrs->vrs_pass_time_ms * 1000000,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_TIME);
					nice_num_str_nvlist(nv, "pass_scanned",
					    vrs->vrs_pass_bytes_scanned,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "pass_issued",
					    vrs->vrs_pass_bytes_issued,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "pass_skipped",
					    vrs->vrs_pass_bytes_skipped,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					fnvlist_add_nvlist(rebuild, name, nv);
					free(name);
				}
			}
		}
		if (!nvlist_empty(rebuild))
			fnvlist_add_nvlist(scan, "rebuild_stats", rebuild);
		fnvlist_free(rebuild);
	}

	if (!nvlist_empty(scan))
		fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan);
	fnvlist_free(scan);
}

/*
 * Print the scan status.
 */
static void
print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	uint64_t rebuild_end_time = 0, resilver_end_time = 0;
	boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
	boolean_t have_errorscrub = B_FALSE;
	boolean_t active_resilver = B_FALSE;
	pool_checkpoint_stat_t *pcs = NULL;
	pool_scan_stat_t *ps = NULL;
	uint_t c;
	time_t scrub_start = 0, errorscrub_start = 0;

	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &c) == 0) {
		if (ps->pss_func == POOL_SCAN_RESILVER) {
			resilver_end_time = ps->pss_end_time;
			active_resilver = (ps->pss_state == DSS_SCANNING);
		}

		have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
		have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
		scrub_start = ps->pss_start_time;
		if (c > offsetof(pool_scan_stat_t,
		    pss_pass_error_scrub_pause) / 8) {
			have_errorscrub = (ps->pss_error_scrub_func ==
			    POOL_SCAN_ERRORSCRUB);
			errorscrub_start = ps->pss_error_scrub_start;
		}
	}

	boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
	boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));

	/* Always print the scrub status when available. */
	if (have_scrub && scrub_start > errorscrub_start)
		print_scan_scrub_resilver_status(ps);
	else if (have_errorscrub && errorscrub_start >= scrub_start)
		print_err_scrub_status(ps);

	/*
	 * When there is an active resilver or rebuild print its status.
	 * Otherwise print the status of the last resilver or rebuild.
	 */
	if (active_resilver || (!active_rebuild && have_resilver &&
	    resilver_end_time && resilver_end_time > rebuild_end_time)) {
		print_scan_scrub_resilver_status(ps);
	} else if (active_rebuild || (!active_resilver && have_rebuild &&
	    rebuild_end_time && rebuild_end_time > resilver_end_time)) {
		print_rebuild_status(zhp, nvroot);
	}

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
	print_checkpoint_scan_warning(ps, pcs);
}

/*
 * Print out detailed removal status.
 */
static void
print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
{
	char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
	time_t start, end;
	nvlist_t *config, *nvroot;
	nvlist_t **child;
	uint_t children;
	char *vdev_name;

	if (prs == NULL || prs->prs_state == DSS_NONE)
		return;

	/*
	 * Determine name of vdev.
	 */
	config = zpool_get_config(zhp, NULL);
	nvroot = fnvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE);
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);
	assert(prs->prs_removing_vdev < children);
	vdev_name = zpool_vdev_name(g_zfs, zhp,
	    child[prs->prs_removing_vdev], B_TRUE);

	printf_color(ANSI_BOLD, gettext("remove: "));

	start = prs->prs_start_time;
	end = prs->prs_end_time;
	zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));

	/*
	 * Removal is finished or canceled.
	 */
	if (prs->prs_state == DSS_FINISHED) {
		uint64_t minutes_taken = (end - start) / 60;

		(void) printf(gettext("Removal of vdev %llu copied %s "
		    "in %lluh%um, completed on %s"),
		    (longlong_t)prs->prs_removing_vdev,
		    copied_buf,
		    (u_longlong_t)(minutes_taken / 60),
		    (uint_t)(minutes_taken % 60),
		    ctime((time_t *)&end));
	} else if (prs->prs_state == DSS_CANCELED) {
		(void) printf(gettext("Removal of %s canceled on %s"),
		    vdev_name, ctime(&end));
	} else {
		uint64_t copied, total, elapsed, rate, mins_left, hours_left;
		double fraction_done;

		assert(prs->prs_state == DSS_SCANNING);

		/*
		 * Removal is in progress.
		 */
		(void) printf(gettext(
		    "Evacuation of %s in progress since %s"),
		    vdev_name, ctime(&start));

		copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
		total = prs->prs_to_copy;
		fraction_done = (double)copied / total;

		/* elapsed time for this pass */
		elapsed = time(NULL) - prs->prs_start_time;
		elapsed = elapsed > 0 ? elapsed : 1;
		rate = copied / elapsed;
		rate = rate > 0 ? rate : 1;
		mins_left = ((total - copied) / rate) / 60;
		hours_left = mins_left / 60;

		zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
		zfs_nicenum(total, total_buf, sizeof (total_buf));
		zfs_nicenum(rate, rate_buf, sizeof (rate_buf));

		/*
		 * do not print estimated time if hours_left is more than
		 * 30 days
		 */
		(void) printf(gettext(
		    "\t%s copied out of %s at %s/s, %.2f%% done"),
		    examined_buf, total_buf, rate_buf, 100 * fraction_done);
		if (hours_left < (30 * 24)) {
			(void) printf(gettext(", %lluh%um to go\n"),
			    (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
		} else {
			(void) printf(gettext(
			    ", (copy is slow, no estimated time)\n"));
		}
	}
	free(vdev_name);

	if (prs->prs_mapping_memory > 0) {
		char mem_buf[7];
		zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
		(void) printf(gettext(
		    "\t%s memory used for removed device mappings\n"),
		    mem_buf);
	}
}

/*
 * Print out detailed raidz expansion status.
 */
static void
print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
{
	char copied_buf[7];

	if (pres == NULL || pres->pres_state == DSS_NONE)
		return;

	/*
	 * Determine name of vdev.
	 */
	nvlist_t *config = zpool_get_config(zhp, NULL);
	nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE);
	nvlist_t **child;
	uint_t children;
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);
	assert(pres->pres_expanding_vdev < children);

	printf_color(ANSI_BOLD, gettext("expand: "));

	time_t start = pres->pres_start_time;
	time_t end = pres->pres_end_time;
	char *vname =
	    zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
	zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));

	/*
	 * Expansion is finished or canceled.
	 */
	if (pres->pres_state == DSS_FINISHED) {
		char time_buf[32];
		secs_to_dhms(end - start, time_buf);

		(void) printf(gettext("expanded %s-%u copied %s in %s, "
		    "on %s"), vname, (int)pres->pres_expanding_vdev,
		    copied_buf, time_buf, ctime((time_t *)&end));
	} else {
		char examined_buf[7], total_buf[7], rate_buf[7];
		uint64_t copied, total, elapsed, rate, secs_left;
		double fraction_done;

		assert(pres->pres_state == DSS_SCANNING);

		/*
		 * Expansion is in progress.
		 */
		(void) printf(gettext(
		    "expansion of %s-%u in progress since %s"),
		    vname, (int)pres->pres_expanding_vdev, ctime(&start));

		copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
		total = pres->pres_to_reflow;
		fraction_done = (double)copied / total;

		/* elapsed time for this pass */
		elapsed = time(NULL) - pres->pres_start_time;
		elapsed = elapsed > 0 ? elapsed : 1;
		rate = copied / elapsed;
		rate = rate > 0 ? rate : 1;
		secs_left = (total - copied) / rate;

		zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
		zfs_nicenum(total, total_buf, sizeof (total_buf));
		zfs_nicenum(rate, rate_buf, sizeof (rate_buf));

		/*
		 * do not print estimated time if secs_left is more than
		 * 30 days
		 */
		(void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
		    examined_buf, total_buf, rate_buf, 100 * fraction_done);
		if (pres->pres_waiting_for_resilver) {
			(void) printf(gettext(", paused for resilver or "
			    "clear\n"));
		} else if (secs_left < (30 * 24 * 3600)) {
			char time_buf[32];
			secs_to_dhms(secs_left, time_buf);
			(void) printf(gettext(", %s to go\n"), time_buf);
		} else {
			(void) printf(gettext(
			    ", (copy is slow, no estimated time)\n"));
		}
	}
	free(vname);
}

static void
print_checkpoint_status(pool_checkpoint_stat_t *pcs)
{
	time_t start;
	char space_buf[7];

	if (pcs == NULL || pcs->pcs_state == CS_NONE)
		return;

	(void) printf(gettext("checkpoint: "));

	start = pcs->pcs_start_time;
	zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));

	if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
		char *date = ctime(&start);

		/*
		 * ctime() adds a newline at the end of the generated
		 * string, thus the weird format specifier and the
		 * strlen() call used to chop it off from the output.
		 */
		(void) printf(gettext("created %.*s, consumes %s\n"),
		    (int)(strlen(date) - 1), date, space_buf);
		return;
	}

	assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);

	(void) printf(gettext("discarding, %s remaining.\n"),
	    space_buf);
}

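/*
 * Print the list of files with permanent errors, one path per line.
 */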
static void
print_error_log(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem;
	char *pathname;
	size_t len = MAXPATHLEN * 2;

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;

	(void) printf("errors: Permanent errors have been "
	    "detected in the following files:\n\n");

	pathname = safe_malloc(len);
	elem = NULL;
	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
		(void) printf("%7s %s\n", "", pathname);
	}
	free(pathname);
	nvlist_free(nverrlist);
}

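/*
 * Print the status of each configured hot spare.
 */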
static void
print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
    uint_t nspares)
{
	uint_t i;
	char *name;

	if (nspares == 0)
		return;

	(void) printf(gettext("\tspares\n"));

	for (i = 0; i < nspares; i++) {
		name = zpool_vdev_name(g_zfs, zhp, spares[i],
		    cb->cb_name_flags);
		print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
		free(name);
	}
}

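/*
 * Print the status of each configured L2ARC cache device.
 */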
static void
print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
    uint_t nl2cache)
{
	uint_t i;
	char *name;

	if (nl2cache == 0)
		return;

	(void) printf(gettext("\tcache\n"));

	for (i = 0; i < nl2cache; i++) {
		name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
		    cb->cb_name_flags);
		print_status_config(zhp, cb, name, l2cache[i], 2,
		    B_FALSE, NULL);
		free(name);
	}
}

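/*
 * Print DDT entry counts, on-disk/in-core/cached sizes, and the full
 * dedup histogram.
 */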
static void
print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal)
{
	ddt_histogram_t *ddh;
	ddt_stat_t *dds;
	ddt_object_t *ddo;
	uint_t c;
	/* Extra space provided for literal display */
	char dspace[32], mspace[32], cspace[32];
	uint64_t cspace_prop;
	enum zfs_nicenum_format format;
	zprop_source_t src;

	/*
	 * If the pool was faulted then we may not have been able to
	 * obtain the config. Otherwise, if we have anything in the dedup
	 * table continue processing the stats.
	 */
	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
	    (uint64_t **)&ddo, &c) != 0)
		return;

	(void) printf("\n");
	(void) printf(gettext(" dedup: "));
	if (ddo->ddo_count == 0) {
		(void) printf(gettext("no DDT entries\n"));
		return;
	}

	/*
	 * Squash cached size into in-core size to handle race.
	 * Only include cached size if it is available.
	 */
	cspace_prop = zpool_get_prop_int(zhp, ZPOOL_PROP_DEDUPCACHED, &src);
	cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
	format = literal ? ZFS_NICENUM_RAW : ZFS_NICENUM_1024;
	zfs_nicenum_format(cspace_prop, cspace, sizeof (cspace), format);
	zfs_nicenum_format(ddo->ddo_dspace, dspace, sizeof (dspace), format);
	zfs_nicenum_format(ddo->ddo_mspace, mspace, sizeof (mspace), format);
	(void) printf("DDT entries %llu, size %s on disk, %s in core",
	    (u_longlong_t)ddo->ddo_count,
	    dspace,
	    mspace);
	if (src != ZPROP_SRC_DEFAULT) {
		(void) printf(", %s cached (%.02f%%)",
		    cspace,
		    (double)cspace_prop / (double)ddo->ddo_mspace * 100.0);
	}
	(void) printf("\n");

	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
	    (uint64_t **)&dds, &c) == 0);
	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
	    (uint64_t **)&ddh, &c) == 0);
	zpool_dump_ddt(dds, ddh);
}

#define	ST_SIZE	4096
#define	AC_SIZE	2048

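/*
 * Emit the human-readable "status:" and "action:" text (or the
 * corresponding JSON fields) for the given pool status and errata.
 */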
static void
print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
    zpool_status_t reason, zpool_errata_t errata, nvlist_t *item)
{
	char status[ST_SIZE];
	char action[AC_SIZE];
	memset(status, 0, ST_SIZE);
	memset(action, 0, AC_SIZE);

	switch (reason) {
	case ZPOOL_STATUS_MISSING_DEV_R:
		snprintf(status, ST_SIZE, gettext("One or more devices could "
		    "not be opened. Sufficient replicas exist for\n\tthe pool "
		    "to continue functioning in a degraded state.\n"));
		snprintf(action, AC_SIZE, gettext("Attach the missing device "
		    "and online it using 'zpool online'.\n"));
		break;

	case ZPOOL_STATUS_MISSING_DEV_NR:
		snprintf(status, ST_SIZE, gettext("One or more devices could "
		    "not be opened. There are insufficient\n\treplicas for the"
		    " pool to continue functioning.\n"));
		snprintf(action, AC_SIZE, gettext("Attach the missing device "
		    "and online it using 'zpool online'.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_LABEL_R:
		snprintf(status, ST_SIZE, gettext("One or more devices could "
		    "not be used because the label is missing or\n\tinvalid. "
		    "Sufficient replicas exist for the pool to continue\n\t"
		    "functioning in a degraded state.\n"));
		snprintf(action, AC_SIZE, gettext("Replace the device using "
		    "'zpool replace'.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_LABEL_NR:
		snprintf(status, ST_SIZE, gettext("One or more devices could "
		    "not be used because the label is missing\n\tor invalid. "
		    "There are insufficient replicas for the pool to "
		    "continue\n\tfunctioning.\n"));
		zpool_explain_recover(zpool_get_handle(zhp),
		    zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
		    action, AC_SIZE);
		break;

	case ZPOOL_STATUS_FAILING_DEV:
		snprintf(status, ST_SIZE, gettext("One or more devices has "
		    "experienced an unrecoverable error. An\n\tattempt was "
		    "made to correct the error. Applications are "
		    "unaffected.\n"));
		snprintf(action, AC_SIZE, gettext("Determine if the "
		    "device needs to be replaced, and clear the errors\n\tusing"
		    " 'zpool clear' or replace the device with 'zpool "
		    "replace'.\n"));
		break;

	case ZPOOL_STATUS_OFFLINE_DEV:
		snprintf(status, ST_SIZE, gettext("One or more devices has "
		    "been taken offline by the administrator.\n\tSufficient "
		    "replicas exist for the pool to continue functioning in "
		    "a\n\tdegraded state.\n"));
		snprintf(action, AC_SIZE, gettext("Online the device "
		    "using 'zpool online' or replace the device with\n\t'zpool "
		    "replace'.\n"));
		break;

	case ZPOOL_STATUS_REMOVED_DEV:
		snprintf(status, ST_SIZE, gettext("One or more devices has "
		    "been removed by the administrator.\n\tSufficient "
		    "replicas exist for the pool to continue functioning in "
		    "a\n\tdegraded state.\n"));
		snprintf(action, AC_SIZE, gettext("Online the device "
		    "using 'zpool online' or replace the device with\n\t'zpool "
		    "replace'.\n"));
		break;

	case ZPOOL_STATUS_RESILVERING:
	case ZPOOL_STATUS_REBUILDING:
		snprintf(status, ST_SIZE, gettext("One or more devices is "
		    "currently being resilvered. The pool will\n\tcontinue "
		    "to function, possibly in a degraded state.\n"));
		snprintf(action, AC_SIZE, gettext("Wait for the resilver to "
		    "complete.\n"));
		break;

	case ZPOOL_STATUS_REBUILD_SCRUB:
		snprintf(status, ST_SIZE, gettext("One or more devices have "
		    "been sequentially resilvered, scrubbing\n\tthe pool "
		    "is recommended.\n"));
		snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
		    "verify all data checksums.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_DATA:
		snprintf(status, ST_SIZE, gettext("One or more devices has "
		    "experienced an error resulting in data\n\tcorruption. "
		    "Applications may be affected.\n"));
		snprintf(action, AC_SIZE, gettext("Restore the file in question"
		    " if possible. Otherwise restore the\n\tentire pool from "
		    "backup.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_POOL:
		snprintf(status, ST_SIZE, gettext("The pool metadata is "
		    "corrupted and the pool cannot be opened.\n"));
		zpool_explain_recover(zpool_get_handle(zhp),
		    zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
		    action, AC_SIZE);
		break;

	case ZPOOL_STATUS_VERSION_OLDER:
		snprintf(status, ST_SIZE, gettext("The pool is formatted using "
		    "a legacy on-disk format. The pool can\n\tstill be used, "
		    "but some features are unavailable.\n"));
		snprintf(action, AC_SIZE, gettext("Upgrade the pool using "
		    "'zpool upgrade'. Once this is done, the\n\tpool will no "
		    "longer be accessible on software that does not support\n\t"
		    "feature flags.\n"));
		break;

	case ZPOOL_STATUS_VERSION_NEWER:
		snprintf(status, ST_SIZE, gettext("The pool has been upgraded "
		    "to a newer, incompatible on-disk version.\n\tThe pool "
		    "cannot be accessed on this system.\n"));
		snprintf(action, AC_SIZE, gettext("Access the pool from a "
		    "system running more recent software, or\n\trestore the "
		    "pool from backup.\n"));
		break;

	case ZPOOL_STATUS_FEAT_DISABLED:
		snprintf(status, ST_SIZE, gettext("Some supported and "
		    "requested features are not enabled on the pool.\n\t"
		    "The pool can still be used, but some features are "
		    "unavailable.\n"));
		snprintf(action, AC_SIZE, gettext("Enable all features using "
		    "'zpool upgrade'. Once this is done,\n\tthe pool may no "
		    "longer be accessible by software that does not support\n\t"
		    "the features. See zpool-features(7) for details.\n"));
		break;

	case ZPOOL_STATUS_COMPATIBILITY_ERR:
		snprintf(status, ST_SIZE, gettext("This pool has a "
		    "compatibility list specified, but it could not be\n\t"
		    "read/parsed at this time. The pool can still be used, "
		    "but this\n\tshould be investigated.\n"));
		snprintf(action, AC_SIZE, gettext("Check the value of the "
		    "'compatibility' property against the\n\t"
		    "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
		    ZPOOL_DATA_COMPAT_D ".\n"));
		break;

	case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
		snprintf(status, ST_SIZE, gettext("One or more features "
		    "are enabled on the pool despite not being\n\t"
		    "requested by the 'compatibility' property.\n"));
		snprintf(action, AC_SIZE, gettext("Consider setting "
		    "'compatibility' to an appropriate value, or\n\t"
		    "adding needed features to the relevant file in\n\t"
		    ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
		break;

	case ZPOOL_STATUS_UNSUP_FEAT_READ:
		snprintf(status, ST_SIZE, gettext("The pool cannot be accessed "
		    "on this system because it uses the\n\tfollowing feature(s)"
		    " not supported on this system:\n"));
		zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
		    1024);
		snprintf(action, AC_SIZE, gettext("Access the pool from a "
		    "system that supports the required feature(s),\n\tor "
		    "restore the pool from backup.\n"));
		break;

	case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
		snprintf(status, ST_SIZE, gettext("The pool can only be "
		    "accessed in read-only mode on this system. It\n\tcannot be"
		    " accessed in read-write mode because it uses the "
		    "following\n\tfeature(s) not supported on this system:\n"));
		zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
		    1024);
		snprintf(action, AC_SIZE, gettext("The pool cannot be accessed "
		    "in read-write mode. Import the pool with\n"
		    "\t\"-o readonly=on\", access the pool from a system that "
		    "supports the\n\trequired feature(s), or restore the "
		    "pool from backup.\n"));
		break;

	case ZPOOL_STATUS_FAULTED_DEV_R:
		snprintf(status, ST_SIZE, gettext("One or more devices are "
		    "faulted in response to persistent errors.\n\tSufficient "
		    "replicas exist for the pool to continue functioning "
		    "in a\n\tdegraded state.\n"));
		snprintf(action, AC_SIZE, gettext("Replace the faulted device, "
		    "or use 'zpool clear' to mark the device\n\trepaired.\n"));
		break;

	case ZPOOL_STATUS_FAULTED_DEV_NR:
		snprintf(status, ST_SIZE, gettext("One or more devices are "
		    "faulted in response to persistent errors. There are "
		    "insufficient replicas for the pool to\n\tcontinue "
		    "functioning.\n"));
		snprintf(action, AC_SIZE, gettext("Destroy and re-create the "
		    "pool from a backup source. Manually marking the device\n"
		    "\trepaired using 'zpool clear' may allow some data "
		    "to be recovered.\n"));
		break;

	case ZPOOL_STATUS_IO_FAILURE_MMP:
		snprintf(status, ST_SIZE, gettext("The pool is suspended "
		    "because multihost writes failed or were delayed;\n\t"
		    "another system could import the pool undetected.\n"));
		snprintf(action, AC_SIZE, gettext("Make sure the pool's devices"
		    " are connected, then reboot your system and\n\timport the "
		    "pool or run 'zpool clear' to resume the pool.\n"));
		break;

	case ZPOOL_STATUS_IO_FAILURE_WAIT:
	case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
		snprintf(status, ST_SIZE, gettext("One or more devices are "
		    "faulted in response to IO failures.\n"));
		snprintf(action, AC_SIZE, gettext("Make sure the affected "
		    "devices are connected, then run 'zpool clear'.\n"));
		break;

	case ZPOOL_STATUS_BAD_LOG:
		snprintf(status, ST_SIZE, gettext("An intent log record "
		    "could not be read.\n"
		    "\tWaiting for administrator intervention to fix the "
		    "faulted pool.\n"));
		snprintf(action, AC_SIZE, gettext("Either restore the affected "
		    "device(s) and run 'zpool online',\n"
		    "\tor ignore the intent log records by running "
		    "'zpool clear'.\n"));
		break;

	case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
		snprintf(status, ST_SIZE, gettext("One or more devices are "
		    "configured to use a non-native block size.\n"
		    "\tExpect reduced performance.\n"));
		snprintf(action, AC_SIZE, gettext("Replace affected devices "
		    "with devices that support the\n\tconfigured block size, "
		    "or migrate data to a properly configured\n\tpool.\n"));
		break;

	case ZPOOL_STATUS_HOSTID_MISMATCH:
		snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid"
		    " and system hostid on imported pool.\n\tThis pool was "
		    "previously imported into a system with a different "
		    "hostid,\n\tand then was verbatim imported into this "
		    "system.\n"));
		snprintf(action, AC_SIZE, gettext("Export this pool on all "
		    "systems on which it is imported.\n"
		    "\tThen import it to correct the mismatch.\n"));
		break;

	case ZPOOL_STATUS_ERRATA:
		snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"),
		    errata);
		switch (errata) {
		case ZPOOL_ERRATA_NONE:
			break;

		case ZPOOL_ERRATA_ZOL_2094_SCRUB:
			snprintf(action, AC_SIZE, gettext("To correct the issue"
			    " run 'zpool scrub'.\n"));
			break;

		case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
			(void) strlcat(status, gettext("\tExisting encrypted "
			    "datasets contain an on-disk incompatibility\n\t"
			    "which needs to be corrected.\n"), ST_SIZE);
			snprintf(action, AC_SIZE, gettext("To correct the issue"
			    " backup existing encrypted datasets to new\n\t"
			    "encrypted datasets and destroy the old ones. "
			    "'zfs mount -o ro' can\n\tbe used to temporarily "
			    "mount existing encrypted datasets readonly.\n"));
			break;

		case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
			(void) strlcat(status, gettext("\tExisting encrypted "
			    "snapshots and bookmarks contain an on-disk\n\t"
			    "incompatibility. This may cause on-disk "
			    "corruption if they are used\n\twith "
			    "'zfs recv'.\n"), ST_SIZE);
			snprintf(action, AC_SIZE, gettext("To correct the "
			    "issue, enable the bookmark_v2 feature. No "
			    "additional\n\taction is needed if there are no "
			    "encrypted snapshots or bookmarks.\n\tIf preserving "
			    "the encrypted snapshots and bookmarks is required,"
			    " use\n\ta non-raw send to backup and restore them."
			    " Alternately, they may be\n\tremoved to resolve "
			    "the incompatibility.\n"));
			break;

		default:
			/*
			 * All errata which allow the pool to be imported
			 * must contain an action message.
			 */
			assert(0);
		}
		break;

	default:
		/*
		 * The remaining errors can't actually be generated, yet.
		 */
		assert(reason == ZPOOL_STATUS_OK);
	}

	if (status[0] != 0) {
		if (cbp->cb_json)
			fnvlist_add_string(item, "status", status);
		else {
			printf_color(ANSI_BOLD, gettext("status: "));
			printf_color(ANSI_YELLOW, status);
		}
	}

	if (action[0] != 0) {
		if (cbp->cb_json)
			fnvlist_add_string(item, "action", action);
		else {
			printf_color(ANSI_BOLD, gettext("action: "));
			printf_color(ANSI_YELLOW, action);
		}
	}
}

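/*
 * JSON counterpart of status_callback(): collect the status of a single
 * pool into the "pools" object of the shared JSON schema.
 */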
static int
status_callback_json(zpool_handle_t *zhp, void *data)
{
	status_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;
	const char *msgid;
	char pool_guid[256];
	char msgbuf[256];
	uint64_t guid;
	zpool_status_t reason;
	zpool_errata_t errata;
	uint_t c;
	vdev_stat_t *vs;
	nvlist_t *item, *d, *load_info, *vds;
	item = d = NULL;

	/* If dedup stats were requested, also fetch dedupcached. */
	if (cbp->cb_dedup_stats > 1)
		zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
	reason = zpool_get_status(zhp, &msgid, &errata);
	/*
	 * If we were given 'zpool status -x', only report those pools with
	 * problems.
	 */
	if (cbp->cb_explain &&
	    (reason == ZPOOL_STATUS_OK ||
	    reason == ZPOOL_STATUS_VERSION_OLDER ||
	    reason == ZPOOL_STATUS_FEAT_DISABLED ||
	    reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
	    reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
		return (0);
	}

	d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
	item = fnvlist_alloc();
	vds = fnvlist_alloc();
	fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int);
	config = zpool_get_config(zhp, NULL);

	if (config != NULL) {
		nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
		verify(nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);
		if (cbp->cb_json_pool_key_guid) {
			guid = fnvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_POOL_GUID);
			snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid);
		}
		cbp->cb_count++;

		print_status_reason(zhp, cbp, reason, errata, item);
		if (msgid != NULL) {
			snprintf(msgbuf, 256,
			    "https://openzfs.github.io/openzfs-docs/msg/%s",
			    msgid);
			fnvlist_add_string(item, "msgid", msgid);
			fnvlist_add_string(item, "moreinfo", msgbuf);
		}

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    &load_info) == 0) {
			fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO,
			    load_info);
		}

		scan_status_nvlist(zhp, cbp, nvroot, item);
		removal_status_nvlist(zhp, cbp, nvroot, item);
		checkpoint_status_nvlist(nvroot, cbp, item);
		raidz_expand_status_nvlist(zhp, cbp, nvroot, item);
		vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds);
		if (cbp->cb_flat_vdevs) {
			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_BIAS_DEDUP, vds);
			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_BIAS_SPECIAL, vds);
			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_CLASS_LOGS, vds);
			l2cache_nvlist(zhp, cbp, nvroot, vds);
			spares_nvlist(zhp, cbp, nvroot, vds);

			fnvlist_add_nvlist(item, "vdevs", vds);
			fnvlist_free(vds);
		} else {
			fnvlist_add_nvlist(item, "vdevs", vds);
			fnvlist_free(vds);

			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_BIAS_DEDUP, item);
			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_BIAS_SPECIAL, item);
			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_CLASS_LOGS, item);
			l2cache_nvlist(zhp, cbp, nvroot, item);
			spares_nvlist(zhp, cbp, nvroot, item);
		}
		dedup_stats_nvlist(zhp, cbp, item);
		errors_nvlist(zhp, cbp, item);
	}
	if (cbp->cb_json_pool_key_guid) {
		fnvlist_add_nvlist(d, pool_guid, item);
	} else {
		fnvlist_add_nvlist(d, zpool_get_name(zhp),
		    item);
	}
	fnvlist_free(item);
	return (0);
}

/*
 * Display a summary of pool status. Displays a summary such as:
 *
 *	  pool: tank
 *	status: DEGRADED
 *	reason: One or more devices ...
 *	   see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
 *	config:
 *		mirror		DEGRADED
 *		  c1t0d0	OK
 *		  c2t0d0	UNAVAIL
 *
 * When given the '-v' option, we print out the complete config. If the '-e'
 * option is specified, then we print out error rate information as well.
 */
static int
status_callback(zpool_handle_t *zhp, void *data)
{
	status_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;
	const char *msgid;
	zpool_status_t reason;
	zpool_errata_t errata;
	const char *health;
	uint_t c;
	vdev_stat_t *vs;

	/* If dedup stats were requested, also fetch dedupcached. */
	if (cbp->cb_dedup_stats > 1)
		zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);

	config = zpool_get_config(zhp, NULL);
	reason = zpool_get_status(zhp, &msgid, &errata);

	cbp->cb_count++;

	/*
	 * If we were given 'zpool status -x', only report those pools with
	 * problems.
	 */
	if (cbp->cb_explain &&
	    (reason == ZPOOL_STATUS_OK ||
	    reason == ZPOOL_STATUS_VERSION_OLDER ||
	    reason == ZPOOL_STATUS_FEAT_DISABLED ||
	    reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
	    reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
		if (!cbp->cb_allpools) {
			(void) printf(gettext("pool '%s' is healthy\n"),
			    zpool_get_name(zhp));
			if (cbp->cb_first)
				cbp->cb_first = B_FALSE;
		}
		return (0);
	}

	if (cbp->cb_first)
		cbp->cb_first = B_FALSE;
	else
		(void) printf("\n");

	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);

	health = zpool_get_state_str(zhp);

	printf("  ");
	printf_color(ANSI_BOLD, gettext("pool:"));
	printf(" %s\n", zpool_get_name(zhp));
	fputc(' ', stdout);
	printf_color(ANSI_BOLD, gettext("state: "));

	printf_color(health_str_to_color(health), "%s", health);

	fputc('\n', stdout);
	print_status_reason(zhp, cbp, reason, errata, NULL);

	if (msgid != NULL) {
		printf("   ");
		printf_color(ANSI_BOLD, gettext("see:"));
		printf(gettext(
		    " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
		    msgid);
	}

	if (config != NULL) {
		uint64_t nerr;
		nvlist_t **spares, **l2cache;
		uint_t nspares, nl2cache;

		print_scan_status(zhp, nvroot);

		pool_removal_stat_t *prs = NULL;
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
		print_removal_status(zhp, prs);

		pool_checkpoint_stat_t *pcs = NULL;
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
		print_checkpoint_status(pcs);

		pool_raidz_expand_stat_t *pres = NULL;
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
		print_raidz_expand_status(zhp, pres);

		cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
		    cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
		if (cbp->cb_namewidth < 10)
			cbp->cb_namewidth = 10;

		color_start(ANSI_BOLD);
		(void) printf(gettext("config:\n\n"));
		(void) printf(gettext("\t%-*s  %-8s %5s %5s %5s"),
		    cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
		    "CKSUM");
		color_end();

		if (cbp->cb_print_slow_ios) {
			printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
		}

		if (cbp->cb_print_power) {
			printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
		}

		if (cbp->cb_print_dio_verify) {
			printf_color(ANSI_BOLD, " %5s", gettext("DIO"));
		}

		if (cbp->vcdl != NULL)
			print_cmd_columns(cbp->vcdl, 0);

		printf("\n");

		print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
		    B_FALSE, NULL);

		print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
		print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
		print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);

		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0)
			print_l2cache(zhp, cbp, l2cache, nl2cache);

		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0)
			print_spares(zhp, cbp, spares, nspares);

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0) {
			(void) printf("\n");
			if (nerr == 0) {
				(void) printf(gettext(
				    "errors: No known data errors\n"));
			} else if (!cbp->cb_verbose) {
				color_start(ANSI_RED);
				(void) printf(gettext("errors: %llu data "
				    "errors, use '-v' for a list\n"),
				    (u_longlong_t)nerr);
				color_end();
			} else {
				print_error_log(zhp);
			}
		}

		if (cbp->cb_dedup_stats)
			print_dedup_stats(zhp, config, cbp->cb_literal);
	} else {
		(void) printf(gettext("config: The configuration cannot be "
		    "determined.\n"));
	}

	return (0);
}

/*
 * zpool status [-c [script1,script2,...]] [-dDegiLpPstvx] [--power] ...
 *	[-T d|u] [pool] [interval [count]]
 *
 *	-c CMD	For each vdev, run command CMD
 *	-d	Display Direct I/O write verify errors
 *	-D	Display dedup status (undocumented)
 *	-e	Display only unhealthy vdevs
 *	-g	Display guid for individual vdev name.
 *	-i	Display vdev initialization status.
 *	-L	Follow links when resolving vdev path name.
 *	-p	Display values in parsable (exact) format.
 *	-P	Display full path for vdev name.
 *	-s	Display slow IOs column.
 *	-t	Display vdev TRIM status.
 *	-T	Display a timestamp in date(1) or Unix format
 *	-v	Display complete error logs
 *	-x	Display only pools with potential problems
 *	-j	Display output in JSON format
 *	--power	Display vdev enclosure slot power status
 *	--json-int	Display numbers in integer format instead of string
 *	--json-flat-vdevs	Display vdevs in flat hierarchy
 *	--json-pool-key-guid	Use pool GUID as key for pool objects
 *
 * Describes the health status of all pools or some subset.
 */
int
zpool_do_status(int argc, char **argv)
{
	int c;
	int ret;
	float interval = 0;
	unsigned long count = 0;
	status_cbdata_t cb = { 0 };
	nvlist_t *data;
	char *cmd = NULL;

	struct option long_options[] = {
		{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
		{"json", no_argument, NULL, 'j'},
		{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
		{"json-flat-vdevs", no_argument, NULL,
		    ZPOOL_OPTION_JSON_FLAT_VDEVS},
		{"json-pool-key-guid", no_argument, NULL,
		    ZPOOL_OPTION_POOL_KEY_GUID},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "c:jdDegiLpPstT:vx", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'c':
			if (cmd != NULL) {
				fprintf(stderr,
				    gettext("Can't set -c flag twice\n"));
				exit(1);
			}

			if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
				fprintf(stderr, gettext(
				    "Can't run -c, disabled by "
				    "ZPOOL_SCRIPTS_ENABLED.\n"));
				exit(1);
			}

			if ((getuid() <= 0 || geteuid() <= 0) &&
			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
				fprintf(stderr, gettext(
				    "Can't run -c with root privileges "
				    "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
				exit(1);
			}
			cmd = optarg;
			break;
		case 'd':
			cb.cb_print_dio_verify = B_TRUE;
			break;
		case 'D':
			if (++cb.cb_dedup_stats > 2)
				cb.cb_dedup_stats = 2;
			break;
		case 'e':
			cb.cb_print_unhealthy = B_TRUE;
			break;
		case 'g':
			cb.cb_name_flags |= VDEV_NAME_GUID;
			break;
		case 'i':
			cb.cb_print_vdev_init = B_TRUE;
			break;
		case 'L':
			cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'p':
			cb.cb_literal = B_TRUE;
			break;
		case 'P':
			cb.cb_name_flags |= VDEV_NAME_PATH;
			break;
		case 's':
			cb.cb_print_slow_ios = B_TRUE;
			break;
		case 't':
			cb.cb_print_vdev_trim = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 'v':
			cb.cb_verbose = B_TRUE;
			break;
		case 'j':
			cb.cb_json = B_TRUE;
			break;
		case 'x':
			cb.cb_explain = B_TRUE;
			break;
		case ZPOOL_OPTION_POWER:
			cb.cb_print_power = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_FLAT_VDEVS:
			cb.cb_flat_vdevs = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_NUMS_AS_INT:
			cb.cb_json_as_int = B_TRUE;
			cb.cb_literal = B_TRUE;
			break;
		case ZPOOL_OPTION_POOL_KEY_GUID:
			cb.cb_json_pool_key_guid = B_TRUE;
			break;
		case '?':
			if (optopt == 'c') {
				print_zpool_script_list("status");
				exit(0);
			} else {
				fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	get_interval_count(&argc, argv, &interval, &count);

	if (argc == 0)
		cb.cb_allpools = B_TRUE;

	cb.cb_first = B_TRUE;
	cb.cb_print_status = B_TRUE;

	if (cb.cb_flat_vdevs && !cb.cb_json) {
		fprintf(stderr, gettext("'--json-flat-vdevs' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (cb.cb_json_as_int && !cb.cb_json) {
		(void) fprintf(stderr, gettext("'--json-int' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (!cb.cb_json && cb.cb_json_pool_key_guid) {
		(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
		    " works with '-j' option\n"));
		usage(B_FALSE);
	}

	for (;;) {
		if (cb.cb_json) {
			cb.cb_jsobj = zpool_json_schema(0, 1);
			data = fnvlist_alloc();
			fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
			fnvlist_free(data);
		}

		if (timestamp_fmt != NODATE) {
			if (cb.cb_json) {
				if (cb.cb_json_as_int) {
					fnvlist_add_uint64(cb.cb_jsobj, "time",
					    time(NULL));
				} else {
					char ts[128];
					get_timestamp(timestamp_fmt, ts, 128);
					fnvlist_add_string(cb.cb_jsobj, "time",
					    ts);
				}
			} else
				print_timestamp(timestamp_fmt);
		}

		if (cmd != NULL)
			cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
			    NULL, NULL, 0, 0);

		if (cb.cb_json) {
			ret = for_each_pool(argc, argv, B_TRUE, NULL,
			    ZFS_TYPE_POOL, cb.cb_literal,
			    status_callback_json, &cb);
		} else {
			ret = for_each_pool(argc, argv, B_TRUE, NULL,
			    ZFS_TYPE_POOL, cb.cb_literal,
			    status_callback, &cb);
		}

		if (cb.vcdl != NULL)
			free_vdev_cmd_data_list(cb.vcdl);

		if (cb.cb_json) {
			if (ret == 0)
				zcmd_print_json(cb.cb_jsobj);
			else
				nvlist_free(cb.cb_jsobj);
		} else {
			if (argc == 0 && cb.cb_count == 0) {
				(void) fprintf(stderr, "%s",
				    gettext("no pools available\n"));
			} else if (cb.cb_explain && cb.cb_first &&
			    cb.cb_allpools) {
				(void) printf("%s",
				    gettext("all pools are healthy\n"));
			}
		}

		if (ret != 0)
			return (ret);

		if (interval == 0)
			break;

		if (count != 0 && --count == 0)
			break;

		(void) fflush(stdout);
		(void) fsleep(interval);
	}

	return (0);
}

typedef struct upgrade_cbdata {
	int	cb_first;
	int	cb_argc;
	uint64_t cb_version;
	char	**cb_argv;
} upgrade_cbdata_t;

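/*
 * zfs_iter_root() callback: count filesystems whose ZPL version is newer
 * than this implementation supports, recursing into child filesystems.
 */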
11233 static int
check_unsupp_fs(zfs_handle_t * zhp,void * unsupp_fs)11234 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
11235 {
11236 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
11237 int *count = (int *)unsupp_fs;
11238
11239 if (zfs_version > ZPL_VERSION) {
11240 (void) printf(gettext("%s (v%d) is not supported by this "
11241 "implementation of ZFS.\n"),
11242 zfs_get_name(zhp), zfs_version);
11243 (*count)++;
11244 }
11245
11246 zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
11247
11248 zfs_close(zhp);
11249
11250 return (0);
11251 }
11252
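/*
 * Upgrade a pool from a legacy on-disk version to 'version'.  Refuses to
 * proceed if any filesystem in the pool is unsupported by this
 * implementation, or if the 'compatibility' property pins the pool to
 * legacy versions.
 */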
11253 static int
11254 upgrade_version(zpool_handle_t *zhp, uint64_t version)
11255 {
11256 int ret;
11257 nvlist_t *config;
11258 uint64_t oldversion;
11259 int unsupp_fs = 0;
11260
11261 config = zpool_get_config(zhp, NULL);
11262 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11263 &oldversion) == 0);
11264
11265 char compat[ZFS_MAXPROPLEN];
11266 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
11267 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
11268 compat[0] = '\0';
11269
11270 assert(SPA_VERSION_IS_SUPPORTED(oldversion));
11271 assert(oldversion < version);
11272
11273 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
11274 if (ret != 0)
11275 return (ret);
11276
11277 if (unsupp_fs) {
11278 (void) fprintf(stderr, gettext("Upgrade not performed due "
11279 "to %d unsupported filesystems (max v%d).\n"),
11280 unsupp_fs, (int)ZPL_VERSION);
11281 return (1);
11282 }
11283
11284 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
11285 (void) fprintf(stderr, gettext("Upgrade not performed because "
11286 "'compatibility' property set to '"
11287 ZPOOL_COMPAT_LEGACY "'.\n"));
11288 return (1);
11289 }
11290
11291 ret = zpool_upgrade(zhp, version);
11292 if (ret != 0)
11293 return (ret);
11294
11295 if (version >= SPA_VERSION_FEATURES) {
11296 (void) printf(gettext("Successfully upgraded "
11297 "'%s' from version %llu to feature flags.\n"),
11298 zpool_get_name(zhp), (u_longlong_t)oldversion);
11299 } else {
11300 (void) printf(gettext("Successfully upgraded "
11301 "'%s' from version %llu to version %llu.\n"),
11302 zpool_get_name(zhp), (u_longlong_t)oldversion,
11303 (u_longlong_t)version);
11304 }
11305
11306 return (0);
11307 }
11308
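/*
 * Enable every supported feature that the pool's 'compatibility' property
 * allows and that is not yet enabled.  The number of newly enabled
 * features is returned through 'countp'.
 */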
11309 static int
11310 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
11311 {
11312 int i, ret, count;
11313 boolean_t firstff = B_TRUE;
11314 nvlist_t *enabled = zpool_get_features(zhp);
11315
11316 char compat[ZFS_MAXPROPLEN];
11317 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
11318 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
11319 compat[0] = '\0';
11320
11321 boolean_t requested_features[SPA_FEATURES];
11322 if (zpool_do_load_compat(compat, requested_features) !=
11323 ZPOOL_COMPATIBILITY_OK)
11324 return (-1);
11325
11326 count = 0;
11327 for (i = 0; i < SPA_FEATURES; i++) {
11328 const char *fname = spa_feature_table[i].fi_uname;
11329 const char *fguid = spa_feature_table[i].fi_guid;
11330
11331 if (!spa_feature_table[i].fi_zfs_mod_supported)
11332 continue;
11333
11334 if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
11335 char *propname;
11336 verify(-1 != asprintf(&propname, "feature@%s", fname));
11337 ret = zpool_set_prop(zhp, propname,
11338 ZFS_FEATURE_ENABLED);
11339 if (ret != 0) {
11340 free(propname);
11341 return (ret);
11342 }
11343 count++;
11344
11345 if (firstff) {
11346 (void) printf(gettext("Enabled the "
11347 "following features on '%s':\n"),
11348 zpool_get_name(zhp));
11349 firstff = B_FALSE;
11350 }
11351 (void) printf(gettext(" %s\n"), fname);
11352 free(propname);
11353 }
11354 }
11355
11356 if (countp != NULL)
11357 *countp = count;
11358 return (0);
11359 }
11360
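/*
 * zpool_iter() callback for 'zpool upgrade -a': bring each pool up to
 * cb_version, then enable any missing features for feature-flags targets.
 */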
11361 static int
11362 upgrade_cb(zpool_handle_t *zhp, void *arg)
11363 {
11364 upgrade_cbdata_t *cbp = arg;
11365 nvlist_t *config;
11366 uint64_t version;
11367 boolean_t modified_pool = B_FALSE;
11368 int ret;
11369
11370 config = zpool_get_config(zhp, NULL);
11371 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11372 &version) == 0);
11373
11374 assert(SPA_VERSION_IS_SUPPORTED(version));
11375
11376 if (version < cbp->cb_version) {
11377 cbp->cb_first = B_FALSE;
11378 ret = upgrade_version(zhp, cbp->cb_version);
11379 if (ret != 0)
11380 return (ret);
11381 modified_pool = B_TRUE;
11382
11383 /*
11384 * If they did "zpool upgrade -a", then we could
11385 * be doing ioctls to different pools. We need
11386 * to log this history once to each pool, and bypass
11387 * the normal history logging that happens in main().
11388 */
11389 (void) zpool_log_history(g_zfs, history_str);
11390 log_history = B_FALSE;
11391 }
11392
11393 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
11394 int count;
11395 ret = upgrade_enable_all(zhp, &count);
11396 if (ret != 0)
11397 return (ret);
11398
11399 if (count > 0) {
11400 cbp->cb_first = B_FALSE;
11401 modified_pool = B_TRUE;
11402 }
11403 }
11404
11405 if (modified_pool) {
11406 (void) printf("\n");
11407 (void) after_zpool_upgrade(zhp);
11408 }
11409
11410 return (0);
11411 }
11412
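/*
 * zpool_iter() callback for plain 'zpool upgrade': list pools still
 * formatted with a legacy (pre-feature-flags) version number.
 */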
11413 static int
11414 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
11415 {
11416 upgrade_cbdata_t *cbp = arg;
11417 nvlist_t *config;
11418 uint64_t version;
11419
11420 config = zpool_get_config(zhp, NULL);
11421 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11422 &version) == 0);
11423
11424 assert(SPA_VERSION_IS_SUPPORTED(version));
11425
11426 if (version < SPA_VERSION_FEATURES) {
11427 if (cbp->cb_first) {
11428 (void) printf(gettext("The following pools are "
11429 "formatted with legacy version numbers and can\n"
11430 "be upgraded to use feature flags. After "
11431 "being upgraded, these pools\nwill no "
11432 "longer be accessible by software that does not "
11433 "support feature\nflags.\n\n"
11434 "Note that setting a pool's 'compatibility' "
11435 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
11436 "inhibit upgrades.\n\n"));
11437 (void) printf(gettext("VER POOL\n"));
11438 (void) printf(gettext("--- ------------\n"));
11439 cbp->cb_first = B_FALSE;
11440 }
11441
11442 (void) printf("%2llu %s\n", (u_longlong_t)version,
11443 zpool_get_name(zhp));
11444 }
11445
11446 return (0);
11447 }
11448
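/*
 * zpool_iter() callback for plain 'zpool upgrade': for feature-flags
 * pools, list any supported features that are not yet enabled.
 */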
11449 static int
11450 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
11451 {
11452 upgrade_cbdata_t *cbp = arg;
11453 nvlist_t *config;
11454 uint64_t version;
11455
11456 config = zpool_get_config(zhp, NULL);
11457 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11458 &version) == 0);
11459
11460 if (version >= SPA_VERSION_FEATURES) {
11461 int i;
11462 boolean_t poolfirst = B_TRUE;
11463 nvlist_t *enabled = zpool_get_features(zhp);
11464
11465 for (i = 0; i < SPA_FEATURES; i++) {
11466 const char *fguid = spa_feature_table[i].fi_guid;
11467 const char *fname = spa_feature_table[i].fi_uname;
11468
11469 if (!spa_feature_table[i].fi_zfs_mod_supported)
11470 continue;
11471
11472 if (!nvlist_exists(enabled, fguid)) {
11473 if (cbp->cb_first) {
11474 (void) printf(gettext("\nSome "
11475 "supported features are not "
11476 "enabled on the following pools. "
11477 "Once a\nfeature is enabled the "
11478 "pool may become incompatible with "
11479 "software\nthat does not support "
11480 "the feature. See "
11481 "zpool-features(7) for "
11482 "details.\n\n"
11483 "Note that the pool "
11484 "'compatibility' feature can be "
11485 "used to inhibit\nfeature "
11486 "upgrades.\n\n"));
11487 (void) printf(gettext("POOL "
11488 "FEATURE\n"));
11489 (void) printf(gettext("------"
11490 "---------\n"));
11491 cbp->cb_first = B_FALSE;
11492 }
11493
11494 if (poolfirst) {
11495 (void) printf(gettext("%s\n"),
11496 zpool_get_name(zhp));
11497 poolfirst = B_FALSE;
11498 }
11499
11500 (void) printf(gettext(" %s\n"), fname);
11501 }
11502 /*
11503 * If they did "zpool upgrade -a", then we could
11504 * be doing ioctls to different pools. We need
11505 * to log this history once to each pool, and bypass
11506 * the normal history logging that happens in main().
11507 */
11508 (void) zpool_log_history(g_zfs, history_str);
11509 log_history = B_FALSE;
11510 }
11511 }
11512
11513 return (0);
11514 }
11515
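/*
 * Upgrade a single, explicitly named pool, reporting when it is already
 * at the requested version with all requested features enabled.
 */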
11516 static int
11517 upgrade_one(zpool_handle_t *zhp, void *data)
11518 {
11519 boolean_t modified_pool = B_FALSE;
11520 upgrade_cbdata_t *cbp = data;
11521 uint64_t cur_version;
11522 int ret;
11523
11524 if (strcmp("log", zpool_get_name(zhp)) == 0) {
11525 (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
11526 "Pool 'log' must be renamed using export and import"
11527 " to upgrade.\n"));
11528 return (1);
11529 }
11530
11531 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
11532 if (cur_version > cbp->cb_version) {
11533 (void) printf(gettext("Pool '%s' is already formatted "
11534 "using more current version '%llu'.\n\n"),
11535 zpool_get_name(zhp), (u_longlong_t)cur_version);
11536 return (0);
11537 }
11538
11539 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
11540 (void) printf(gettext("Pool '%s' is already formatted "
11541 "using version %llu.\n\n"), zpool_get_name(zhp),
11542 (u_longlong_t)cbp->cb_version);
11543 return (0);
11544 }
11545
11546 if (cur_version != cbp->cb_version) {
11547 modified_pool = B_TRUE;
11548 ret = upgrade_version(zhp, cbp->cb_version);
11549 if (ret != 0)
11550 return (ret);
11551 }
11552
11553 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
11554 int count = 0;
11555 ret = upgrade_enable_all(zhp, &count);
11556 if (ret != 0)
11557 return (ret);
11558
11559 if (count != 0) {
11560 modified_pool = B_TRUE;
11561 } else if (cur_version == SPA_VERSION) {
11562 (void) printf(gettext("Pool '%s' already has all "
11563 "supported and requested features enabled.\n"),
11564 zpool_get_name(zhp));
11565 }
11566 }
11567
11568 if (modified_pool) {
11569 (void) printf("\n");
11570 (void) after_zpool_upgrade(zhp);
11571 }
11572
11573 return (0);
11574 }
11575
11576 /*
11577 * zpool upgrade
11578 * zpool upgrade -v
11579 * zpool upgrade [-V version] <-a | pool ...>
11580 *
11581  * With no arguments, display downrev'd ZFS pools available for upgrade.
11582 * Individual pools can be upgraded by specifying the pool, and '-a' will
11583 * upgrade all pools.
11584 */
11585 int
11586 zpool_do_upgrade(int argc, char **argv)
11587 {
11588 int c;
11589 upgrade_cbdata_t cb = { 0 };
11590 int ret = 0;
11591 boolean_t showversions = B_FALSE;
11592 boolean_t upgradeall = B_FALSE;
11593 char *end;
11594
11595
11596 /* check options */
11597 while ((c = getopt(argc, argv, ":avV:")) != -1) {
11598 switch (c) {
11599 case 'a':
11600 upgradeall = B_TRUE;
11601 break;
11602 case 'v':
11603 showversions = B_TRUE;
11604 break;
11605 case 'V':
11606 cb.cb_version = strtoll(optarg, &end, 10);
11607 if (*end != '\0' ||
11608 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
11609 (void) fprintf(stderr,
11610 gettext("invalid version '%s'\n"), optarg);
11611 usage(B_FALSE);
11612 }
11613 break;
11614 case ':':
11615 (void) fprintf(stderr, gettext("missing argument for "
11616 "'%c' option\n"), optopt);
11617 usage(B_FALSE);
11618 break;
11619 case '?':
11620 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
11621 optopt);
11622 usage(B_FALSE);
11623 }
11624 }
11625
11626 cb.cb_argc = argc;
11627 cb.cb_argv = argv;
11628 argc -= optind;
11629 argv += optind;
11630
11631 if (cb.cb_version == 0) {
11632 cb.cb_version = SPA_VERSION;
11633 } else if (!upgradeall && argc == 0) {
11634 (void) fprintf(stderr, gettext("-V option is "
11635 "incompatible with other arguments\n"));
11636 usage(B_FALSE);
11637 }
11638
11639 if (showversions) {
11640 if (upgradeall || argc != 0) {
11641 (void) fprintf(stderr, gettext("-v option is "
11642 "incompatible with other arguments\n"));
11643 usage(B_FALSE);
11644 }
11645 } else if (upgradeall) {
11646 if (argc != 0) {
11647 (void) fprintf(stderr, gettext("-a option should not "
11648 "be used along with a pool name\n"));
11649 usage(B_FALSE);
11650 }
11651 }
11652
11653 (void) printf("%s", gettext("This system supports ZFS pool feature "
11654 "flags.\n\n"));
11655 if (showversions) {
11656 int i;
11657
11658 (void) printf(gettext("The following features are "
11659 "supported:\n\n"));
11660 (void) printf(gettext("FEAT DESCRIPTION\n"));
11661 (void) printf("----------------------------------------------"
11662 "---------------\n");
11663 for (i = 0; i < SPA_FEATURES; i++) {
11664 zfeature_info_t *fi = &spa_feature_table[i];
11665 if (!fi->fi_zfs_mod_supported)
11666 continue;
11667 const char *ro =
11668 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
11669 " (read-only compatible)" : "";
11670
11671 (void) printf("%-37s%s\n", fi->fi_uname, ro);
11672 (void) printf(" %s\n", fi->fi_desc);
11673 }
11674 (void) printf("\n");
11675
11676 (void) printf(gettext("The following legacy versions are also "
11677 "supported:\n\n"));
11678 (void) printf(gettext("VER DESCRIPTION\n"));
11679 (void) printf("--- -----------------------------------------"
11680 "---------------\n");
11681 (void) printf(gettext(" 1 Initial ZFS version\n"));
11682 (void) printf(gettext(" 2 Ditto blocks "
11683 "(replicated metadata)\n"));
11684 (void) printf(gettext(" 3 Hot spares and double parity "
11685 "RAID-Z\n"));
11686 (void) printf(gettext(" 4 zpool history\n"));
11687 (void) printf(gettext(" 5 Compression using the gzip "
11688 "algorithm\n"));
11689 (void) printf(gettext(" 6 bootfs pool property\n"));
11690 (void) printf(gettext(" 7 Separate intent log devices\n"));
11691 (void) printf(gettext(" 8 Delegated administration\n"));
11692 (void) printf(gettext(" 9 refquota and refreservation "
11693 "properties\n"));
11694 (void) printf(gettext(" 10 Cache devices\n"));
11695 (void) printf(gettext(" 11 Improved scrub performance\n"));
11696 (void) printf(gettext(" 12 Snapshot properties\n"));
11697 (void) printf(gettext(" 13 snapused property\n"));
11698 (void) printf(gettext(" 14 passthrough-x aclinherit\n"));
11699 (void) printf(gettext(" 15 user/group space accounting\n"));
11700 (void) printf(gettext(" 16 stmf property support\n"));
11701 (void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
11702 (void) printf(gettext(" 18 Snapshot user holds\n"));
11703 (void) printf(gettext(" 19 Log device removal\n"));
11704 (void) printf(gettext(" 20 Compression using zle "
11705 "(zero-length encoding)\n"));
11706 (void) printf(gettext(" 21 Deduplication\n"));
11707 (void) printf(gettext(" 22 Received properties\n"));
11708 (void) printf(gettext(" 23 Slim ZIL\n"));
11709 (void) printf(gettext(" 24 System attributes\n"));
11710 (void) printf(gettext(" 25 Improved scrub stats\n"));
11711 (void) printf(gettext(" 26 Improved snapshot deletion "
11712 "performance\n"));
11713 (void) printf(gettext(" 27 Improved snapshot creation "
11714 "performance\n"));
11715 (void) printf(gettext(" 28 Multiple vdev replacements\n"));
11716 (void) printf(gettext("\nFor more information on a particular "
11717 "version, including supported releases,\n"));
11718 (void) printf(gettext("see the ZFS Administration Guide.\n\n"));
11719 } else if (argc == 0 && upgradeall) {
11720 cb.cb_first = B_TRUE;
11721 ret = zpool_iter(g_zfs, upgrade_cb, &cb);
11722 if (ret == 0 && cb.cb_first) {
11723 if (cb.cb_version == SPA_VERSION) {
11724 (void) printf(gettext("All pools are already "
11725 "formatted using feature flags.\n\n"));
11726 (void) printf(gettext("Every feature flags "
11727 "pool already has all supported and "
11728 "requested features enabled.\n"));
11729 } else {
11730 (void) printf(gettext("All pools are already "
11731 "formatted with version %llu or higher.\n"),
11732 (u_longlong_t)cb.cb_version);
11733 }
11734 }
11735 } else if (argc == 0) {
11736 cb.cb_first = B_TRUE;
11737 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
11738 assert(ret == 0);
11739
11740 if (cb.cb_first) {
11741 (void) printf(gettext("All pools are formatted "
11742 "using feature flags.\n\n"));
11743 } else {
11744 (void) printf(gettext("\nUse 'zpool upgrade -v' "
11745 "for a list of available legacy versions.\n"));
11746 }
11747
11748 cb.cb_first = B_TRUE;
11749 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
11750 assert(ret == 0);
11751
11752 if (cb.cb_first) {
11753 (void) printf(gettext("Every feature flags pool has "
11754 "all supported and requested features enabled.\n"));
11755 } else {
11756 (void) printf(gettext("\n"));
11757 }
11758 } else {
11759 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
11760 B_FALSE, upgrade_one, &cb);
11761 }
11762
11763 return (ret);
11764 }
11765
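/* Display options for 'zpool history' */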
11766 typedef struct hist_cbdata {
11767 boolean_t first;
11768 boolean_t longfmt;
11769 boolean_t internal;
11770 } hist_cbdata_t;
11771
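/*
 * Print the ZPOOL_HIST_RECORD entries from one chunk of pool history,
 * honoring the -l (long format) and -i (internal events) options.
 */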
11772 static void
11773 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
11774 {
11775 nvlist_t **records;
11776 uint_t numrecords;
11777 int i;
11778
11779 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
11780 &records, &numrecords) == 0);
11781 for (i = 0; i < numrecords; i++) {
11782 nvlist_t *rec = records[i];
11783 char tbuf[64] = "";
11784
11785 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
11786 time_t tsec;
11787 struct tm t;
11788
11789 tsec = fnvlist_lookup_uint64(records[i],
11790 ZPOOL_HIST_TIME);
11791 (void) localtime_r(&tsec, &t);
11792 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
11793 }
11794
11795 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
11796 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
11797 ZPOOL_HIST_ELAPSED_NS);
11798 (void) snprintf(tbuf + strlen(tbuf),
11799 sizeof (tbuf) - strlen(tbuf),
11800 " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
11801 }
11802
11803 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
11804 (void) printf("%s %s", tbuf,
11805 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
11806 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
11807 int ievent =
11808 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
11809 if (!cb->internal)
11810 continue;
11811 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
11812 (void) printf("%s unrecognized record:\n",
11813 tbuf);
11814 dump_nvlist(rec, 4);
11815 continue;
11816 }
11817 (void) printf("%s [internal %s txg:%lld] %s", tbuf,
11818 zfs_history_event_names[ievent],
11819 (longlong_t)fnvlist_lookup_uint64(
11820 rec, ZPOOL_HIST_TXG),
11821 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
11822 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
11823 if (!cb->internal)
11824 continue;
11825 (void) printf("%s [txg:%lld] %s", tbuf,
11826 (longlong_t)fnvlist_lookup_uint64(
11827 rec, ZPOOL_HIST_TXG),
11828 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
11829 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
11830 (void) printf(" %s (%llu)",
11831 fnvlist_lookup_string(rec,
11832 ZPOOL_HIST_DSNAME),
11833 (u_longlong_t)fnvlist_lookup_uint64(rec,
11834 ZPOOL_HIST_DSID));
11835 }
11836 (void) printf(" %s", fnvlist_lookup_string(rec,
11837 ZPOOL_HIST_INT_STR));
11838 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
11839 if (!cb->internal)
11840 continue;
11841 (void) printf("%s ioctl %s\n", tbuf,
11842 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
11843 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
11844 (void) printf(" input:\n");
11845 dump_nvlist(fnvlist_lookup_nvlist(rec,
11846 ZPOOL_HIST_INPUT_NVL), 8);
11847 }
11848 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
11849 (void) printf(" output:\n");
11850 dump_nvlist(fnvlist_lookup_nvlist(rec,
11851 ZPOOL_HIST_OUTPUT_NVL), 8);
11852 }
11853 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
11854 (void) printf(" output nvlist omitted; "
11855 "original size: %lldKB\n",
11856 (longlong_t)fnvlist_lookup_int64(rec,
11857 ZPOOL_HIST_OUTPUT_SIZE) / 1024);
11858 }
11859 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
11860 (void) printf(" errno: %lld\n",
11861 (longlong_t)fnvlist_lookup_int64(rec,
11862 ZPOOL_HIST_ERRNO));
11863 }
11864 } else {
11865 if (!cb->internal)
11866 continue;
11867 (void) printf("%s unrecognized record:\n", tbuf);
11868 dump_nvlist(rec, 4);
11869 }
11870
11871 if (!cb->longfmt) {
11872 (void) printf("\n");
11873 continue;
11874 }
11875 (void) printf(" [");
11876 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
11877 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
11878 struct passwd *pwd = getpwuid(who);
11879 (void) printf("user %d ", (int)who);
11880 if (pwd != NULL)
11881 (void) printf("(%s) ", pwd->pw_name);
11882 }
11883 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
11884 (void) printf("on %s",
11885 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
11886 }
11887 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
11888 (void) printf(":%s",
11889 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
11890 }
11891
11892 (void) printf("]");
11893 (void) printf("\n");
11894 }
11895 }
11896
11897 /*
11898 * Print out the command history for a specific pool.
11899 */
11900 static int
11901 get_history_one(zpool_handle_t *zhp, void *data)
11902 {
11903 nvlist_t *nvhis;
11904 int ret;
11905 hist_cbdata_t *cb = (hist_cbdata_t *)data;
11906 uint64_t off = 0;
11907 boolean_t eof = B_FALSE;
11908
11909 cb->first = B_FALSE;
11910
11911 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
11912
11913 while (!eof) {
11914 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
11915 return (ret);
11916
11917 print_history_records(nvhis, cb);
11918 nvlist_free(nvhis);
11919 }
11920 (void) printf("\n");
11921
11922 return (ret);
11923 }
11924
11925 /*
11926 * zpool history <pool>
11927 *
11928 * Displays the history of commands that modified pools.
11929 */
11930 int
11931 zpool_do_history(int argc, char **argv)
11932 {
11933 hist_cbdata_t cbdata = { 0 };
11934 int ret;
11935 int c;
11936
11937 cbdata.first = B_TRUE;
11938 /* check options */
11939 while ((c = getopt(argc, argv, "li")) != -1) {
11940 switch (c) {
11941 case 'l':
11942 cbdata.longfmt = B_TRUE;
11943 break;
11944 case 'i':
11945 cbdata.internal = B_TRUE;
11946 break;
11947 case '?':
11948 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
11949 optopt);
11950 usage(B_FALSE);
11951 }
11952 }
11953 argc -= optind;
11954 argv += optind;
11955
11956 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
11957 B_FALSE, get_history_one, &cbdata);
11958
11959 if (argc == 0 && cbdata.first == B_TRUE) {
11960 (void) fprintf(stderr, gettext("no pools available\n"));
11961 return (0);
11962 }
11963
11964 return (ret);
11965 }
11966
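/* Options for 'zpool events' */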
11967 typedef struct ev_opts {
11968 int verbose;
11969 int scripted;
11970 int follow;
11971 int clear;
11972 char poolname[ZFS_MAX_DATASET_NAME_LEN];
11973 } ev_opts_t;
11974
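/* Print the one-line TIME/CLASS summary for a single event nvlist. */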
11975 static void
11976 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
11977 {
11978 char ctime_str[26], str[32];
11979 const char *ptr;
11980 int64_t *tv;
11981 uint_t n;
11982
11983 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
11984 memset(str, ' ', 32);
11985 (void) ctime_r((const time_t *)&tv[0], ctime_str);
11986 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
11987 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
11988 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
11989 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
11990 if (opts->scripted)
11991 (void) printf(gettext("%s\t"), str);
11992 else
11993 (void) printf(gettext("%s "), str);
11994
11995 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
11996 (void) printf(gettext("%s\n"), ptr);
11997 }
11998
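/*
 * Recursively pretty-print each nvpair of an event nvlist, indented by
 * 'depth', decoding ZIO stage, priority, flag, and vdev state values into
 * readable strings.
 */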
11999 static void
12000 zpool_do_events_nvprint(nvlist_t *nvl, int depth)
12001 {
12002 nvpair_t *nvp;
12003 static char flagstr[256];
12004
12005 for (nvp = nvlist_next_nvpair(nvl, NULL);
12006 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
12007
12008 data_type_t type = nvpair_type(nvp);
12009 const char *name = nvpair_name(nvp);
12010
12011 boolean_t b;
12012 uint8_t i8;
12013 uint16_t i16;
12014 uint32_t i32;
12015 uint64_t i64;
12016 const char *str;
12017 nvlist_t *cnv;
12018
12019 printf(gettext("%*s%s = "), depth, "", name);
12020
12021 switch (type) {
12022 case DATA_TYPE_BOOLEAN:
12023 printf(gettext("%s"), "1");
12024 break;
12025
12026 case DATA_TYPE_BOOLEAN_VALUE:
12027 (void) nvpair_value_boolean_value(nvp, &b);
12028 printf(gettext("%s"), b ? "1" : "0");
12029 break;
12030
12031 case DATA_TYPE_BYTE:
12032 (void) nvpair_value_byte(nvp, &i8);
12033 printf(gettext("0x%x"), i8);
12034 break;
12035
12036 case DATA_TYPE_INT8:
12037 (void) nvpair_value_int8(nvp, (void *)&i8);
12038 printf(gettext("0x%x"), i8);
12039 break;
12040
12041 case DATA_TYPE_UINT8:
12042 (void) nvpair_value_uint8(nvp, &i8);
12043 printf(gettext("0x%x"), i8);
12044 break;
12045
12046 case DATA_TYPE_INT16:
12047 (void) nvpair_value_int16(nvp, (void *)&i16);
12048 printf(gettext("0x%x"), i16);
12049 break;
12050
12051 case DATA_TYPE_UINT16:
12052 (void) nvpair_value_uint16(nvp, &i16);
12053 printf(gettext("0x%x"), i16);
12054 break;
12055
12056 case DATA_TYPE_INT32:
12057 (void) nvpair_value_int32(nvp, (void *)&i32);
12058 printf(gettext("0x%x"), i32);
12059 break;
12060
12061 case DATA_TYPE_UINT32:
12062 (void) nvpair_value_uint32(nvp, &i32);
12063 if (strcmp(name,
12064 FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 ||
12065 strcmp(name,
12066 FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) {
12067 zfs_valstr_zio_stage(i32, flagstr,
12068 sizeof (flagstr));
12069 printf(gettext("0x%x [%s]"), i32, flagstr);
12070 } else if (strcmp(name,
12071 FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
12072 zfs_valstr_zio_priority(i32, flagstr,
12073 sizeof (flagstr));
12074 printf(gettext("0x%x [%s]"), i32, flagstr);
12075 } else {
12076 printf(gettext("0x%x"), i32);
12077 }
12078 break;
12079
12080 case DATA_TYPE_INT64:
12081 (void) nvpair_value_int64(nvp, (void *)&i64);
12082 printf(gettext("0x%llx"), (u_longlong_t)i64);
12083 break;
12084
12085 case DATA_TYPE_UINT64:
12086 (void) nvpair_value_uint64(nvp, &i64);
12087 /*
12088 * translate vdev state values to readable
12089 			 * strings to aid zpool events consumers
12090 */
12091 if (strcmp(name,
12092 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
12093 strcmp(name,
12094 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
12095 printf(gettext("\"%s\" (0x%llx)"),
12096 zpool_state_to_name(i64, VDEV_AUX_NONE),
12097 (u_longlong_t)i64);
12098 } else if (strcmp(name,
12099 FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) {
12100 zfs_valstr_zio_flag(i64, flagstr,
12101 sizeof (flagstr));
12102 printf(gettext("0x%llx [%s]"),
12103 (u_longlong_t)i64, flagstr);
12104 } else {
12105 printf(gettext("0x%llx"), (u_longlong_t)i64);
12106 }
12107 break;
12108
12109 case DATA_TYPE_HRTIME:
12110 (void) nvpair_value_hrtime(nvp, (void *)&i64);
12111 printf(gettext("0x%llx"), (u_longlong_t)i64);
12112 break;
12113
12114 case DATA_TYPE_STRING:
12115 (void) nvpair_value_string(nvp, &str);
12116 printf(gettext("\"%s\""), str ? str : "<NULL>");
12117 break;
12118
12119 case DATA_TYPE_NVLIST:
12120 printf(gettext("(embedded nvlist)\n"));
12121 (void) nvpair_value_nvlist(nvp, &cnv);
12122 zpool_do_events_nvprint(cnv, depth + 8);
12123 printf(gettext("%*s(end %s)"), depth, "", name);
12124 break;
12125
12126 case DATA_TYPE_NVLIST_ARRAY: {
12127 nvlist_t **val;
12128 uint_t i, nelem;
12129
12130 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
12131 printf(gettext("(%d embedded nvlists)\n"), nelem);
12132 for (i = 0; i < nelem; i++) {
12133 printf(gettext("%*s%s[%d] = %s\n"),
12134 depth, "", name, i, "(embedded nvlist)");
12135 zpool_do_events_nvprint(val[i], depth + 8);
12136 printf(gettext("%*s(end %s[%i])\n"),
12137 depth, "", name, i);
12138 }
12139 printf(gettext("%*s(end %s)\n"), depth, "", name);
12140 }
12141 break;
12142
12143 case DATA_TYPE_INT8_ARRAY: {
12144 int8_t *val;
12145 uint_t i, nelem;
12146
12147 (void) nvpair_value_int8_array(nvp, &val, &nelem);
12148 for (i = 0; i < nelem; i++)
12149 printf(gettext("0x%x "), val[i]);
12150
12151 break;
12152 }
12153
12154 case DATA_TYPE_UINT8_ARRAY: {
12155 uint8_t *val;
12156 uint_t i, nelem;
12157
12158 (void) nvpair_value_uint8_array(nvp, &val, &nelem);
12159 for (i = 0; i < nelem; i++)
12160 printf(gettext("0x%x "), val[i]);
12161
12162 break;
12163 }
12164
12165 case DATA_TYPE_INT16_ARRAY: {
12166 int16_t *val;
12167 uint_t i, nelem;
12168
12169 (void) nvpair_value_int16_array(nvp, &val, &nelem);
12170 for (i = 0; i < nelem; i++)
12171 printf(gettext("0x%x "), val[i]);
12172
12173 break;
12174 }
12175
12176 case DATA_TYPE_UINT16_ARRAY: {
12177 uint16_t *val;
12178 uint_t i, nelem;
12179
12180 (void) nvpair_value_uint16_array(nvp, &val, &nelem);
12181 for (i = 0; i < nelem; i++)
12182 printf(gettext("0x%x "), val[i]);
12183
12184 break;
12185 }
12186
12187 case DATA_TYPE_INT32_ARRAY: {
12188 int32_t *val;
12189 uint_t i, nelem;
12190
12191 (void) nvpair_value_int32_array(nvp, &val, &nelem);
12192 for (i = 0; i < nelem; i++)
12193 printf(gettext("0x%x "), val[i]);
12194
12195 break;
12196 }
12197
12198 case DATA_TYPE_UINT32_ARRAY: {
12199 uint32_t *val;
12200 uint_t i, nelem;
12201
12202 (void) nvpair_value_uint32_array(nvp, &val, &nelem);
12203 for (i = 0; i < nelem; i++)
12204 printf(gettext("0x%x "), val[i]);
12205
12206 break;
12207 }
12208
12209 case DATA_TYPE_INT64_ARRAY: {
12210 int64_t *val;
12211 uint_t i, nelem;
12212
12213 (void) nvpair_value_int64_array(nvp, &val, &nelem);
12214 for (i = 0; i < nelem; i++)
12215 printf(gettext("0x%llx "),
12216 (u_longlong_t)val[i]);
12217
12218 break;
12219 }
12220
12221 case DATA_TYPE_UINT64_ARRAY: {
12222 uint64_t *val;
12223 uint_t i, nelem;
12224
12225 (void) nvpair_value_uint64_array(nvp, &val, &nelem);
12226 for (i = 0; i < nelem; i++)
12227 printf(gettext("0x%llx "),
12228 (u_longlong_t)val[i]);
12229
12230 break;
12231 }
12232
12233 case DATA_TYPE_STRING_ARRAY: {
12234 const char **str;
12235 uint_t i, nelem;
12236
12237 (void) nvpair_value_string_array(nvp, &str, &nelem);
12238 for (i = 0; i < nelem; i++)
12239 printf(gettext("\"%s\" "),
12240 str[i] ? str[i] : "<NULL>");
12241
12242 break;
12243 }
12244
12245 case DATA_TYPE_BOOLEAN_ARRAY:
12246 case DATA_TYPE_BYTE_ARRAY:
12247 case DATA_TYPE_DOUBLE:
12248 case DATA_TYPE_DONTCARE:
12249 case DATA_TYPE_UNKNOWN:
12250 printf(gettext("<unknown>"));
12251 break;
12252 }
12253
12254 printf(gettext("\n"));
12255 }
12256 }
12257
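/*
 * Drain events from the kernel, optionally blocking for new ones (-f) and
 * filtering on the pool name given on the command line.
 */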
12258 static int
12259 zpool_do_events_next(ev_opts_t *opts)
12260 {
12261 nvlist_t *nvl;
12262 int zevent_fd, ret, dropped;
12263 const char *pool;
12264
12265 zevent_fd = open(ZFS_DEV, O_RDWR);
12266 VERIFY(zevent_fd >= 0);
12267
12268 if (!opts->scripted)
12269 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
12270
12271 while (1) {
12272 ret = zpool_events_next(g_zfs, &nvl, &dropped,
12273 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
12274 if (ret || nvl == NULL)
12275 break;
12276
12277 if (dropped > 0)
12278 (void) printf(gettext("dropped %d events\n"), dropped);
12279
12280 if (strlen(opts->poolname) > 0 &&
12281 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
12282 strcmp(opts->poolname, pool) != 0)
12283 continue;
12284
12285 zpool_do_events_short(nvl, opts);
12286
12287 if (opts->verbose) {
12288 zpool_do_events_nvprint(nvl, 8);
12289 printf(gettext("\n"));
12290 }
12291 (void) fflush(stdout);
12292
12293 nvlist_free(nvl);
12294 }
12295
12296 VERIFY(0 == close(zevent_fd));
12297
12298 return (ret);
12299 }
12300
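/* Discard all pending events and report how many were cleared. */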
12301 static int
12302 zpool_do_events_clear(void)
12303 {
12304 int count, ret;
12305
12306 ret = zpool_events_clear(g_zfs, &count);
12307 if (!ret)
12308 (void) printf(gettext("cleared %d events\n"), count);
12309
12310 return (ret);
12311 }
12312
12313 /*
12314 * zpool events [-vHf [pool] | -c]
12315 *
12316  * Displays events logged by ZFS.
12317 */
12318 int
12319 zpool_do_events(int argc, char **argv)
12320 {
12321 ev_opts_t opts = { 0 };
12322 int ret;
12323 int c;
12324
12325 /* check options */
12326 while ((c = getopt(argc, argv, "vHfc")) != -1) {
12327 switch (c) {
12328 case 'v':
12329 opts.verbose = 1;
12330 break;
12331 case 'H':
12332 opts.scripted = 1;
12333 break;
12334 case 'f':
12335 opts.follow = 1;
12336 break;
12337 case 'c':
12338 opts.clear = 1;
12339 break;
12340 case '?':
12341 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12342 optopt);
12343 usage(B_FALSE);
12344 }
12345 }
12346 argc -= optind;
12347 argv += optind;
12348
12349 if (argc > 1) {
12350 (void) fprintf(stderr, gettext("too many arguments\n"));
12351 usage(B_FALSE);
12352 } else if (argc == 1) {
12353 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
12354 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
12355 (void) fprintf(stderr,
12356 gettext("invalid pool name '%s'\n"), opts.poolname);
12357 usage(B_FALSE);
12358 }
12359 }
12360
12361 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
12362 opts.clear) {
12363 (void) fprintf(stderr,
12364 gettext("invalid options combined with -c\n"));
12365 usage(B_FALSE);
12366 }
12367
12368 if (opts.clear)
12369 ret = zpool_do_events_clear();
12370 else
12371 ret = zpool_do_events_next(&opts);
12372
12373 return (ret);
12374 }
12375
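/*
 * Collect the requested properties for a single vdev, accumulating them
 * into the JSON object when JSON output was requested.
 */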
12376 static int
12377 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
12378 {
12379 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12380 char value[ZFS_MAXPROPLEN];
12381 zprop_source_t srctype;
12382 nvlist_t *props, *item, *d;
12383 props = item = d = NULL;
12384
12385 if (cbp->cb_json) {
12386 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs");
12387 if (d == NULL) {
12388 fprintf(stderr, "vdevs obj not found.\n");
12389 exit(1);
12390 }
12391 props = fnvlist_alloc();
12392 }
12393
12394 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
12395 pl = pl->pl_next) {
12396 char *prop_name;
12397 /*
12398 		 * If the first property is the pool name, it is a special
12399 * placeholder that we can skip. This will also skip
12400 * over the name property when 'all' is specified.
12401 */
12402 if (pl->pl_prop == ZPOOL_PROP_NAME &&
12403 pl == cbp->cb_proplist)
12404 continue;
12405
12406 if (pl->pl_prop == ZPROP_INVAL) {
12407 prop_name = pl->pl_user_prop;
12408 } else {
12409 prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
12410 }
12411 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
12412 prop_name, value, sizeof (value), &srctype,
12413 cbp->cb_literal) == 0) {
12414 zprop_collect_property(vdevname, cbp, prop_name,
12415 value, srctype, NULL, NULL, props);
12416 }
12417 }
12418
12419 if (cbp->cb_json) {
12420 if (!nvlist_empty(props)) {
12421 item = fnvlist_alloc();
12422 fill_vdev_info(item, zhp, vdevname, B_TRUE,
12423 cbp->cb_json_as_int);
12424 fnvlist_add_nvlist(item, "properties", props);
12425 fnvlist_add_nvlist(d, vdevname, item);
12426 fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d);
12427 fnvlist_free(item);
12428 }
12429 fnvlist_free(props);
12430 }
12431
12432 return (0);
12433 }
12434
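/*
 * for_each_vdev() callback: derive the vdev's display name, then collect
 * its properties via get_callback_vdev().
 */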
12435 static int
12436 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
12437 {
12438 zpool_handle_t *zhp = zhp_data;
12439 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12440 char *vdevname;
12441 const char *type;
12442 int ret;
12443
12444 /*
12445 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
12446  * pool name for display purposes, which is not desired. Fall back to
12447 * zpool_vdev_name() when not dealing with the root vdev.
12448 */
12449 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
12450 if (zhp != NULL && strcmp(type, "root") == 0)
12451 vdevname = strdup("root-0");
12452 else
12453 vdevname = zpool_vdev_name(g_zfs, zhp, nv,
12454 cbp->cb_vdevs.cb_name_flags);
12455
12456 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
12457
12458 ret = get_callback_vdev(zhp, vdevname, data);
12459
12460 free(vdevname);
12461
12462 return (ret);
12463 }
12464
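/*
 * for_each_pool() callback for 'zpool get': gather pool properties, or
 * hand off to the per-vdev collectors when vdev properties were requested.
 */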
12465 static int
12466 get_callback(zpool_handle_t *zhp, void *data)
12467 {
12468 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12469 char value[ZFS_MAXPROPLEN];
12470 zprop_source_t srctype;
12471 zprop_list_t *pl;
12472 int vid;
12473 int err = 0;
12474 nvlist_t *props, *item, *d;
12475 props = item = d = NULL;
12476
12477 if (cbp->cb_type == ZFS_TYPE_VDEV) {
12478 if (cbp->cb_json) {
12479 nvlist_t *pool = fnvlist_alloc();
12480 fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int);
12481 fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool);
12482 fnvlist_free(pool);
12483 }
12484
12485 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
12486 for_each_vdev(zhp, get_callback_vdev_cb, data);
12487 } else {
12488 /* Adjust column widths for vdev properties */
12489 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
12490 vid++) {
12491 vdev_expand_proplist(zhp,
12492 cbp->cb_vdevs.cb_names[vid],
12493 &cbp->cb_proplist);
12494 }
12495 /* Display the properties */
12496 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
12497 vid++) {
12498 get_callback_vdev(zhp,
12499 cbp->cb_vdevs.cb_names[vid], data);
12500 }
12501 }
12502 } else {
12503 assert(cbp->cb_type == ZFS_TYPE_POOL);
12504 if (cbp->cb_json) {
12505 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
12506 if (d == NULL) {
12507 fprintf(stderr, "pools obj not found.\n");
12508 exit(1);
12509 }
12510 props = fnvlist_alloc();
12511 }
12512 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
12513 /*
12514 * Skip the special fake placeholder. This will also
12515 * skip over the name property when 'all' is specified.
12516 */
12517 if (pl->pl_prop == ZPOOL_PROP_NAME &&
12518 pl == cbp->cb_proplist)
12519 continue;
12520
12521 if (pl->pl_prop == ZPROP_INVAL &&
12522 zfs_prop_user(pl->pl_user_prop)) {
12523 srctype = ZPROP_SRC_LOCAL;
12524
12525 if (zpool_get_userprop(zhp, pl->pl_user_prop,
12526 value, sizeof (value), &srctype) != 0)
12527 continue;
12528
12529 err = zprop_collect_property(
12530 zpool_get_name(zhp), cbp, pl->pl_user_prop,
12531 value, srctype, NULL, NULL, props);
12532 } else if (pl->pl_prop == ZPROP_INVAL &&
12533 (zpool_prop_feature(pl->pl_user_prop) ||
12534 zpool_prop_unsupported(pl->pl_user_prop))) {
12535 srctype = ZPROP_SRC_LOCAL;
12536
12537 if (zpool_prop_get_feature(zhp,
12538 pl->pl_user_prop, value,
12539 sizeof (value)) == 0) {
12540 err = zprop_collect_property(
12541 zpool_get_name(zhp), cbp,
12542 pl->pl_user_prop, value, srctype,
12543 NULL, NULL, props);
12544 }
12545 } else {
12546 if (zpool_get_prop(zhp, pl->pl_prop, value,
12547 sizeof (value), &srctype,
12548 cbp->cb_literal) != 0)
12549 continue;
12550
12551 err = zprop_collect_property(
12552 zpool_get_name(zhp), cbp,
12553 zpool_prop_to_name(pl->pl_prop),
12554 value, srctype, NULL, NULL, props);
12555 }
12556 if (err != 0)
12557 return (err);
12558 }
12559
12560 if (cbp->cb_json) {
12561 if (!nvlist_empty(props)) {
12562 item = fnvlist_alloc();
12563 fill_pool_info(item, zhp, B_TRUE,
12564 cbp->cb_json_as_int);
12565 fnvlist_add_nvlist(item, "properties", props);
12566 if (cbp->cb_json_pool_key_guid) {
12567 char buf[256];
12568 uint64_t guid = fnvlist_lookup_uint64(
12569 zpool_get_config(zhp, NULL),
12570 ZPOOL_CONFIG_POOL_GUID);
12571 snprintf(buf, 256, "%llu",
12572 (u_longlong_t)guid);
12573 fnvlist_add_nvlist(d, buf, item);
12574 } else {
12575 const char *name = zpool_get_name(zhp);
12576 fnvlist_add_nvlist(d, name, item);
12577 }
12578 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
12579 fnvlist_free(item);
12580 }
12581 fnvlist_free(props);
12582 }
12583 }
12584
12585 return (0);
12586 }
12587
12588 /*
12589  * zpool get [-Hp] [-j] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
12590 *
12591 * -H Scripted mode. Don't display headers, and separate properties
12592 * by a single tab.
12593 * -o List of columns to display. Defaults to
12594 * "name,property,value,source".
12595 * -p Display values in parsable (exact) format.
12596 * -j Display output in JSON format.
12597 * --json-int Display numbers as integers instead of strings.
12598 * --json-pool-key-guid Set pool GUID as key for pool objects.
12599 *
12600 * Get properties of pools in the system. Output space statistics
12601 * for each one as well as other attributes.
12602 */
12603 int
12604 zpool_do_get(int argc, char **argv)
12605 {
12606 zprop_get_cbdata_t cb = { 0 };
12607 zprop_list_t fake_name = { 0 };
12608 int ret;
12609 int c, i;
12610 char *propstr = NULL;
12611 char *vdev = NULL;
12612 nvlist_t *data = NULL;
12613
12614 cb.cb_first = B_TRUE;
12615
12616 /*
12617 * Set up default columns and sources.
12618 */
12619 cb.cb_sources = ZPROP_SRC_ALL;
12620 cb.cb_columns[0] = GET_COL_NAME;
12621 cb.cb_columns[1] = GET_COL_PROPERTY;
12622 cb.cb_columns[2] = GET_COL_VALUE;
12623 cb.cb_columns[3] = GET_COL_SOURCE;
12624 cb.cb_type = ZFS_TYPE_POOL;
12625 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
12626 current_prop_type = cb.cb_type;
12627
12628 struct option long_options[] = {
12629 {"json", no_argument, NULL, 'j'},
12630 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
12631 {"json-pool-key-guid", no_argument, NULL,
12632 ZPOOL_OPTION_POOL_KEY_GUID},
12633 {0, 0, 0, 0}
12634 };
12635
12636 /* check options */
12637 while ((c = getopt_long(argc, argv, ":jHpo:", long_options,
12638 NULL)) != -1) {
12639 switch (c) {
12640 case 'p':
12641 cb.cb_literal = B_TRUE;
12642 break;
12643 case 'H':
12644 cb.cb_scripted = B_TRUE;
12645 break;
12646 case 'j':
12647 cb.cb_json = B_TRUE;
12648 cb.cb_jsobj = zpool_json_schema(0, 1);
12649 data = fnvlist_alloc();
12650 break;
12651 case ZPOOL_OPTION_POOL_KEY_GUID:
12652 cb.cb_json_pool_key_guid = B_TRUE;
12653 break;
12654 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
12655 cb.cb_json_as_int = B_TRUE;
12656 cb.cb_literal = B_TRUE;
12657 break;
12658 case 'o':
12659 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
12660 i = 0;
12661
12662 for (char *tok; (tok = strsep(&optarg, ",")); ) {
12663 static const char *const col_opts[] =
12664 { "name", "property", "value", "source",
12665 "all" };
12666 static const zfs_get_column_t col_cols[] =
12667 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
12668 GET_COL_SOURCE };
12669
12670 if (i == ZFS_GET_NCOLS - 1) {
12671 (void) fprintf(stderr, gettext("too "
12672 "many fields given to -o "
12673 "option\n"));
12674 usage(B_FALSE);
12675 }
12676
12677 for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
12678 if (strcmp(tok, col_opts[c]) == 0)
12679 goto found;
12680
12681 (void) fprintf(stderr,
12682 gettext("invalid column name '%s'\n"), tok);
12683 usage(B_FALSE);
12684
12685 found:
12686 if (c >= 4) {
12687 if (i > 0) {
12688 (void) fprintf(stderr,
12689 gettext("\"all\" conflicts "
12690 "with specific fields "
12691 "given to -o option\n"));
12692 usage(B_FALSE);
12693 }
12694
12695 memcpy(cb.cb_columns, col_cols,
12696 sizeof (col_cols));
12697 i = ZFS_GET_NCOLS - 1;
12698 } else
12699 cb.cb_columns[i++] = col_cols[c];
12700 }
12701 break;
12702 case '?':
12703 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12704 optopt);
12705 usage(B_FALSE);
12706 }
12707 }
12708
12709 argc -= optind;
12710 argv += optind;
12711
12712 if (!cb.cb_json && cb.cb_json_as_int) {
12713 (void) fprintf(stderr, gettext("'--json-int' only works with"
12714 " '-j' option\n"));
12715 usage(B_FALSE);
12716 }
12717
12718 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
12719 (void) fprintf(stderr, gettext("'json-pool-key-guid' only"
12720 " works with '-j' option\n"));
12721 usage(B_FALSE);
12722 }
12723
12724 if (argc < 1) {
12725 (void) fprintf(stderr, gettext("missing property "
12726 "argument\n"));
12727 usage(B_FALSE);
12728 }
12729
12730 /* Properties list is needed later by zprop_get_list() */
12731 propstr = argv[0];
12732
12733 argc--;
12734 argv++;
12735
12736 if (argc == 0) {
12737 /* No args, so just print the defaults. */
12738 } else if (are_all_pools(argc, argv)) {
12739 /* All the args are pool names */
12740 } else if (are_all_pools(1, argv)) {
12741 /* The first arg is a pool name */
12742 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
12743 (argc == 2 && strcmp(argv[1], "root") == 0) ||
12744 are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
12745 &cb.cb_vdevs)) {
12746
12747 if (strcmp(argv[1], "root") == 0)
12748 vdev = strdup("root-0");
12749 else
12750 vdev = strdup(argv[1]);
12751
12752 /* ... and the rest are vdev names */
12753 cb.cb_vdevs.cb_names = &vdev;
12754 cb.cb_vdevs.cb_names_count = argc - 1;
12755 cb.cb_type = ZFS_TYPE_VDEV;
12756 argc = 1; /* One pool to process */
12757 } else {
12758 if (cb.cb_json) {
12759 nvlist_free(cb.cb_jsobj);
12760 nvlist_free(data);
12761 }
12762 fprintf(stderr, gettext("Expected a list of vdevs in"
12763 " \"%s\", but got:\n"), argv[0]);
12764 error_list_unresolved_vdevs(argc - 1, argv + 1,
12765 argv[0], &cb.cb_vdevs);
12766 fprintf(stderr, "\n");
12767 usage(B_FALSE);
12768 return (1);
12769 }
12770 } else {
12771 if (cb.cb_json) {
12772 nvlist_free(cb.cb_jsobj);
12773 nvlist_free(data);
12774 }
12775 /*
12776 * The first arg isn't the name of a valid pool.
12777 */
12778 fprintf(stderr, gettext("Cannot get properties of %s: "
12779 "no such pool available.\n"), argv[0]);
12780 return (1);
12781 }
12782
12783 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
12784 cb.cb_type) != 0) {
12785 /* Use correct list of valid properties (pool or vdev) */
12786 current_prop_type = cb.cb_type;
12787 usage(B_FALSE);
12788 }
12789
12790 if (cb.cb_proplist != NULL) {
12791 fake_name.pl_prop = ZPOOL_PROP_NAME;
12792 fake_name.pl_width = strlen(gettext("NAME"));
12793 fake_name.pl_next = cb.cb_proplist;
12794 cb.cb_proplist = &fake_name;
12795 }
12796
12797 if (cb.cb_json) {
12798 if (cb.cb_type == ZFS_TYPE_VDEV)
12799 fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data);
12800 else
12801 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
12802 fnvlist_free(data);
12803 }
12804
12805 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
12806 cb.cb_literal, get_callback, &cb);
12807
12808 if (ret == 0 && cb.cb_json)
12809 zcmd_print_json(cb.cb_jsobj);
12810 else if (ret != 0 && cb.cb_json)
12811 nvlist_free(cb.cb_jsobj);
12812
12813 if (cb.cb_proplist == &fake_name)
12814 zprop_free_list(fake_name.pl_next);
12815 else
12816 zprop_free_list(cb.cb_proplist);
12817
12818 if (vdev != NULL)
12819 free(vdev);
12820
12821 return (ret);
12822 }
12823
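/* Callback data for 'zpool set' */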
12824 typedef struct set_cbdata {
12825 char *cb_propname;
12826 char *cb_value;
12827 zfs_type_t cb_type;
12828 vdev_cbdata_t cb_vdevs;
12829 boolean_t cb_any_successful;
12830 } set_cbdata_t;
12831
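/*
 * Set a single pool property, first cross-checking 'compatibility' changes
 * and feature enables against the pool's allowed feature set.
 */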
12832 static int
12833 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
12834 {
12835 int error;
12836
12837 /* Check if we have out-of-bounds features */
12838 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
12839 boolean_t features[SPA_FEATURES];
12840 if (zpool_do_load_compat(cb->cb_value, features) !=
12841 ZPOOL_COMPATIBILITY_OK)
12842 return (-1);
12843
12844 nvlist_t *enabled = zpool_get_features(zhp);
12845 spa_feature_t i;
12846 for (i = 0; i < SPA_FEATURES; i++) {
12847 const char *fguid = spa_feature_table[i].fi_guid;
12848 if (nvlist_exists(enabled, fguid) && !features[i])
12849 break;
12850 }
12851 if (i < SPA_FEATURES)
12852 (void) fprintf(stderr, gettext("Warning: one or "
12853 "more features already enabled on pool '%s'\n"
12854 "are not present in this compatibility set.\n"),
12855 zpool_get_name(zhp));
12856 }
12857
12858 /* if we're setting a feature, check it's in compatibility set */
12859 if (zpool_prop_feature(cb->cb_propname) &&
12860 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
12861 char *fname = strchr(cb->cb_propname, '@') + 1;
12862 spa_feature_t f;
12863
12864 if (zfeature_lookup_name(fname, &f) == 0) {
12865 char compat[ZFS_MAXPROPLEN];
12866 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
12867 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
12868 compat[0] = '\0';
12869
12870 boolean_t features[SPA_FEATURES];
12871 if (zpool_do_load_compat(compat, features) !=
12872 ZPOOL_COMPATIBILITY_OK) {
12873 (void) fprintf(stderr, gettext("Error: "
12874 "cannot enable feature '%s' on pool '%s'\n"
12875 "because the pool's 'compatibility' "
12876 "property cannot be parsed.\n"),
12877 fname, zpool_get_name(zhp));
12878 return (-1);
12879 }
12880
12881 if (!features[f]) {
12882 (void) fprintf(stderr, gettext("Error: "
12883 "cannot enable feature '%s' on pool '%s'\n"
12884 "as it is not specified in this pool's "
12885 "current compatibility set.\n"
12886 "Consider setting 'compatibility' to a "
12887 "less restrictive set, or to 'off'.\n"),
12888 fname, zpool_get_name(zhp));
12889 return (-1);
12890 }
12891 }
12892 }
12893
12894 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
12895
12896 return (error);
12897 }
12898
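/* for_each_pool() callback for 'zpool set': vdev or pool property. */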
12899 static int
12900 set_callback(zpool_handle_t *zhp, void *data)
12901 {
12902 int error;
12903 set_cbdata_t *cb = (set_cbdata_t *)data;
12904
12905 if (cb->cb_type == ZFS_TYPE_VDEV) {
12906 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
12907 cb->cb_propname, cb->cb_value);
12908 } else {
12909 assert(cb->cb_type == ZFS_TYPE_POOL);
12910 error = set_pool_callback(zhp, cb);
12911 }
12912
12913 cb->cb_any_successful = !error;
12914 return (error);
12915 }
12916
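/*
 * zpool set property=value <pool> [vdev]
 *
 * Sets the given pool property, or a vdev property when a vdev is named.
 */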
12917 int
12918 zpool_do_set(int argc, char **argv)
12919 {
12920 set_cbdata_t cb = { 0 };
12921 int error;
12922 char *vdev = NULL;
12923
12924 current_prop_type = ZFS_TYPE_POOL;
12925 if (argc > 1 && argv[1][0] == '-') {
12926 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12927 argv[1][1]);
12928 usage(B_FALSE);
12929 }
12930
12931 if (argc < 2) {
12932 (void) fprintf(stderr, gettext("missing property=value "
12933 "argument\n"));
12934 usage(B_FALSE);
12935 }
12936
12937 if (argc < 3) {
12938 (void) fprintf(stderr, gettext("missing pool name\n"));
12939 usage(B_FALSE);
12940 }
12941
12942 if (argc > 4) {
12943 (void) fprintf(stderr, gettext("too many pool names\n"));
12944 usage(B_FALSE);
12945 }
12946
12947 cb.cb_propname = argv[1];
12948 cb.cb_type = ZFS_TYPE_POOL;
12949 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
12950 cb.cb_value = strchr(cb.cb_propname, '=');
12951 if (cb.cb_value == NULL) {
12952 (void) fprintf(stderr, gettext("missing value in "
12953 "property=value argument\n"));
12954 usage(B_FALSE);
12955 }
12956
12957 *(cb.cb_value) = '\0';
12958 cb.cb_value++;
12959 argc -= 2;
12960 argv += 2;
12961
12962 /* argv[0] is pool name */
12963 if (!is_pool(argv[0])) {
12964 (void) fprintf(stderr,
12965 gettext("cannot open '%s': is not a pool\n"), argv[0]);
12966 return (EINVAL);
12967 }
12968
12969 /* argv[1], when supplied, is vdev name */
12970 if (argc == 2) {
12971
12972 if (strcmp(argv[1], "root") == 0)
12973 vdev = strdup("root-0");
12974 else
12975 vdev = strdup(argv[1]);
12976
12977 if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
12978 (void) fprintf(stderr, gettext(
12979 "cannot find '%s' in '%s': device not in pool\n"),
12980 vdev, argv[0]);
12981 free(vdev);
12982 return (EINVAL);
12983 }
12984 cb.cb_vdevs.cb_names = &vdev;
12985 cb.cb_vdevs.cb_names_count = 1;
12986 cb.cb_type = ZFS_TYPE_VDEV;
12987 }
12988
12989 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
12990 B_FALSE, set_callback, &cb);
12991
12992 if (vdev != NULL)
12993 free(vdev);
12994
12995 return (error);
12996 }
12997
12998 /* Add up the total number of bytes left to initialize/trim across all vdevs */
12999 static uint64_t
13000 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
13001 {
13002 uint64_t bytes_remaining;
13003 nvlist_t **child;
13004 uint_t c, children;
13005 vdev_stat_t *vs;
13006
13007 assert(activity == ZPOOL_WAIT_INITIALIZE ||
13008 activity == ZPOOL_WAIT_TRIM);
13009
13010 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
13011 (uint64_t **)&vs, &c) == 0);
13012
13013 if (activity == ZPOOL_WAIT_INITIALIZE &&
13014 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
13015 bytes_remaining = vs->vs_initialize_bytes_est -
13016 vs->vs_initialize_bytes_done;
13017 else if (activity == ZPOOL_WAIT_TRIM &&
13018 vs->vs_trim_state == VDEV_TRIM_ACTIVE)
13019 bytes_remaining = vs->vs_trim_bytes_est -
13020 vs->vs_trim_bytes_done;
13021 else
13022 bytes_remaining = 0;
13023
13024 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13025 &child, &children) != 0)
13026 children = 0;
13027
13028 for (c = 0; c < children; c++)
13029 bytes_remaining += vdev_activity_remaining(child[c], activity);
13030
13031 return (bytes_remaining);
13032 }
13033
13034 /* Add up the total number of bytes left to rebuild across top-level vdevs */
13035 static uint64_t
13036 vdev_activity_top_remaining(nvlist_t *nv)
13037 {
13038 uint64_t bytes_remaining = 0;
13039 nvlist_t **child;
13040 uint_t children;
13041 int error;
13042
13043 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13044 &child, &children) != 0)
13045 children = 0;
13046
13047 for (uint_t c = 0; c < children; c++) {
13048 vdev_rebuild_stat_t *vrs;
13049 uint_t i;
13050
13051 error = nvlist_lookup_uint64_array(child[c],
13052 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
13053 if (error == 0) {
13054 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
13055 bytes_remaining += (vrs->vrs_bytes_est -
13056 vrs->vrs_bytes_rebuilt);
13057 }
13058 }
13059 }
13060
13061 return (bytes_remaining);
13062 }
13063
13064 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
13065 static boolean_t
13066 vdev_any_spare_replacing(nvlist_t *nv)
13067 {
13068 nvlist_t **child;
13069 uint_t c, children;
13070 const char *vdev_type;
13071
13072 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
13073
13074 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
13075 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
13076 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
13077 return (B_TRUE);
13078 }
13079
13080 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13081 &child, &children) != 0)
13082 children = 0;
13083
13084 for (c = 0; c < children; c++) {
13085 if (vdev_any_spare_replacing(child[c]))
13086 return (B_TRUE);
13087 }
13088
13089 return (B_FALSE);
13090 }
13091
13092 typedef struct wait_data {
13093 char *wd_poolname;
13094 boolean_t wd_scripted;
13095 boolean_t wd_exact;
13096 boolean_t wd_headers_once;
13097 boolean_t wd_should_exit;
13098 /* Which activities to wait for */
13099 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
13100 float wd_interval;
13101 pthread_cond_t wd_cv;
13102 pthread_mutex_t wd_mutex;
13103 } wait_data_t;
13104
13105 /*
13106 * Print to stdout a single line, containing one column for each activity that
13107  * we are waiting for, specifying how many bytes of work are left for that
13108 * activity.
13109 */
13110 static void
13111 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
13112 {
13113 nvlist_t *config, *nvroot;
13114 uint_t c;
13115 int i;
13116 pool_checkpoint_stat_t *pcs = NULL;
13117 pool_scan_stat_t *pss = NULL;
13118 pool_removal_stat_t *prs = NULL;
13119 pool_raidz_expand_stat_t *pres = NULL;
	const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
	    "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
	int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];

	/* Calculate the width of each column */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
		/*
		 * Make sure we have enough space in the col for pretty-printed
		 * numbers and for the column header, and then leave a couple
		 * spaces between cols for readability.
		 */
		col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
	}

	if (timestamp_fmt != NODATE)
		print_timestamp(timestamp_fmt);

	/* Print header if appropriate */
	int term_height = terminal_height();
	boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
	    row % (term_height - 1) == 0);
	if (!wd->wd_scripted && (row == 0 || reprint_header)) {
		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			if (wd->wd_enabled[i])
				(void) printf("%*s", col_widths[i], headers[i]);
		}
		(void) fputc('\n', stdout);
	}

	/* Bytes of work remaining in each activity */
	int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};

	bytes_rem[ZPOOL_WAIT_FREE] =
	    zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);

	config = zpool_get_config(zhp, NULL);
	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
	if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
		bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
	if (prs != NULL && prs->prs_state == DSS_SCANNING)
		bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
		    prs->prs_copied;

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
	if (pss != NULL && pss->pss_state == DSS_SCANNING &&
	    pss->pss_pass_scrub_pause == 0) {
		int64_t rem = pss->pss_to_examine - pss->pss_issued;
		if (pss->pss_func == POOL_SCAN_SCRUB)
			bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
		else
			bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
	} else if (check_rebuilding(nvroot, NULL)) {
		bytes_rem[ZPOOL_WAIT_RESILVER] =
		    vdev_activity_top_remaining(nvroot);
	}

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
	if (pres != NULL && pres->pres_state == DSS_SCANNING) {
		int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
		bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
	}

	bytes_rem[ZPOOL_WAIT_INITIALIZE] =
	    vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
	bytes_rem[ZPOOL_WAIT_TRIM] =
	    vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);

	/*
	 * A replace finishes after resilvering finishes, so the amount of work
	 * left for a replace is the same as for resilvering.
	 *
	 * It isn't quite correct to say, as we do here, that if we have any
	 * 'spare' or 'replacing' vdevs and a resilver is happening, then a
	 * replace is in progress. When a hot spare is used, the faulted vdev
	 * is not removed after the hot spare is resilvered, so the parent
	 * 'spare' vdev is not removed either. So we could have a 'spare' vdev,
	 * but be resilvering for a different reason. However, we use it as a
	 * heuristic because we don't have access to the DTLs, which could tell
	 * us whether or not we have really finished resilvering a hot spare.
	 */
	if (vdev_any_spare_replacing(nvroot))
		bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];

	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
		char buf[64];
		if (!wd->wd_enabled[i])
			continue;

		if (wd->wd_exact) {
			(void) snprintf(buf, sizeof (buf), "%" PRIi64,
			    bytes_rem[i]);
		} else {
			zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
		}

		if (wd->wd_scripted)
			(void) printf(i == 0 ? "%s" : "\t%s", buf);
		else
			(void) printf(" %*s", col_widths[i] - 1, buf);
	}
	(void) printf("\n");
	(void) fflush(stdout);
}

static void *
wait_status_thread(void *arg)
{
	wait_data_t *wd = (wait_data_t *)arg;
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
		return (void *)(uintptr_t)(1);

	for (int row = 0; ; row++) {
		boolean_t missing;
		struct timespec timeout;
		int ret = 0;
		(void) clock_gettime(CLOCK_REALTIME, &timeout);

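		/*
		 * If the pool has disappeared, exit quietly with success; the
		 * main thread detects and reports a missing pool itself.
		 */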
		if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
		    zpool_props_refresh(zhp) != 0) {
			zpool_close(zhp);
			return (void *)(uintptr_t)(missing ? 0 : 1);
		}

		print_wait_status_row(wd, zhp, row);

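		/*
		 * Turn the (possibly fractional) interval into an absolute
		 * timeout, carrying nanosecond overflow into the seconds
		 * field so tv_nsec stays within range.
		 */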
		timeout.tv_sec += floor(wd->wd_interval);
		long nanos = timeout.tv_nsec +
		    (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
		if (nanos >= NANOSEC) {
			timeout.tv_sec++;
			timeout.tv_nsec = nanos - NANOSEC;
		} else {
			timeout.tv_nsec = nanos;
		}
		pthread_mutex_lock(&wd->wd_mutex);
		if (!wd->wd_should_exit)
			ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
			    &timeout);
		pthread_mutex_unlock(&wd->wd_mutex);
		if (ret == 0) {
			break;	/* signaled by main thread */
		} else if (ret != ETIMEDOUT) {
			(void) fprintf(stderr, gettext("pthread_cond_timedwait "
			    "failed: %s\n"), strerror(ret));
			zpool_close(zhp);
			return (void *)(uintptr_t)(1);
		}
	}

	zpool_close(zhp);
	return (void *)(uintptr_t)(0);
}

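/*
 * zpool wait [-Hnp] [-T d|u] [-t <activity>[,...]] <pool> [interval]
 *
 *	-H	Scripted mode, don't display headers
 *	-n	Print headers only once
 *	-p	Display exact (parsable) numeric output
 *	-T	Display a timestamp in date(1) or Unix format
 *	-t	Wait only for the named activities
 *
 * Block until none of the selected activities are in progress, printing one
 * status row per interval when an interval is supplied.
 */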
int
zpool_do_wait(int argc, char **argv)
{
	boolean_t verbose = B_FALSE;
	int c, i;
	unsigned long count;
	pthread_t status_thr;
	int error = 0;
	zpool_handle_t *zhp;

	wait_data_t wd;
	wd.wd_scripted = B_FALSE;
	wd.wd_exact = B_FALSE;
	wd.wd_headers_once = B_FALSE;
	wd.wd_should_exit = B_FALSE;

	pthread_mutex_init(&wd.wd_mutex, NULL);
	pthread_cond_init(&wd.wd_cv, NULL);

	/* By default, wait for all types of activity. */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
		wd.wd_enabled[i] = B_TRUE;

	while ((c = getopt(argc, argv, "HnpT:t:")) != -1) {
		switch (c) {
		case 'H':
			wd.wd_scripted = B_TRUE;
			break;
		case 'n':
			wd.wd_headers_once = B_TRUE;
			break;
		case 'p':
			wd.wd_exact = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 't':
			/* Reset activities array */
			memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));

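			/*
			 * col_opts must stay in the same order as the
			 * ZPOOL_WAIT_* activity enum, since the matching
			 * index is used to enable the activity directly.
			 */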
			for (char *tok; (tok = strsep(&optarg, ",")); ) {
				static const char *const col_opts[] = {
				    "discard", "free", "initialize", "replace",
				    "remove", "resilver", "scrub", "trim",
				    "raidz_expand" };

				for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
					if (strcmp(tok, col_opts[i]) == 0) {
						wd.wd_enabled[i] = B_TRUE;
						goto found;
					}

				(void) fprintf(stderr,
				    gettext("invalid activity '%s'\n"), tok);
				usage(B_FALSE);
found:;
			}
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	get_interval_count(&argc, argv, &wd.wd_interval, &count);
	if (count != 0) {
		/* This subcmd only accepts an interval, not a count */
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	if (wd.wd_interval != 0)
		verbose = B_TRUE;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	wd.wd_poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
		return (1);

	if (verbose) {
		/*
		 * We use a separate thread for printing status updates because
		 * the main thread will call lzc_wait(), which blocks as long
		 * as an activity is in progress, which can be a long time.
		 */
		if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
		    != 0) {
			(void) fprintf(stderr, gettext("failed to create "
			    "status thread: %s\n"), strerror(errno));
			zpool_close(zhp);
			return (1);
		}
	}

	/*
	 * Loop over all activities that we are supposed to wait for until
	 * none of them are in progress. Note that this means we can end up
	 * waiting for more activities to complete than just those that were
	 * in progress when we began waiting; if an activity we are interested
	 * in begins while we are waiting for another activity, we will wait
	 * for both to complete before exiting.
	 */
	for (;;) {
		boolean_t missing = B_FALSE;
		boolean_t any_waited = B_FALSE;

		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			boolean_t waited;

			if (!wd.wd_enabled[i])
				continue;

			error = zpool_wait_status(zhp, i, &missing, &waited);
			if (error != 0 || missing)
				break;

			any_waited = (any_waited || waited);
		}

		if (error != 0 || missing || !any_waited)
			break;
	}

	zpool_close(zhp);

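	/* Wake the status thread, if any, and collect its exit status */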
	if (verbose) {
		uintptr_t status;
		pthread_mutex_lock(&wd.wd_mutex);
		wd.wd_should_exit = B_TRUE;
		pthread_cond_signal(&wd.wd_cv);
		pthread_mutex_unlock(&wd.wd_mutex);
		(void) pthread_join(status_thr, (void *)&status);
		if (status != 0)
			error = status;
	}

	pthread_mutex_destroy(&wd.wd_mutex);
	pthread_cond_destroy(&wd.wd_cv);
	return (error);
}

/*
 * zpool ddtprune -d|-p <amount> <pool>
 *
 *	-d <days>	Prune entries <days> old and older
 *	-p <percent>	Prune <percent> amount of entries
 *
 * Prune single-reference entries from the DDT to satisfy the amount
 * specified.
 */
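/*
 * For example (pool name is illustrative):
 *
 *	zpool ddtprune -d 30 tank	prune unique entries 30+ days old
 *	zpool ddtprune -p 25 tank	prune 25% of the unique entries
 */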
int
zpool_do_ddt_prune(int argc, char **argv)
{
	zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE;
	uint64_t amount = 0;
	zpool_handle_t *zhp;
	char *endptr;
	int c;

	while ((c = getopt(argc, argv, "d:p:")) != -1) {
		switch (c) {
		case 'd':
			if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) {
				(void) fprintf(stderr, gettext("-d cannot be "
				    "combined with -p option\n"));
				usage(B_FALSE);
			}
			errno = 0;
			amount = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0' || amount == 0) {
				(void) fprintf(stderr,
				    gettext("invalid days value\n"));
				usage(B_FALSE);
			}
			amount *= 86400;	/* convert days to seconds */
			unit = ZPOOL_DDT_PRUNE_AGE;
			break;
		case 'p':
			if (unit == ZPOOL_DDT_PRUNE_AGE) {
				(void) fprintf(stderr, gettext("-p cannot be "
				    "combined with -d option\n"));
				usage(B_FALSE);
			}
			errno = 0;
			amount = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0' ||
			    amount == 0 || amount > 100) {
				(void) fprintf(stderr,
				    gettext("invalid percentage value\n"));
				usage(B_FALSE);
			}
			unit = ZPOOL_DDT_PRUNE_PERCENTAGE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}
	argc -= optind;
	argv += optind;

	if (unit == ZPOOL_DDT_PRUNE_NONE) {
		(void) fprintf(stderr,
		    gettext("missing amount option (-d|-p <value>)\n"));
		usage(B_FALSE);
	} else if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	} else if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}
	zhp = zpool_open(g_zfs, argv[0]);
	if (zhp == NULL)
		return (-1);

	int error = zpool_ddt_prune(zhp, unit, amount);

	zpool_close(zhp);

	return (error);
}

static int
find_command_idx(const char *command, int *idx)
{
	for (int i = 0; i < NCOMMAND; ++i) {
		if (command_table[i].name == NULL)
			continue;

		if (strcmp(command, command_table[i].name) == 0) {
			*idx = i;
			return (0);
		}
	}
	return (1);
}

/*
 * Display version message.  With -j/--json, emit the version information
 * as a JSON object instead.
 */
static int
zpool_do_version(int argc, char **argv)
{
	int c;
	nvlist_t *jsobj = NULL, *zfs_ver = NULL;
	boolean_t json = B_FALSE;

	struct option long_options[] = {
		{"json", no_argument, NULL, 'j'},
		{0, 0, 0, 0}
	};

	while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
		switch (c) {
		case 'j':
			json = B_TRUE;
			jsobj = zpool_json_schema(0, 1);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	if (argc != 0) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	if (json) {
		zfs_ver = zfs_version_nvlist();
		if (zfs_ver) {
			fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);
			zcmd_print_json(jsobj);
			fnvlist_free(zfs_ver);
			return (0);
		} else
			return (-1);
	} else
		return (zfs_version_print() != 0);
}

/* Display documentation by exec'ing man(1) on the relevant page */
static int
zpool_do_help(int argc, char **argv)
{
	char page[MAXNAMELEN];
	if (argc < 3 || strcmp(argv[2], "zpool") == 0)
		strlcpy(page, "zpool", sizeof (page));
	else if (strcmp(argv[2], "concepts") == 0 ||
	    strcmp(argv[2], "props") == 0)
		snprintf(page, sizeof (page), "zpool%s", argv[2]);
	else
		snprintf(page, sizeof (page), "zpool-%s", argv[2]);

	execlp("man", "man", page, (char *)NULL);

	fprintf(stderr, "couldn't run man program: %s\n", strerror(errno));
	return (-1);
}

/*
 * Do zpool_load_compat() and print error message on failure
 */
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
	char report[1024];

	zpool_compat_status_t ret;

	ret = zpool_load_compat(compat, list, report, sizeof (report));
	switch (ret) {

	case ZPOOL_COMPATIBILITY_OK:
		break;

	case ZPOOL_COMPATIBILITY_NOFILES:
	case ZPOOL_COMPATIBILITY_BADFILE:
	case ZPOOL_COMPATIBILITY_BADTOKEN:
		(void) fprintf(stderr, "Error: %s\n", report);
		break;

	case ZPOOL_COMPATIBILITY_WARNTOKEN:
		(void) fprintf(stderr, "Warning: %s\n", report);
		ret = ZPOOL_COMPATIBILITY_OK;
		break;
	}
	return (ret);
}

int
main(int argc, char **argv)
{
	int ret = 0;
	int i = 0;
	char *cmdname;
	char **newargv;

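	/*
	 * Use the user's locale for messages, but force "C" numeric
	 * formatting so parsable numeric output stays stable.
	 */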
	(void) setlocale(LC_ALL, "");
	(void) setlocale(LC_NUMERIC, "C");
	(void) textdomain(TEXT_DOMAIN);
	srand(time(NULL));

	opterr = 0;

	/*
	 * Make sure the user has specified some command.
	 */
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing command\n"));
		usage(B_FALSE);
	}

	cmdname = argv[1];

	/*
	 * Special case '-?'
	 */
	if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
		usage(B_TRUE);

	/*
	 * Special case '-V|--version'
	 */
	if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
		return (zfs_version_print() != 0);

	/*
	 * Special case 'help'
	 */
	if (strcmp(cmdname, "help") == 0)
		return (zpool_do_help(argc, argv));

	if ((g_zfs = libzfs_init()) == NULL) {
		(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
		return (1);
	}

	libzfs_print_on_error(g_zfs, B_TRUE);

	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));

	/*
	 * Many commands modify input strings during parsing, so make a
	 * copy to protect the original argv.
	 */
	newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
	for (i = 0; i < argc; i++)
		newargv[i] = strdup(argv[i]);
	newargv[argc] = NULL;

	/*
	 * Run the appropriate command.
	 */
	if (find_command_idx(cmdname, &i) == 0) {
		current_command = &command_table[i];
		ret = command_table[i].func(argc - 1, newargv + 1);
	} else if (strchr(cmdname, '=')) {
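		/*
		 * A bare '<property>=<value>' argument is treated as
		 * shorthand for 'zpool set <property>=<value> ...'.
		 */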
		verify(find_command_idx("set", &i) == 0);
		current_command = &command_table[i];
		ret = command_table[i].func(argc, newargv);
	} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
		/*
		 * 'freeze' is a vile debugging abomination, so we treat
		 * it as such.
		 */
		zfs_cmd_t zc = {"\0"};

		(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
		ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
		if (ret != 0) {
			(void) fprintf(stderr,
			    gettext("failed to freeze pool: %d\n"), errno);
			ret = 1;
		}

		log_history = 0;
	} else {
		(void) fprintf(stderr, gettext("unrecognized "
		    "command '%s'\n"), cmdname);
		usage(B_FALSE);
		ret = 1;
	}

	for (i = 0; i < argc; i++)
		free(newargv[i]);
	free(newargv);

	if (ret == 0 && log_history)
		(void) zpool_log_history(g_zfs, history_str);

	libzfs_fini(g_zfs);

	/*
	 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
	 * for the purposes of running ::findleaks.
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	return (ret);
}