// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2021, 2023, Klara Inc.
 * Copyright [2021] Hewlett Packard Enterprise Development LP
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>

#include <libzfs.h>
#include <libzutil.h>

#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "zfs_valstr.h"

#include "statcommon.h"

libzfs_handle_t *g_zfs;

static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */

static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);
static int zpool_do_prefetch(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static int zpool_do_ddt_prune(int, char **);

static int zpool_do_help(int argc, char **argv);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

enum zpool_options {
	ZPOOL_OPTION_POWER = 1024,
	ZPOOL_OPTION_ALLOW_INUSE,
	ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
	ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
	ZPOOL_OPTION_POOL_KEY_GUID,
	ZPOOL_OPTION_JSON_NUMS_AS_INT,
	ZPOOL_OPTION_JSON_FLAT_VDEVS
};
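
/*
 * Note: the long-option-only values above start at 1024 so that they can
 * never collide with the single-byte characters getopt_long() returns for
 * short options.
 */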

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif

typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DDT_PRUNE,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_PREFETCH,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;


/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
	IOS_DEFAULT = 0,
	IOS_LATENCY = 1,
	IOS_QUEUES = 2,
	IOS_L_HISTO = 3,
	IOS_RQ_HISTO = 4,
	IOS_COUNT, /* always last element */
};

/* iostat_type entries as bitmasks */
#define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
#define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
#define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
#define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
#define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define	IOS_ANYHISTO_M	(IOS_L_HISTO_M | IOS_RQ_HISTO_M)
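
/*
 * For example, a caller holding a mask of requested stat types can test
 * (flags & IOS_ANYHISTO_M) to decide whether any histogram output was
 * requested at all.
 */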

/*
 * Lookup table for iostat flags to nvlist names. Basically a list
 * of all the nvlists a flag requires. Also specifies the order in
 * which data gets printed in zpool iostat.
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
	[IOS_L_HISTO] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_LATENCY] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_QUEUES] = {
	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    NULL},
	[IOS_RQ_HISTO] = {
	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    NULL},
};

static const char *pool_scan_func_str[] = {
	"NONE",
	"SCRUB",
	"RESILVER",
	"ERRORSCRUB"
};

static const char *pool_scan_state_str[] = {
	"NONE",
	"SCANNING",
	"FINISHED",
	"CANCELED",
	"ERRORSCRUBBING"
};

static const char *vdev_rebuild_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"COMPLETE"
};

static const char *checkpoint_state_str[] = {
	"NONE",
	"EXISTS",
	"DISCARDING"
};

static const char *vdev_state_str[] = {
	"UNKNOWN",
	"CLOSED",
	"OFFLINE",
	"REMOVED",
	"CANT_OPEN",
	"FAULTED",
	"DEGRADED",
	"ONLINE"
};

static const char *vdev_aux_str[] = {
	"NONE",
	"OPEN_FAILED",
	"CORRUPT_DATA",
	"NO_REPLICAS",
	"BAD_GUID_SUM",
	"TOO_SMALL",
	"BAD_LABEL",
	"VERSION_NEWER",
	"VERSION_OLDER",
	"UNSUP_FEAT",
	"SPARED",
	"ERR_EXCEEDED",
	"IO_FAILURE",
	"BAD_LOG",
	"EXTERNAL",
	"SPLIT_POOL",
	"BAD_ASHIFT",
	"EXTERNAL_PERSIST",
	"ACTIVE",
	"CHILDREN_OFFLINE",
	"ASHIFT_TOO_BIG"
};

static const char *vdev_init_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

static const char *vdev_trim_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

#define	ZFS_NICE_TIMESTAMP	100
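
/*
 * ZFS_NICE_TIMESTAMP is a local pseudo-format code for
 * nice_num_str_nvlist() below; presumably its value only needs to stay
 * clear of the real ZFS_NICENUM_* enum values so both can share one
 * 'format' parameter.
 */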

/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at one time, so we can
 * just do a highbit64(a).
 */
#define	IOS_HISTO_IDX(a)	(highbit64(a & IOS_ANYHISTO_M) - 1)
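
/*
 * For example, IOS_HISTO_IDX(IOS_RQ_HISTO_M) == highbit64(1ULL << 4) - 1
 * == 4 == IOS_RQ_HISTO.
 */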

typedef struct zpool_command {
	const char	*name;
	int		(*func)(int, char **);
	zpool_help_t	usage;
} zpool_command_t;

/*
 * Master command table. Each ZFS command has a name, associated function, and
 * usage message. The usage messages need to be internationalized, so we have
 * to have a function to return the usage message based on a command index.
 *
 * These commands are organized according to how they are displayed in the
 * usage message. An empty command (one with a NULL name) indicates an empty
 * line in the generic usage message.
 */
static zpool_command_t command_table[] = {
	{ "version",	zpool_do_version,	HELP_VERSION	},
	{ NULL },
	{ "create",	zpool_do_create,	HELP_CREATE	},
	{ "destroy",	zpool_do_destroy,	HELP_DESTROY	},
	{ NULL },
	{ "add",	zpool_do_add,		HELP_ADD	},
	{ "remove",	zpool_do_remove,	HELP_REMOVE	},
	{ NULL },
	{ "labelclear",	zpool_do_labelclear,	HELP_LABELCLEAR	},
	{ NULL },
	{ "checkpoint",	zpool_do_checkpoint,	HELP_CHECKPOINT	},
	{ "prefetch",	zpool_do_prefetch,	HELP_PREFETCH	},
	{ NULL },
	{ "list",	zpool_do_list,		HELP_LIST	},
	{ "iostat",	zpool_do_iostat,	HELP_IOSTAT	},
	{ "status",	zpool_do_status,	HELP_STATUS	},
	{ NULL },
	{ "online",	zpool_do_online,	HELP_ONLINE	},
	{ "offline",	zpool_do_offline,	HELP_OFFLINE	},
	{ "clear",	zpool_do_clear,		HELP_CLEAR	},
	{ "reopen",	zpool_do_reopen,	HELP_REOPEN	},
	{ NULL },
	{ "attach",	zpool_do_attach,	HELP_ATTACH	},
	{ "detach",	zpool_do_detach,	HELP_DETACH	},
	{ "replace",	zpool_do_replace,	HELP_REPLACE	},
	{ "split",	zpool_do_split,		HELP_SPLIT	},
	{ NULL },
	{ "initialize",	zpool_do_initialize,	HELP_INITIALIZE	},
	{ "resilver",	zpool_do_resilver,	HELP_RESILVER	},
	{ "scrub",	zpool_do_scrub,		HELP_SCRUB	},
	{ "trim",	zpool_do_trim,		HELP_TRIM	},
	{ NULL },
	{ "import",	zpool_do_import,	HELP_IMPORT	},
	{ "export",	zpool_do_export,	HELP_EXPORT	},
	{ "upgrade",	zpool_do_upgrade,	HELP_UPGRADE	},
	{ "reguid",	zpool_do_reguid,	HELP_REGUID	},
	{ NULL },
	{ "history",	zpool_do_history,	HELP_HISTORY	},
	{ "events",	zpool_do_events,	HELP_EVENTS	},
	{ NULL },
	{ "get",	zpool_do_get,		HELP_GET	},
	{ "set",	zpool_do_set,		HELP_SET	},
	{ "sync",	zpool_do_sync,		HELP_SYNC	},
	{ NULL },
	{ "wait",	zpool_do_wait,		HELP_WAIT	},
	{ NULL },
	{ "ddtprune",	zpool_do_ddt_prune,	HELP_DDT_PRUNE	},
};

#define	NCOMMAND	(ARRAY_SIZE(command_table))

#define	VDEV_ALLOC_CLASS_LOGS	"logs"

#define	MAX_CMD_LEN	256

static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;

static const char *
get_usage(zpool_help_t idx)
{
	switch (idx) {
	case HELP_ADD:
		return (gettext("\tadd [-afgLnP] [-o property=value] "
		    "<pool> <vdev> ...\n"));
	case HELP_ATTACH:
		return (gettext("\tattach [-fsw] [-o property=value] "
		    "<pool> <device> <new-device>\n"));
	case HELP_CLEAR:
		return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
	case HELP_CREATE:
		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
		    "\t    [-O file-system-property=value] ... \n"
		    "\t    [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
	case HELP_CHECKPOINT:
		return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
	case HELP_DESTROY:
		return (gettext("\tdestroy [-f] <pool>\n"));
	case HELP_DETACH:
		return (gettext("\tdetach <pool> <device>\n"));
	case HELP_EXPORT:
		return (gettext("\texport [-af] <pool> ...\n"));
	case HELP_HISTORY:
		return (gettext("\thistory [-il] [<pool>] ...\n"));
	case HELP_IMPORT:
		return (gettext("\timport [-d dir] [-D]\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]] -a\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]]\n"
		    "\t    [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
	case HELP_IOSTAT:
		return (gettext("\tiostat [[[-c [script1,script2,...]"
		    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
		    "\t    [[pool ...]|[pool vdev ...]|[vdev ...]]"
		    " [[-n] interval [count]]\n"));
	case HELP_LABELCLEAR:
		return (gettext("\tlabelclear [-f] <vdev>\n"));
	case HELP_LIST:
		return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "
		    "[--json-int, --json-pool-key-guid]] ...\n"
		    "\t    [-T d|u] [pool] [interval [count]]\n"));
	case HELP_PREFETCH:
		return (gettext("\tprefetch -t <type> [<type opts>] <pool>\n"
		    "\t    -t ddt <pool>\n"));
	case HELP_OFFLINE:
		return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
		    "<device> ...\n"));
	case HELP_ONLINE:
		return (gettext("\tonline [--power][-e] <pool> <device> "
		    "...\n"));
	case HELP_REPLACE:
		return (gettext("\treplace [-fsw] [-o property=value] "
		    "<pool> <device> [new-device]\n"));
	case HELP_REMOVE:
		return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
	case HELP_REOPEN:
		return (gettext("\treopen [-n] <pool>\n"));
	case HELP_INITIALIZE:
		return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
		    "[<device> ...]\n"));
	case HELP_SCRUB:
		return (gettext("\tscrub [-e | -s | -p | -C] [-w] "
		    "<pool> ...\n"));
	case HELP_RESILVER:
		return (gettext("\tresilver <pool> ...\n"));
	case HELP_TRIM:
		return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
		    "[<device> ...]\n"));
	case HELP_STATUS:
		return (gettext("\tstatus [--power] [-j [--json-int, "
		    "--json-flat-vdevs, ...\n"
		    "\t    --json-pool-key-guid]] [-c [script1,script2,...]] "
		    "[-dDegiLpPstvx] ...\n"
		    "\t    [-T d|u] [pool] [interval [count]]\n"));
	case HELP_UPGRADE:
		return (gettext("\tupgrade\n"
		    "\tupgrade -v\n"
		    "\tupgrade [-V version] <-a | pool ...>\n"));
	case HELP_EVENTS:
		return (gettext("\tevents [-vHf [pool] | -c]\n"));
	case HELP_GET:
		return (gettext("\tget [-Hp] [-j [--json-int, "
		    "--json-pool-key-guid]] ...\n"
		    "\t    [-o \"all\" | field[,...]] "
		    "<\"all\" | property[,...]> <pool> ...\n"));
	case HELP_SET:
		return (gettext("\tset <property=value> <pool>\n"
		    "\tset <vdev_property=value> <pool> <vdev>\n"));
	case HELP_SPLIT:
		return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
		    "\t    [-o property=value] <pool> <newpool> "
		    "[<device> ...]\n"));
	case HELP_REGUID:
		return (gettext("\treguid [-g guid] <pool>\n"));
	case HELP_SYNC:
		return (gettext("\tsync [pool] ...\n"));
	case HELP_VERSION:
		return (gettext("\tversion [-j]\n"));
	case HELP_WAIT:
		return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
		    "<pool> [interval]\n"));
	case HELP_DDT_PRUNE:
		return (gettext("\tddtprune -d|-p <amount> <pool>\n"));
	default:
		__builtin_unreachable();
	}
}

static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
	uint_t children = 0;
	nvlist_t **child;
	uint_t i;

	(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (children == 0) {
		char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
		    VDEV_NAME_PATH);

		if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
		    strcmp(path, VDEV_TYPE_HOLE) != 0)
			fnvlist_add_boolean(res, path);

		free(path);
		return;
	}

	for (i = 0; i < children; i++) {
		zpool_collect_leaves(zhp, child[i], res);
	}
}
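
/*
 * The result is a flat nvlist whose keys are leaf vdev names, e.g.
 * (illustrative paths) "/dev/sda" and "/dev/sdb" for a two-way mirror;
 * indirect and hole vdevs are skipped.
 */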

/*
 * Callback routine that will print out a pool property value.
 */
static int
print_pool_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));

	if (zpool_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (zpool_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", zpool_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Callback routine that will print out a vdev property value.
 */
static int
print_vdev_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));

	if (vdev_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (vdev_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", vdev_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
 * '/dev/disk/by-vdev/L5'.
 */
static const char *
vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
{
	nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
	if (vdev_nv == NULL) {
		return (NULL);
	}
	return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
}

static int
zpool_power_on(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_TRUE));
}

static int
zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
{
	int rc;

	rc = zpool_power_on(zhp, vdev);
	if (rc != 0)
		return (rc);

	zpool_disk_wait(vdev_name_to_path(zhp, vdev));

	return (0);
}

static int
zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
{
	nvlist_t *nv;
	const char *path = NULL;
	int rc;

	/* Power up all the devices first */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		if (path != NULL) {
			rc = zpool_power_on(zhp, (char *)path);
			if (rc != 0) {
				return (rc);
			}
		}
	}

	/*
	 * Wait for their devices to show up. Since we powered them on
	 * at roughly the same time, they should all come online around
	 * the same time.
	 */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		zpool_disk_wait(path);
	}

	return (0);
}

static int
zpool_power_off(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_FALSE));
}

/*
 * Display usage message. If we're inside a command, display only the usage for
 * that command. Otherwise, iterate over the entire command table and display
 * a complete usage message.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	FILE *fp = requested ? stdout : stderr;

	if (current_command == NULL) {
		int i;

		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
		(void) fprintf(fp,
		    gettext("where 'command' is one of the following:\n\n"));

		for (i = 0; i < NCOMMAND; i++) {
			if (command_table[i].name == NULL)
				(void) fprintf(fp, "\n");
			else
				(void) fprintf(fp, "%s",
				    get_usage(command_table[i].usage));
		}

		(void) fprintf(fp,
		    gettext("\nFor further help on a command or topic, "
		    "run: %s\n"), "zpool help [<topic>]");
	} else {
		(void) fprintf(fp, gettext("usage:\n"));
		(void) fprintf(fp, "%s", get_usage(current_command->usage));
	}

	if (current_command != NULL &&
	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
	    ((strcmp(current_command->name, "set") == 0) ||
	    (strcmp(current_command->name, "get") == 0) ||
	    (strcmp(current_command->name, "list") == 0))) {

		(void) fprintf(fp, "%s",
		    gettext("\nthe following properties are supported:\n"));

		(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
		    "PROPERTY", "EDIT", "VALUES");

		/* Iterate over all properties */
		if (current_prop_type == ZFS_TYPE_POOL) {
			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);

			(void) fprintf(fp, "\t%-19s ", "feature@...");
			(void) fprintf(fp, "YES "
			    "disabled | enabled | active\n");

			(void) fprintf(fp, gettext("\nThe feature@ properties "
			    "must be appended with a feature name.\n"
			    "See zpool-features(7).\n"));
		} else if (current_prop_type == ZFS_TYPE_VDEV) {
			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);
		}
	}

	/*
	 * See comments at end of main().
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	exit(requested ? 0 : 2);
}

/*
 * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
 * if none specified.
 *
 *	-c	Cancel. Ends active initializing.
 *	-s	Suspend. Initializing can then be restarted with no flags.
 *	-u	Uninitialize. Clears initialization state.
 *	-w	Wait. Blocks until initializing has completed.
 */
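/*
 * Examples (pool and device names are illustrative):
 *
 *	zpool initialize -w tank	initialize every leaf vdev and wait
 *	zpool initialize -c tank sda	cancel initializing on one device
 */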
int
zpool_do_initialize(int argc, char **argv)
{
	int c;
	char *poolname;
	zpool_handle_t *zhp;
	nvlist_t *vdevs;
	int err = 0;
	boolean_t wait = B_FALSE;

	struct option long_options[] = {
		{"cancel", no_argument, NULL, 'c'},
		{"suspend", no_argument, NULL, 's'},
		{"uninit", no_argument, NULL, 'u'},
		{"wait", no_argument, NULL, 'w'},
		{0, 0, 0, 0}
	};

	pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
	while ((c = getopt_long(argc, argv, "csuw", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'c':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_CANCEL;
			break;
		case 's':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_SUSPEND;
			break;
		case 'u':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_UNINIT) {
				(void) fprintf(stderr, gettext("-u cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_UNINIT;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
		return (-1);
	}

	if (wait && (cmd_type != POOL_INITIALIZE_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c, "
		    "-s, or -u\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];
	zhp = zpool_open(g_zfs, poolname);
	if (zhp == NULL)
		return (-1);

	vdevs = fnvlist_alloc();
	if (argc == 1) {
		/* no individual leaf vdevs specified, so add them all */
		nvlist_t *config = zpool_get_config(zhp, NULL);
		nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
		    ZPOOL_CONFIG_VDEV_TREE);
		zpool_collect_leaves(zhp, nvroot, vdevs);
	} else {
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
	}

	if (wait)
		err = zpool_initialize_wait(zhp, cmd_type, vdevs);
	else
		err = zpool_initialize(zhp, cmd_type, vdevs);

	fnvlist_free(vdevs);
	zpool_close(zhp);

	return (err);
}

/*
 * Print a pool vdev config for dry runs.
 */
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
    const char *match, int name_flags)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	boolean_t printed = B_FALSE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (name != NULL)
			(void) printf("\t%*s%s\n", indent, "", name);
		return;
	}

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		const char *class = "";

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);

		if (is_hole == B_TRUE) {
			continue;
		}

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			class = VDEV_ALLOC_BIAS_LOG;
		(void) nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
		if (strcmp(match, class) != 0)
			continue;

		if (!printed && name != NULL) {
			(void) printf("\t%*s%s\n", indent, "", name);
			printed = B_TRUE;
		}
		vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
		print_vdev_tree(zhp, vname, child[c], indent + 2, "",
		    name_flags);
		free(vname);
	}
}

/*
 * Print the list of l2cache devices for dry runs.
 */
static void
print_cache_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "cache");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

/*
 * Print the list of spares for dry runs.
 */
static void
print_spare_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "spares");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

typedef struct spare_cbdata {
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} spare_cbdata_t;

static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
	    search == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev(child[c], search))
				return (B_TRUE);
	}

	return (B_FALSE);
}

static int
find_spare(zpool_handle_t *zhp, void *data)
{
	spare_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (find_vdev(nvroot, cbp->cb_guid)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

static void
nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
    boolean_t literal, boolean_t as_int, int format)
{
	char buf[256];
	if (literal) {
		if (!as_int)
			snprintf(buf, 256, "%llu", (u_longlong_t)value);
	} else {
		switch (format) {
		case ZFS_NICENUM_1024:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
			break;
		case ZFS_NICENUM_BYTES:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
			break;
		case ZFS_NICENUM_TIME:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
			break;
		case ZFS_NICE_TIMESTAMP:
			format_timestamp(value, buf, 256);
			break;
		default:
			fprintf(stderr, "Invalid number format\n");
			exit(1);
		}
	}
	if (as_int)
		fnvlist_add_uint64(item, key, value);
	else
		fnvlist_add_string(item, key, buf);
}

/*
 * Generates an nvlist carrying the output version for every command, based on
 * the given parameters. The purpose is to version the JSON output, since the
 * schema format may be updated for each command in the future.
 *
 * Schema:
 *
 * "output_version": {
 *     "command": string,
 *     "vers_major": integer,
 *     "vers_minor": integer,
 * }
 */
static nvlist_t *
zpool_json_schema(int maj_v, int min_v)
{
	char cmd[MAX_CMD_LEN];
	nvlist_t *sch = fnvlist_alloc();
	nvlist_t *ov = fnvlist_alloc();

	snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
	fnvlist_add_string(ov, "command", cmd);
	fnvlist_add_uint32(ov, "vers_major", maj_v);
	fnvlist_add_uint32(ov, "vers_minor", min_v);
	fnvlist_add_nvlist(sch, "output_version", ov);
	fnvlist_free(ov);
	return (sch);
}
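
/*
 * For example, zpool_json_schema(0, 1) invoked while running
 * 'zpool status' yields:
 *
 * "output_version": {
 *     "command": "zpool status",
 *     "vers_major": 0,
 *     "vers_minor": 1,
 * }
 */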

static void
fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
    boolean_t as_int)
{
	nvlist_t *config = zpool_get_config(zhp, NULL);
	uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
	uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);

	fnvlist_add_string(list, "name", zpool_get_name(zhp));
	if (addtype)
		fnvlist_add_string(list, "type", "POOL");
	fnvlist_add_string(list, "state", zpool_get_state_str(zhp));
	if (as_int) {
		if (guid)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);
		if (txg)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);
		fnvlist_add_uint64(list, "spa_version", SPA_VERSION);
		fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);
	} else {
		char value[ZFS_MAXPROPLEN];
		if (guid) {
			snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)guid);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
		}
		if (txg) {
			snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)txg);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
		}
		fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);
		fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);
	}
}

static void
used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
{
	spare_cbdata_t spare_cb;
	verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
	    &spare_cb.cb_guid) == 0);
	if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
		if (strcmp(zpool_get_name(spare_cb.cb_zhp),
		    zpool_get_name(zhp)) != 0) {
			fnvlist_add_string(list, "used_by",
			    zpool_get_name(spare_cb.cb_zhp));
		}
		zpool_close(spare_cb.cb_zhp);
	}
}

static void
fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
    boolean_t addtype, boolean_t as_int)
{
	boolean_t l2c = B_FALSE;
	const char *path, *phys, *devid, *bias = NULL;
	uint64_t hole = 0, log = 0, spare = 0;
	vdev_stat_t *vs;
	uint_t c;
	nvlist_t *nvdev;
	nvlist_t *nvdev_parent = NULL;
	char *_name;

	if (strcmp(name, zpool_get_name(zhp)) != 0)
		_name = name;
	else
		_name = (char *)"root-0";

	nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);

	fnvlist_add_string(list, "name", name);
	if (addtype)
		fnvlist_add_string(list, "type", "VDEV");
	if (nvdev) {
		const char *type = fnvlist_lookup_string(nvdev,
		    ZPOOL_CONFIG_TYPE);
		if (type)
			fnvlist_add_string(list, "vdev_type", type);
		uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);
		if (guid) {
			if (as_int) {
				fnvlist_add_uint64(list, "guid", guid);
			} else {
				char buf[ZFS_MAXPROPLEN];
				snprintf(buf, ZFS_MAXPROPLEN, "%llu",
				    (u_longlong_t)guid);
				fnvlist_add_string(list, "guid", buf);
			}
		}
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
			fnvlist_add_string(list, "path", path);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
		    &phys) == 0)
			fnvlist_add_string(list, "phys_path", phys);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
		    &devid) == 0)
			fnvlist_add_string(list, "devid", devid);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
		    &spare);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
		if (hole)
			fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
		else if (l2c)
			fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
		else if (spare)
			fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
		else if (log)
			fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
		else {
			(void) nvlist_lookup_string(nvdev,
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			if (bias != NULL)
				fnvlist_add_string(list, "class", bias);
			else {
				nvdev_parent = NULL;
				nvdev_parent = zpool_find_parent_vdev(zhp,
				    _name, NULL, NULL, NULL);

				/*
				 * With a mirrored special device, the parent
				 * "mirror" vdev will have
				 * ZPOOL_CONFIG_ALLOCATION_BIAS set to "special"
				 * not the leaf vdevs. If we're a leaf vdev
				 * in that case we need to look at our parent
				 * to see if they're "special" to know if we
				 * are "special" too.
				 */
				if (nvdev_parent) {
					(void) nvlist_lookup_string(
					    nvdev_parent,
					    ZPOOL_CONFIG_ALLOCATION_BIAS,
					    &bias);
				}
				if (bias != NULL)
					fnvlist_add_string(list, "class", bias);
				else
					fnvlist_add_string(list, "class",
					    "normal");
			}
		}
		if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0) {
			fnvlist_add_string(list, "state",
			    vdev_state_str[vs->vs_state]);
		}
	}
}

static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
	nvpair_t *nvp;
	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
	    nvp = nvlist_next_nvpair(proplist, nvp)) {
		if (zpool_prop_feature(nvpair_name(nvp)))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Add a property pair (name, string-value) into a property nvlist.
 */
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
	zpool_prop_t prop = ZPOOL_PROP_INVAL;
	nvlist_t *proplist;
	const char *normnm;
	const char *strval;

	if (*props == NULL &&
	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		return (1);
	}

	proplist = *props;

	if (poolprop) {
		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
		const char *cname =
		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
		    (!zpool_prop_feature(propname) &&
		    !zpool_prop_vdev(propname))) {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid pool or vdev property\n"), propname);
			return (2);
		}

		/*
		 * feature@ properties and version should not be specified
		 * at the same time.
		 */
		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    prop_list_contains_feature(proplist))) {
			(void) fprintf(stderr, gettext("'feature@' and "
			    "'version' properties cannot be specified "
			    "together\n"));
			return (2);
		}

		/*
		 * if version is specified, only "legacy" compatibility
		 * may be requested
		 */
		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    nvlist_exists(proplist, cname) &&
		    strcmp(fnvlist_lookup_string(proplist, cname),
		    ZPOOL_COMPAT_LEGACY) != 0)) {
			(void) fprintf(stderr, gettext("when 'version' is "
			    "specified, the 'compatibility' feature may only "
			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
			return (2);
		}

		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
			normnm = propname;
		else
			normnm = zpool_prop_to_name(prop);
	} else {
		zfs_prop_t fsprop = zfs_name_to_prop(propname);

		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
		    B_FALSE)) {
			normnm = zfs_prop_to_name(fsprop);
		} else if (zfs_prop_user(propname) ||
		    zfs_prop_userquota(propname)) {
			normnm = propname;
		} else {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid filesystem property\n"), propname);
			return (2);
		}
	}

	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
	    prop != ZPOOL_PROP_CACHEFILE) {
		(void) fprintf(stderr, gettext("property '%s' "
		    "specified multiple times\n"), propname);
		return (2);
	}

	if (nvlist_add_string(proplist, normnm, propval) != 0) {
		(void) fprintf(stderr, gettext("internal "
		    "error: out of memory\n"));
		return (1);
	}

	return (0);
}
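
/*
 * For example, add_prop_list("ashift", "12", &props, B_TRUE) allocates
 * *props on first use and stores the pair under the canonical property
 * name; repeating a property is an error, except for 'cachefile', which
 * may be respecified.
 */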

/*
 * Set a default property pair (name, string-value) in a property nvlist.
 */
static int
add_prop_list_default(const char *propname, const char *propval,
    nvlist_t **props)
{
	const char *pval;

	if (nvlist_lookup_string(*props, propname, &pval) == 0)
		return (0);

	return (add_prop_list(propname, propval, props, B_TRUE));
}

/*
 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
 *
 *	-a	Disable the ashift validation checks
 *	-f	Force addition of devices, even if they appear in use
 *	-g	Display guid for individual vdev name.
 *	-L	Follow links when resolving vdev path name.
 *	-n	Do not add the devices, but display the resulting layout if
 *		they were to be added.
 *	-o	Set property=value ('ashift' is the only property that may
 *		be set here).
 *	-P	Display full path for vdev name.
 *
 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
 * libzfs.
 */
int
zpool_do_add(int argc, char **argv)
{
	boolean_t check_replication = B_TRUE;
	boolean_t check_inuse = B_TRUE;
	boolean_t dryrun = B_FALSE;
	boolean_t check_ashift = B_TRUE;
	boolean_t force = B_FALSE;
	int name_flags = 0;
	int c;
	nvlist_t *nvroot;
	char *poolname;
	int ret;
	zpool_handle_t *zhp;
	nvlist_t *config;
	nvlist_t *props = NULL;
	char *propval;

	struct option long_options[] = {
		{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
		{"allow-replication-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
		{"allow-ashift-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'g':
			name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 'P':
			name_flags |= VDEV_NAME_PATH;
			break;
		case ZPOOL_OPTION_ALLOW_INUSE:
			check_inuse = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
			check_replication = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
			check_ashift = B_FALSE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		usage(B_FALSE);
	}

	if (force) {
		if (!check_inuse || !check_replication || !check_ashift) {
			(void) fprintf(stderr, gettext("'-f' option is not "
			    "allowed with '--allow-replication-mismatch', "
			    "'--allow-ashift-mismatch', or "
			    "'--allow-in-use'\n"));
			usage(B_FALSE);
		}
		check_inuse = B_FALSE;
		check_replication = B_FALSE;
		check_ashift = B_FALSE;
	}

	poolname = argv[0];

	argc--;
	argv++;

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}
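
	/*
	 * E.g. a pool created with 'zpool create -o ashift=12 ...' will have
	 * its new vdevs get ashift=12 here as well, unless an explicit
	 * '-o ashift=...' was given on the 'zpool add' command line.
	 */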

	/* pass off to make_root_vdev for processing */
	nvroot = make_root_vdev(zhp, props, !check_inuse,
	    check_replication, B_FALSE, dryrun, argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		return (1);
	}

	if (dryrun) {
		nvlist_t *poolnvroot;
		nvlist_t **l2child, **sparechild;
		uint_t l2children, sparechildren, c;
		char *vname;
		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &poolnvroot) == 0);

		(void) printf(gettext("would update '%s' to the following "
		    "configuration:\n\n"), zpool_get_name(zhp));

		/* print original main pool and new tree */
		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
		    name_flags | VDEV_NAME_TYPE_ID);
		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

		/* print other classes: 'dedup', 'special', and 'log' */
		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		}

		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
 */
int
zpool_do_remove(int argc, char **argv)
{
	char *poolname;
	int i, ret = 0;
	zpool_handle_t *zhp = NULL;
	boolean_t stop = B_FALSE;
	int c;
	boolean_t noop = B_FALSE;
	boolean_t parsable = B_FALSE;
	boolean_t wait = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "npsw")) != -1) {
		switch (c) {
		case 'n':
			noop = B_TRUE;
			break;
		case 'p':
			parsable = B_TRUE;
			break;
		case 's':
			stop = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if (stop && noop) {
		zpool_close(zhp);
		(void) fprintf(stderr, gettext("stop request ignored\n"));
		return (0);
	}

	if (stop) {
		if (argc > 1) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
		if (zpool_vdev_remove_cancel(zhp) != 0)
			ret = 1;
		if (wait) {
			(void) fprintf(stderr, gettext("invalid option "
			    "combination: -w cannot be used with -s\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc < 2) {
			(void) fprintf(stderr, gettext("missing device\n"));
			usage(B_FALSE);
		}

		for (i = 1; i < argc; i++) {
			if (noop) {
				uint64_t size;

				if (zpool_vdev_indirect_size(zhp, argv[i],
				    &size) != 0) {
					ret = 1;
					break;
				}
				if (parsable) {
					(void) printf("%s %llu\n",
					    argv[i], (unsigned long long)size);
				} else {
					char valstr[32];
					zfs_nicenum(size, valstr,
					    sizeof (valstr));
					(void) printf("Memory that will be "
					    "used after removing %s: %s\n",
					    argv[i], valstr);
				}
			} else {
				if (zpool_vdev_remove(zhp, argv[i]) != 0)
					ret = 1;
			}
		}

		if (ret == 0 && wait)
			ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
	}
	zpool_close(zhp);

	return (ret);
}

/*
 * Return 1 if a vdev is active (being used in a pool)
 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
 *
 * This is useful for checking if a disk in an active pool is offlined or
 * faulted.
 */
static int
vdev_is_active(char *vdev_path)
{
	int fd;
	fd = open(vdev_path, O_EXCL);
	if (fd < 0) {
		return (1); /* can't open O_EXCL - disk is active */
	}

	close(fd);
	return (0); /* disk is inactive in the pool */
}
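
/*
 * This check relies on Linux block-device semantics: an open(2) with
 * O_EXCL fails while another holder (such as the ZFS module) has the
 * device open exclusively, and succeeds once the vdev has been offlined
 * or faulted.
 */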

/*
 * zpool labelclear [-f] <vdev>
 *
 *	-f	Force clearing the label for the vdevs which are members of
 *		the exported or foreign pools.
 *
 * Verifies that the vdev is not active and zeros out the label information
 * on the device.
 */
int
zpool_do_labelclear(int argc, char **argv)
{
	char vdev[MAXPATHLEN];
	char *name = NULL;
	int c, fd = -1, ret = 0;
	nvlist_t *config;
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	boolean_t force = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		default:
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get vdev name */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing vdev name\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	(void) strlcpy(vdev, argv[0], sizeof (vdev));

	/*
	 * If we cannot open an absolute path, we quit.
	 * Otherwise if the provided vdev name doesn't point to a file,
	 * try prepending expected disk paths and partition numbers.
	 */
	if ((fd = open(vdev, O_RDWR)) < 0) {
		int error;
		if (vdev[0] == '/') {
			(void) fprintf(stderr, gettext("failed to open "
			    "%s: %s\n"), vdev, strerror(errno));
			return (1);
		}

		error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
		if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
			if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
				error = ENOENT;
		}
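
		/*
		 * E.g. (illustrative) a bare "sda" resolves to "/dev/sda",
		 * and for a whole disk the first partition ("/dev/sda1"),
		 * where the label lives, is tried instead.
		 */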

		if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
			if (errno == ENOENT) {
				(void) fprintf(stderr, gettext(
				    "failed to find device %s, try "
				    "specifying absolute path instead\n"),
				    argv[0]);
				return (1);
			}

			(void) fprintf(stderr, gettext("failed to open %s:"
			    " %s\n"), vdev, strerror(errno));
			return (1);
		}
	}

	/*
	 * Flush all dirty pages for the block device. This should not be
	 * fatal when the device does not support BLKFLSBUF as would be the
	 * case for a file vdev.
	 */
	if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
		(void) fprintf(stderr, gettext("failed to invalidate "
		    "cache for %s: %s\n"), vdev, strerror(errno));

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) fprintf(stderr,
		    gettext("failed to read label from %s\n"), vdev);
		ret = 1;
		goto errout;
	}
	nvlist_free(config);

	ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to check state for %s\n"), vdev);
		ret = 1;
		goto errout;
	}

	if (!inuse)
		goto wipe_label;

	switch (state) {
	default:
	case POOL_STATE_ACTIVE:
	case POOL_STATE_SPARE:
	case POOL_STATE_L2CACHE:
		/*
		 * The user may labelclear a disk that has been offlined
		 * (e.g. with 'zpool offline -f') in an otherwise active
		 * pool; vdev_is_active() tells us whether the disk is
		 * still held open by the pool.
		 */
		if (force && !vdev_is_active(vdev))
			break;

		(void) fprintf(stderr, gettext(
		    "%s is a member (%s) of pool \"%s\""),
		    vdev, zpool_pool_state_to_name(state), name);

		if (force) {
			(void) fprintf(stderr, gettext(
			    ". Offline the disk first to clear its label."));
		}
		printf("\n");
		ret = 1;
		goto errout;

	case POOL_STATE_EXPORTED:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of exported pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_POTENTIALLY_ACTIVE:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of potentially active pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_DESTROYED:
		/* inuse should never be set for a destroyed pool */
		assert(0);
		break;
	}

wipe_label:
	ret = zpool_clear_label(fd);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to clear label for %s\n"), vdev);
	}

errout:
	free(name);
	(void) close(fd);

	return (ret);
}
1951
1952 /*
1953 * zpool create [-fnd] [-o property=value] ...
1954 * [-O file-system-property=value] ...
1955 * [-R root] [-m mountpoint] [-t tempname] <pool> <dev> ...
1956 *
1957 * -f Force creation, even if devices appear in use
1958 * -n Do not create the pool, but display the resulting layout if it
1959 * were to be created.
1960 * -R Create a pool under an alternate root
1961 * -m Set default mountpoint for the root dataset. By default it's
1962 * '/<pool>'
1963 * -o Set property=value.
1964 * -o Set feature@feature=enabled|disabled.
1965 * -d Don't automatically enable all supported pool features
1966 * (individual features can be enabled with -o).
1967 * -O Set fsproperty=value in the pool's root file system
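* -t Set a temporary in-core name for the pool while creating it
*    (the on-disk name remains <pool>)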
1968 *
1969 * Creates the named pool according to the given vdev specification. The
1970 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1971 * Once we get the nvlist back from make_root_vdev(), we either print out the
1972 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
1973 */
1974 int
1975 zpool_do_create(int argc, char **argv)
1976 {
1977 boolean_t force = B_FALSE;
1978 boolean_t dryrun = B_FALSE;
1979 boolean_t enable_pool_features = B_TRUE;
1980
1981 int c;
1982 nvlist_t *nvroot = NULL;
1983 char *poolname;
1984 char *tname = NULL;
1985 int ret = 1;
1986 char *altroot = NULL;
1987 char *compat = NULL;
1988 char *mountpoint = NULL;
1989 nvlist_t *fsprops = NULL;
1990 nvlist_t *props = NULL;
1991 char *propval;
1992
1993 /* check options */
1994 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1995 switch (c) {
1996 case 'f':
1997 force = B_TRUE;
1998 break;
1999 case 'n':
2000 dryrun = B_TRUE;
2001 break;
2002 case 'd':
2003 enable_pool_features = B_FALSE;
2004 break;
2005 case 'R':
2006 altroot = optarg;
2007 if (add_prop_list(zpool_prop_to_name(
2008 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
2009 goto errout;
2010 if (add_prop_list_default(zpool_prop_to_name(
2011 ZPOOL_PROP_CACHEFILE), "none", &props))
2012 goto errout;
2013 break;
2014 case 'm':
2015 /* Equivalent to -O mountpoint=optarg */
2016 mountpoint = optarg;
2017 break;
2018 case 'o':
2019 if ((propval = strchr(optarg, '=')) == NULL) {
2020 (void) fprintf(stderr, gettext("missing "
2021 "'=' for -o option\n"));
2022 goto errout;
2023 }
2024 *propval = '\0';
2025 propval++;
2026
2027 if (add_prop_list(optarg, propval, &props, B_TRUE))
2028 goto errout;
2029
2030 /*
2031 * If the user is creating a pool that doesn't support
2032 * feature flags, don't enable any features.
2033 */
2034 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
2035 char *end;
2036 u_longlong_t ver;
2037
2038 ver = strtoull(propval, &end, 0);
2039 if (*end == '\0' &&
2040 ver < SPA_VERSION_FEATURES) {
2041 enable_pool_features = B_FALSE;
2042 }
2043 }
2044 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
2045 altroot = propval;
2046 if (zpool_name_to_prop(optarg) ==
2047 ZPOOL_PROP_COMPATIBILITY)
2048 compat = propval;
2049 break;
2050 case 'O':
2051 if ((propval = strchr(optarg, '=')) == NULL) {
2052 (void) fprintf(stderr, gettext("missing "
2053 "'=' for -O option\n"));
2054 goto errout;
2055 }
2056 *propval = '\0';
2057 propval++;
2058
2059 /*
2060 * Mountpoints are checked and then added later.
2061 * Uniquely among properties, they can be specified
2062 * more than once, to avoid conflict with -m.
2063 */
2064 if (strcmp(optarg,
2065 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT)) == 0) {
2066 mountpoint = propval;
2067 } else if (add_prop_list(optarg, propval, &fsprops,
2068 B_FALSE)) {
2069 goto errout;
2070 }
2071 break;
2072 case 't':
2073 /*
2074 * Sanity check temporary pool name.
2075 */
2076 if (strchr(optarg, '/') != NULL) {
2077 (void) fprintf(stderr, gettext("cannot create "
2078 "'%s': invalid character '/' in temporary "
2079 "name\n"), optarg);
2080 (void) fprintf(stderr, gettext("use 'zfs "
2081 "create' to create a dataset\n"));
2082 goto errout;
2083 }
2084
2085 if (add_prop_list(zpool_prop_to_name(
2086 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
2087 goto errout;
2088 if (add_prop_list_default(zpool_prop_to_name(
2089 ZPOOL_PROP_CACHEFILE), "none", &props))
2090 goto errout;
2091 tname = optarg;
2092 break;
2093 case ':':
2094 (void) fprintf(stderr, gettext("missing argument for "
2095 "'%c' option\n"), optopt);
2096 goto badusage;
2097 case '?':
2098 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2099 optopt);
2100 goto badusage;
2101 }
2102 }
2103
2104 argc -= optind;
2105 argv += optind;
2106
2107 /* get pool name and check number of arguments */
2108 if (argc < 1) {
2109 (void) fprintf(stderr, gettext("missing pool name argument\n"));
2110 goto badusage;
2111 }
2112 if (argc < 2) {
2113 (void) fprintf(stderr, gettext("missing vdev specification\n"));
2114 goto badusage;
2115 }
2116
2117 poolname = argv[0];
2118
2119 /*
2120 * As a special case, check for use of '/' in the name, and direct the
2121 * user to use 'zfs create' instead.
2122 */
2123 if (strchr(poolname, '/') != NULL) {
2124 (void) fprintf(stderr, gettext("cannot create '%s': invalid "
2125 "character '/' in pool name\n"), poolname);
2126 (void) fprintf(stderr, gettext("use 'zfs create' to "
2127 "create a dataset\n"));
2128 goto errout;
2129 }
2130
2131 /* pass off to make_root_vdev for bulk processing */
2132 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
2133 argc - 1, argv + 1);
2134 if (nvroot == NULL)
2135 goto errout;
2136
2137 /* make_root_vdev() allows 0 toplevel children if there are spares */
2138 if (!zfs_allocatable_devs(nvroot)) {
2139 (void) fprintf(stderr, gettext("invalid vdev "
2140 "specification: at least one toplevel vdev must be "
2141 "specified\n"));
2142 goto errout;
2143 }
2144
2145 if (altroot != NULL && altroot[0] != '/') {
2146 (void) fprintf(stderr, gettext("invalid alternate root '%s': "
2147 "must be an absolute path\n"), altroot);
2148 goto errout;
2149 }
2150
2151 /*
2152 * Check the validity of the mountpoint and direct the user to use the
2153 * '-m' mountpoint option if it looks like it's in use.
2154 */
2155 if (mountpoint == NULL ||
2156 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
2157 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
2158 char buf[MAXPATHLEN];
2159 DIR *dirp;
2160
2161 if (mountpoint && mountpoint[0] != '/') {
2162 (void) fprintf(stderr, gettext("invalid mountpoint "
2163 "'%s': must be an absolute path, 'legacy', or "
2164 "'none'\n"), mountpoint);
2165 goto errout;
2166 }
2167
2168 if (mountpoint == NULL) {
2169 if (altroot != NULL)
2170 (void) snprintf(buf, sizeof (buf), "%s/%s",
2171 altroot, poolname);
2172 else
2173 (void) snprintf(buf, sizeof (buf), "/%s",
2174 poolname);
2175 } else {
2176 if (altroot != NULL)
2177 (void) snprintf(buf, sizeof (buf), "%s%s",
2178 altroot, mountpoint);
2179 else
2180 (void) snprintf(buf, sizeof (buf), "%s",
2181 mountpoint);
2182 }
2183
2184 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
2185 (void) fprintf(stderr, gettext("mountpoint '%s' : "
2186 "%s\n"), buf, strerror(errno));
2187 (void) fprintf(stderr, gettext("use '-m' "
2188 "option to provide a different default\n"));
2189 goto errout;
2190 } else if (dirp) {
2191 int count = 0;
2192
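/*
 * "." and ".." account for the first two entries, so reading a
 * third entry means the mountpoint directory is not empty.
 */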
2193 while (count < 3 && readdir(dirp) != NULL)
2194 count++;
2195 (void) closedir(dirp);
2196
2197 if (count > 2) {
2198 (void) fprintf(stderr, gettext("mountpoint "
2199 "'%s' exists and is not empty\n"), buf);
2200 (void) fprintf(stderr, gettext("use '-m' "
2201 "option to provide a "
2202 "different default\n"));
2203 goto errout;
2204 }
2205 }
2206 }
2207
2208 /*
2209 * Now that the mountpoint's validity has been checked, ensure that
2210 * the property is set appropriately prior to creating the pool.
2211 */
2212 if (mountpoint != NULL) {
2213 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
2214 mountpoint, &fsprops, B_FALSE);
2215 if (ret != 0)
2216 goto errout;
2217 }
2218
2219 ret = 1;
2220 if (dryrun) {
2221 /*
2222 * For a dry run invocation, print out a basic message and run
2223 * through all the vdevs in the list and print out in an
2224 * appropriate hierarchy.
2225 */
2226 (void) printf(gettext("would create '%s' with the "
2227 "following layout:\n\n"), poolname);
2228
2229 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
2230 print_vdev_tree(NULL, "dedup", nvroot, 0,
2231 VDEV_ALLOC_BIAS_DEDUP, 0);
2232 print_vdev_tree(NULL, "special", nvroot, 0,
2233 VDEV_ALLOC_BIAS_SPECIAL, 0);
2234 print_vdev_tree(NULL, "logs", nvroot, 0,
2235 VDEV_ALLOC_BIAS_LOG, 0);
2236 print_cache_list(nvroot, 0);
2237 print_spare_list(nvroot, 0);
2238
2239 ret = 0;
2240 } else {
2241 /*
2242 * Load in feature set.
2243 * Note: if the compatibility property is not given, compat is
2244 * NULL, which means 'all features'.
2245 */
2246 boolean_t requested_features[SPA_FEATURES];
2247 if (zpool_do_load_compat(compat, requested_features) !=
2248 ZPOOL_COMPATIBILITY_OK)
2249 goto errout;
2250
2251 /*
2252 * props contains list of features to enable.
2253 * For each feature:
2254 * - remove it if feature@name=disabled
2255 * - leave it there if feature@name=enabled
2256 * - add it if:
2257 * - enable_pool_features (ie: no '-d' or '-o version')
2258 * - it's supported by the kernel module
2259 * - it's in the requested feature set
2260 * - warn if it's enabled but not in compat
2261 */
2262 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2263 char propname[MAXPATHLEN];
2264 const char *propval;
2265 zfeature_info_t *feat = &spa_feature_table[i];
2266
2267 (void) snprintf(propname, sizeof (propname),
2268 "feature@%s", feat->fi_uname);
2269
2270 if (!nvlist_lookup_string(props, propname, &propval)) {
2271 if (strcmp(propval,
2272 ZFS_FEATURE_DISABLED) == 0) {
2273 (void) nvlist_remove_all(props,
2274 propname);
2275 } else if (strcmp(propval,
2276 ZFS_FEATURE_ENABLED) == 0 &&
2277 !requested_features[i]) {
2278 (void) fprintf(stderr, gettext(
2279 "Warning: feature \"%s\" enabled "
2280 "but is not in specified "
2281 "'compatibility' feature set.\n"),
2282 feat->fi_uname);
2283 }
2284 } else if (
2285 enable_pool_features &&
2286 feat->fi_zfs_mod_supported &&
2287 requested_features[i]) {
2288 ret = add_prop_list(propname,
2289 ZFS_FEATURE_ENABLED, &props, B_TRUE);
2290 if (ret != 0)
2291 goto errout;
2292 }
2293 }
2294
2295 ret = 1;
2296 if (zpool_create(g_zfs, poolname,
2297 nvroot, props, fsprops) == 0) {
2298 zfs_handle_t *pool = zfs_open(g_zfs,
2299 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
2300 if (pool != NULL) {
2301 if (zfs_mount(pool, NULL, 0) == 0) {
2302 ret = zfs_share(pool, NULL);
2303 zfs_commit_shares(NULL);
2304 }
2305 zfs_close(pool);
2306 }
2307 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
2308 (void) fprintf(stderr, gettext("pool name may have "
2309 "been omitted\n"));
2310 }
2311 }
2312
2313 errout:
2314 nvlist_free(nvroot);
2315 nvlist_free(fsprops);
2316 nvlist_free(props);
2317 return (ret);
2318 badusage:
2319 nvlist_free(fsprops);
2320 nvlist_free(props);
2321 usage(B_FALSE);
2322 return (2);
2323 }
2324
2325 /*
2326 * zpool destroy <pool>
2327 *
2328 * -f Forcefully unmount any datasets
2329 *
2330 * Destroy the given pool. Automatically unmounts any datasets in the pool.
2331 */
2332 int
2333 zpool_do_destroy(int argc, char **argv)
2334 {
2335 boolean_t force = B_FALSE;
2336 int c;
2337 char *pool;
2338 zpool_handle_t *zhp;
2339 int ret;
2340
2341 /* check options */
2342 while ((c = getopt(argc, argv, "f")) != -1) {
2343 switch (c) {
2344 case 'f':
2345 force = B_TRUE;
2346 break;
2347 case '?':
2348 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2349 optopt);
2350 usage(B_FALSE);
2351 }
2352 }
2353
2354 argc -= optind;
2355 argv += optind;
2356
2357 /* check arguments */
2358 if (argc < 1) {
2359 (void) fprintf(stderr, gettext("missing pool argument\n"));
2360 usage(B_FALSE);
2361 }
2362 if (argc > 1) {
2363 (void) fprintf(stderr, gettext("too many arguments\n"));
2364 usage(B_FALSE);
2365 }
2366
2367 pool = argv[0];
2368
2369 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
2370 /*
2371 * As a special case, check for use of '/' in the name, and
2372 * direct the user to use 'zfs destroy' instead.
2373 */
2374 if (strchr(pool, '/') != NULL)
2375 (void) fprintf(stderr, gettext("use 'zfs destroy' to "
2376 "destroy a dataset\n"));
2377 return (1);
2378 }
2379
2380 if (zpool_disable_datasets(zhp, force) != 0) {
2381 (void) fprintf(stderr, gettext("could not destroy '%s': "
2382 "could not unmount datasets\n"), zpool_get_name(zhp));
2383 zpool_close(zhp);
2384 return (1);
2385 }
2386
2387 /* The history must be logged as part of the export */
2388 log_history = B_FALSE;
2389
2390 ret = (zpool_destroy(zhp, history_str) != 0);
2391
2392 zpool_close(zhp);
2393
2394 return (ret);
2395 }
2396
2397 typedef struct export_cbdata {
2398 tpool_t *tpool;
2399 pthread_mutex_t mnttab_lock;
2400 boolean_t force;
2401 boolean_t hardforce;
2402 int retval;
2403 } export_cbdata_t;
2404
2405
2406 typedef struct {
2407 char *aea_poolname;
2408 export_cbdata_t *aea_cbdata;
2409 } async_export_args_t;
2410
2411 /*
2412 * Export one pool
2413 */
2414 static int
2415 zpool_export_one(zpool_handle_t *zhp, void *data)
2416 {
2417 export_cbdata_t *cb = data;
2418
2419 /*
2420 * zpool_disable_datasets() is not thread-safe for mnttab access.
2421 * So we serialize access here for the parallel 'zpool export -a' case.
2422 */
2423 if (cb->tpool != NULL)
2424 pthread_mutex_lock(&cb->mnttab_lock);
2425
2426 int retval = zpool_disable_datasets(zhp, cb->force);
2427
2428 if (cb->tpool != NULL)
2429 pthread_mutex_unlock(&cb->mnttab_lock);
2430
2431 if (retval)
2432 return (1);
2433
2434 if (cb->hardforce) {
2435 if (zpool_export_force(zhp, history_str) != 0)
2436 return (1);
2437 } else if (zpool_export(zhp, cb->force, history_str) != 0) {
2438 return (1);
2439 }
2440
2441 return (0);
2442 }
2443
2444 /*
2445 * Asynchronous export request
2446 */
2447 static void
2448 zpool_export_task(void *arg)
2449 {
2450 async_export_args_t *aea = arg;
2451
2452 zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname);
2453 if (zhp != NULL) {
2454 int ret = zpool_export_one(zhp, aea->aea_cbdata);
2455 if (ret != 0)
2456 aea->aea_cbdata->retval = ret;
2457 zpool_close(zhp);
2458 } else {
2459 aea->aea_cbdata->retval = 1;
2460 }
2461
2462 free(aea->aea_poolname);
2463 free(aea);
2464 }
2465
2466 /*
2467 * Process an export request in parallel
2468 */
2469 static int
2470 zpool_export_one_async(zpool_handle_t *zhp, void *data)
2471 {
2472 tpool_t *tpool = ((export_cbdata_t *)data)->tpool;
2473 async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t));
2474
2475 /* save pool name since zhp will go out of scope */
2476 aea->aea_poolname = strdup(zpool_get_name(zhp));
2477 aea->aea_cbdata = data;
2478
2479 /* ship off actual export to another thread */
2480 if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0)
2481 return (errno); /* unlikely */
2482 else
2483 return (0);
2484 }
2485
2486 /*
2487 * zpool export [-f] <pool> ...
2488 *
2489 * -a Export all pools
2490 * -f Forcefully unmount datasets
2491 *
2492 * Export the given pools. By default, the command will attempt to cleanly
2493 * unmount any active datasets within the pool. If the '-f' flag is specified,
2494 * then the datasets will be forcefully unmounted.
2495 */
2496 int
2497 zpool_do_export(int argc, char **argv)
2498 {
2499 export_cbdata_t cb;
2500 boolean_t do_all = B_FALSE;
2501 boolean_t force = B_FALSE;
2502 boolean_t hardforce = B_FALSE;
2503 int c, ret;
2504
2505 /* check options */
2506 while ((c = getopt(argc, argv, "afF")) != -1) {
2507 switch (c) {
2508 case 'a':
2509 do_all = B_TRUE;
2510 break;
2511 case 'f':
2512 force = B_TRUE;
2513 break;
2514 case 'F':
2515 hardforce = B_TRUE;
2516 break;
2517 case '?':
2518 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2519 optopt);
2520 usage(B_FALSE);
2521 }
2522 }
2523
2524 cb.force = force;
2525 cb.hardforce = hardforce;
2526 cb.tpool = NULL;
2527 cb.retval = 0;
2528 argc -= optind;
2529 argv += optind;
2530
2531 /* The history will be logged as part of the export itself */
2532 log_history = B_FALSE;
2533
2534 if (do_all) {
2535 if (argc != 0) {
2536 (void) fprintf(stderr, gettext("too many arguments\n"));
2537 usage(B_FALSE);
2538 }
2539
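/*
 * Oversubscribe the CPUs (5x the online processors); each export
 * mostly blocks on I/O (unmounting and the final sync) rather
 * than compute.
 */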
2540 cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
2541 0, NULL);
2542 pthread_mutex_init(&cb.mnttab_lock, NULL);
2543
2544 /* Asynchronously call zpool_export_one using thread pool */
2545 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2546 B_FALSE, zpool_export_one_async, &cb);
2547
2548 tpool_wait(cb.tpool);
2549 tpool_destroy(cb.tpool);
2550 (void) pthread_mutex_destroy(&cb.mnttab_lock);
2551
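/* combine the for_each_pool() status with any per-task failures */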
2552 return (ret | cb.retval);
2553 }
2554
2555 /* check arguments */
2556 if (argc < 1) {
2557 (void) fprintf(stderr, gettext("missing pool argument\n"));
2558 usage(B_FALSE);
2559 }
2560
2561 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2562 B_FALSE, zpool_export_one, &cb);
2563
2564 return (ret);
2565 }
2566
2567 /*
2568 * Given a vdev configuration, determine the maximum width needed for the device
2569 * name column.
2570 */
2571 static int
2572 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
2573 int name_flags)
2574 {
2575 static const char *const subtypes[] =
2576 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
2577
2578 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
2579 max = MAX(strlen(name) + depth, max);
2580 free(name);
2581
2582 nvlist_t **child;
2583 uint_t children;
2584 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
2585 if (nvlist_lookup_nvlist_array(nv, subtypes[i],
2586 &child, &children) == 0)
2587 for (uint_t c = 0; c < children; ++c)
2588 max = MAX(max_width(zhp, child[c], depth + 2,
2589 max, name_flags), max);
2590
2591 return (max);
2592 }
2593
2594 typedef struct status_cbdata {
2595 int cb_count;
2596 int cb_name_flags;
2597 int cb_namewidth;
2598 boolean_t cb_allpools;
2599 boolean_t cb_verbose;
2600 boolean_t cb_literal;
2601 boolean_t cb_explain;
2602 boolean_t cb_first;
2603 boolean_t cb_dedup_stats;
2604 boolean_t cb_print_unhealthy;
2605 boolean_t cb_print_status;
2606 boolean_t cb_print_slow_ios;
2607 boolean_t cb_print_dio_verify;
2608 boolean_t cb_print_vdev_init;
2609 boolean_t cb_print_vdev_trim;
2610 vdev_cmd_data_list_t *vcdl;
2611 boolean_t cb_print_power;
2612 boolean_t cb_json;
2613 boolean_t cb_flat_vdevs;
2614 nvlist_t *cb_jsobj;
2615 boolean_t cb_json_as_int;
2616 boolean_t cb_json_pool_key_guid;
2617 } status_cbdata_t;
2618
2619 /* Return B_TRUE if string is NULL, empty, or whitespace; B_FALSE otherwise. */
2620 static boolean_t
2621 is_blank_str(const char *str)
2622 {
2623 for (; str != NULL && *str != '\0'; ++str)
2624 if (!isblank(*str))
2625 return (B_FALSE);
2626 return (B_TRUE);
2627 }
2628
2629 static void
2630 zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,
2631 nvlist_t *item)
2632 {
2633 vdev_cmd_data_t *data;
2634 int i, j, k = 1;
2635 char tmp[256];
2636 const char *val;
2637
2638 for (i = 0; i < vcdl->count; i++) {
2639 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2640 (strcmp(vcdl->data[i].pool, pool) != 0))
2641 continue;
2642
2643 data = &vcdl->data[i];
2644 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2645 val = NULL;
2646 for (int k = 0; k < data->cols_cnt; k++) {
2647 if (strcmp(data->cols[k],
2648 vcdl->uniq_cols[j]) == 0) {
2649 val = data->lines[k];
2650 break;
2651 }
2652 }
2653 if (val == NULL || is_blank_str(val))
2654 val = "-";
2655 fnvlist_add_string(item, vcdl->uniq_cols[j], val);
2656 }
2657
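/* any output lines beyond the recognized columns become "extra_N" keys */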
2658 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2659 if (data->lines[j]) {
2660 snprintf(tmp, sizeof (tmp), "extra_%d", k++);
2661 fnvlist_add_string(item, tmp,
2662 data->lines[j]);
2663 }
2664 }
2665 break;
2666 }
2667 }
2668
2669 /* Print command output lines for specific vdev in a specific pool */
2670 static void
2671 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
2672 {
2673 vdev_cmd_data_t *data;
2674 int i, j;
2675 const char *val;
2676
2677 for (i = 0; i < vcdl->count; i++) {
2678 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2679 (strcmp(vcdl->data[i].pool, pool) != 0)) {
2680 /* Not the vdev we're looking for */
2681 continue;
2682 }
2683
2684 data = &vcdl->data[i];
2685 /* Print out all the output values for this vdev */
2686 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2687 val = NULL;
2688 /* Does this vdev have values for this column? */
2689 for (int k = 0; k < data->cols_cnt; k++) {
2690 if (strcmp(data->cols[k],
2691 vcdl->uniq_cols[j]) == 0) {
2692 /* yes it does, record the value */
2693 val = data->lines[k];
2694 break;
2695 }
2696 }
2697 /*
2698 * Mark empty values with dashes to make output
2699 * awk-able.
2700 */
2701 if (val == NULL || is_blank_str(val))
2702 val = "-";
2703
2704 printf("%*s", vcdl->uniq_cols_width[j], val);
2705 if (j < vcdl->uniq_cols_cnt - 1)
2706 fputs(" ", stdout);
2707 }
2708
2709 /* Print out any values that aren't in a column at the end */
2710 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2711 /* Did we have any columns? If so print a spacer. */
2712 if (vcdl->uniq_cols_cnt > 0)
2713 fputs(" ", stdout);
2714
2715 val = data->lines[j];
2716 fputs(val ?: "", stdout);
2717 }
2718 break;
2719 }
2720 }
2721
2722 /*
2723 * Print vdev initialization status for leaves
2724 */
2725 static void
2726 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2727 {
2728 if (verbose) {
2729 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2730 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2731 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2732 !vs->vs_scan_removing) {
2733 char zbuf[1024];
2734 char tbuf[256];
2735
2736 time_t t = vs->vs_initialize_action_time;
2737 int initialize_pct = 100;
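/* the +1 below avoids dividing by zero when the byte estimate is 0 */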
2738 if (vs->vs_initialize_state !=
2739 VDEV_INITIALIZE_COMPLETE) {
2740 initialize_pct = (vs->vs_initialize_bytes_done *
2741 100 / (vs->vs_initialize_bytes_est + 1));
2742 }
2743
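/* ctime_r() yields a 26-byte string ending "\n\0"; zero byte 24 to drop the newline */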
2744 (void) ctime_r(&t, tbuf);
2745 tbuf[24] = 0;
2746
2747 switch (vs->vs_initialize_state) {
2748 case VDEV_INITIALIZE_SUSPENDED:
2749 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2750 gettext("suspended, started at"), tbuf);
2751 break;
2752 case VDEV_INITIALIZE_ACTIVE:
2753 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2754 gettext("started at"), tbuf);
2755 break;
2756 case VDEV_INITIALIZE_COMPLETE:
2757 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2758 gettext("completed at"), tbuf);
2759 break;
2760 }
2761
2762 (void) printf(gettext(" (%d%% initialized%s)"),
2763 initialize_pct, zbuf);
2764 } else {
2765 (void) printf(gettext(" (uninitialized)"));
2766 }
2767 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2768 (void) printf(gettext(" (initializing)"));
2769 }
2770 }
2771
2772 /*
2773 * Print vdev TRIM status for leaves
2774 */
2775 static void
2776 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2777 {
2778 if (verbose) {
2779 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2780 vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2781 vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2782 !vs->vs_scan_removing) {
2783 char zbuf[1024];
2784 char tbuf[256];
2785
2786 time_t t = vs->vs_trim_action_time;
2787 int trim_pct = 100;
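/* the +1 below avoids dividing by zero when the byte estimate is 0 */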
2788 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
2789 trim_pct = (vs->vs_trim_bytes_done *
2790 100 / (vs->vs_trim_bytes_est + 1));
2791 }
2792
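/* ctime_r() yields a 26-byte string ending "\n\0"; zero byte 24 to drop the newline */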
2793 (void) ctime_r(&t, tbuf);
2794 tbuf[24] = 0;
2795
2796 switch (vs->vs_trim_state) {
2797 case VDEV_TRIM_SUSPENDED:
2798 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2799 gettext("suspended, started at"), tbuf);
2800 break;
2801 case VDEV_TRIM_ACTIVE:
2802 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2803 gettext("started at"), tbuf);
2804 break;
2805 case VDEV_TRIM_COMPLETE:
2806 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2807 gettext("completed at"), tbuf);
2808 break;
2809 }
2810
2811 (void) printf(gettext(" (%d%% trimmed%s)"),
2812 trim_pct, zbuf);
2813 } else if (vs->vs_trim_notsup) {
2814 (void) printf(gettext(" (trim unsupported)"));
2815 } else {
2816 (void) printf(gettext(" (untrimmed)"));
2817 }
2818 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2819 (void) printf(gettext(" (trimming)"));
2820 }
2821 }
2822
2823 /*
2824 * Return the color associated with a health string. This includes returning
2825 * NULL for no color change.
2826 */
2827 static const char *
2828 health_str_to_color(const char *health)
2829 {
2830 if (strcmp(health, gettext("FAULTED")) == 0 ||
2831 strcmp(health, gettext("SUSPENDED")) == 0 ||
2832 strcmp(health, gettext("UNAVAIL")) == 0) {
2833 return (ANSI_RED);
2834 }
2835
2836 if (strcmp(health, gettext("OFFLINE")) == 0 ||
2837 strcmp(health, gettext("DEGRADED")) == 0 ||
2838 strcmp(health, gettext("REMOVED")) == 0) {
2839 return (ANSI_YELLOW);
2840 }
2841
2842 return (NULL);
2843 }
2844
2845 /*
2846 * Called for each leaf vdev. Returns 0 if the vdev is healthy.
2847 * A vdev is unhealthy if any of the following are true:
2848 * 1) there are read, write, or checksum errors,
2849 * 2) its state is not ONLINE, or
2850 * 3) slow IO reporting was requested (-s) and there are slow IOs.
2851 */
2852 static int
2853 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
2854 {
2855 status_cbdata_t *cb = data;
2856 vdev_stat_t *vs;
2857 uint_t vsc;
2858 (void) hdl_data;
2859
2860 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2861 (uint64_t **)&vs, &vsc) != 0)
2862 return (1);
2863
2864 if (vs->vs_checksum_errors || vs->vs_read_errors ||
2865 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
2866 return (1);
2867
2868 if (cb->cb_print_slow_ios && vs->vs_slow_ios)
2869 return (1);
2870
2871 return (0);
2872 }
2873
2874 /*
2875 * Print out configuration state as requested by status_callback.
2876 */
2877 static void
2878 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
2879 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
2880 {
2881 nvlist_t **child, *root;
2882 uint_t c, i, vsc, children;
2883 pool_scan_stat_t *ps = NULL;
2884 vdev_stat_t *vs;
2885 char rbuf[6], wbuf[6], cbuf[6], dbuf[6];
2886 char *vname;
2887 uint64_t notpresent;
2888 spare_cbdata_t spare_cb;
2889 const char *state;
2890 const char *type;
2891 const char *path = NULL;
2892 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
2893 *scolor = NULL;
2894
2895 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2896 &child, &children) != 0)
2897 children = 0;
2898
2899 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2900 (uint64_t **)&vs, &vsc) == 0);
2901
2902 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2903
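/* indirect vdevs are placeholders for removed devices; don't list them */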
2904 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2905 return;
2906
2907 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2908
2909 if (isspare) {
2910 /*
2911 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
2912 * online drives.
2913 */
2914 if (vs->vs_aux == VDEV_AUX_SPARED)
2915 state = gettext("INUSE");
2916 else if (vs->vs_state == VDEV_STATE_HEALTHY)
2917 state = gettext("AVAIL");
2918 }
2919
2920 /*
2921 * If '-e' is specified then top-level vdevs and their children
2922 * can be pruned if all of their leaves are healthy.
2923 */
2924 if (cb->cb_print_unhealthy && depth > 0 &&
2925 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
2926 return;
2927 }
2928
2929 printf_color(health_str_to_color(state),
2930 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
2931 name, state);
2932
2933 if (!isspare) {
2934 if (vs->vs_read_errors)
2935 rcolor = ANSI_RED;
2936
2937 if (vs->vs_write_errors)
2938 wcolor = ANSI_RED;
2939
2940 if (vs->vs_checksum_errors)
2941 ccolor = ANSI_RED;
2942
2943 if (vs->vs_slow_ios)
2944 scolor = ANSI_BLUE;
2945
2946 if (cb->cb_literal) {
2947 fputc(' ', stdout);
2948 printf_color(rcolor, "%5llu",
2949 (u_longlong_t)vs->vs_read_errors);
2950 fputc(' ', stdout);
2951 printf_color(wcolor, "%5llu",
2952 (u_longlong_t)vs->vs_write_errors);
2953 fputc(' ', stdout);
2954 printf_color(ccolor, "%5llu",
2955 (u_longlong_t)vs->vs_checksum_errors);
2956 } else {
2957 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
2958 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
2959 zfs_nicenum(vs->vs_checksum_errors, cbuf,
2960 sizeof (cbuf));
2961 fputc(' ', stdout);
2962 printf_color(rcolor, "%5s", rbuf);
2963 fputc(' ', stdout);
2964 printf_color(wcolor, "%5s", wbuf);
2965 fputc(' ', stdout);
2966 printf_color(ccolor, "%5s", cbuf);
2967 }
2968 if (cb->cb_print_slow_ios) {
2969 if (children == 0) {
2970 /* Only leaf vdevs have slow IOs */
2971 zfs_nicenum(vs->vs_slow_ios, rbuf,
2972 sizeof (rbuf));
2973 } else {
2974 snprintf(rbuf, sizeof (rbuf), "-");
2975 }
2976
2977 if (cb->cb_literal)
2978 printf_color(scolor, " %5llu",
2979 (u_longlong_t)vs->vs_slow_ios);
2980 else
2981 printf_color(scolor, " %5s", rbuf);
2982 }
2983 if (cb->cb_print_power) {
2984 if (children == 0) {
2985 /* Only leaf vdevs have physical slots */
2986 switch (zpool_power_current_state(zhp, (char *)
2987 fnvlist_lookup_string(nv,
2988 ZPOOL_CONFIG_PATH))) {
2989 case 0:
2990 printf_color(ANSI_RED, " %5s",
2991 gettext("off"));
2992 break;
2993 case 1:
2994 printf(" %5s", gettext("on"));
2995 break;
2996 default:
2997 printf(" %5s", "-");
2998 }
2999 } else {
3000 printf(" %5s", "-");
3001 }
3002 }
3003 if (VDEV_STAT_VALID(vs_dio_verify_errors, vsc) &&
3004 cb->cb_print_dio_verify) {
3005 zfs_nicenum(vs->vs_dio_verify_errors, dbuf,
3006 sizeof (dbuf));
3007
3008 if (cb->cb_literal)
3009 printf(" %5llu",
3010 (u_longlong_t)vs->vs_dio_verify_errors);
3011 else
3012 printf(" %5s", dbuf);
3013 }
3014 }
3015
3016 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3017 &notpresent) == 0) {
3018 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
3019 (void) printf(" %s %s", gettext("was"), path);
3020 } else if (vs->vs_aux != 0) {
3021 (void) printf(" ");
3022 color_start(ANSI_RED);
3023 switch (vs->vs_aux) {
3024 case VDEV_AUX_OPEN_FAILED:
3025 (void) printf(gettext("cannot open"));
3026 break;
3027
3028 case VDEV_AUX_BAD_GUID_SUM:
3029 (void) printf(gettext("missing device"));
3030 break;
3031
3032 case VDEV_AUX_NO_REPLICAS:
3033 (void) printf(gettext("insufficient replicas"));
3034 break;
3035
3036 case VDEV_AUX_VERSION_NEWER:
3037 (void) printf(gettext("newer version"));
3038 break;
3039
3040 case VDEV_AUX_UNSUP_FEAT:
3041 (void) printf(gettext("unsupported feature(s)"));
3042 break;
3043
3044 case VDEV_AUX_ASHIFT_TOO_BIG:
3045 (void) printf(gettext("unsupported minimum blocksize"));
3046 break;
3047
3048 case VDEV_AUX_SPARED:
3049 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3050 &spare_cb.cb_guid) == 0);
3051 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
3052 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
3053 zpool_get_name(zhp)) == 0)
3054 (void) printf(gettext("currently in "
3055 "use"));
3056 else
3057 (void) printf(gettext("in use by "
3058 "pool '%s'"),
3059 zpool_get_name(spare_cb.cb_zhp));
3060 zpool_close(spare_cb.cb_zhp);
3061 } else {
3062 (void) printf(gettext("currently in use"));
3063 }
3064 break;
3065
3066 case VDEV_AUX_ERR_EXCEEDED:
3067 if (vs->vs_read_errors + vs->vs_write_errors +
3068 vs->vs_checksum_errors == 0 && children == 0 &&
3069 vs->vs_slow_ios > 0) {
3070 (void) printf(gettext("too many slow I/Os"));
3071 } else {
3072 (void) printf(gettext("too many errors"));
3073 }
3074 break;
3075
3076 case VDEV_AUX_IO_FAILURE:
3077 (void) printf(gettext("experienced I/O failures"));
3078 break;
3079
3080 case VDEV_AUX_BAD_LOG:
3081 (void) printf(gettext("bad intent log"));
3082 break;
3083
3084 case VDEV_AUX_EXTERNAL:
3085 (void) printf(gettext("external device fault"));
3086 break;
3087
3088 case VDEV_AUX_SPLIT_POOL:
3089 (void) printf(gettext("split into new pool"));
3090 break;
3091
3092 case VDEV_AUX_ACTIVE:
3093 (void) printf(gettext("currently in use"));
3094 break;
3095
3096 case VDEV_AUX_CHILDREN_OFFLINE:
3097 (void) printf(gettext("all children offline"));
3098 break;
3099
3100 case VDEV_AUX_BAD_LABEL:
3101 (void) printf(gettext("invalid label"));
3102 break;
3103
3104 default:
3105 (void) printf(gettext("corrupted data"));
3106 break;
3107 }
3108 color_end();
3109 } else if (children == 0 && !isspare &&
3110 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
3111 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
3112 vs->vs_configured_ashift < vs->vs_physical_ashift) {
3113 (void) printf(
3114 gettext(" block size: %dB configured, %dB native"),
3115 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
3116 }
3117
3118 if (vs->vs_scan_removing != 0) {
3119 (void) printf(gettext(" (removing)"));
3120 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
3121 (void) printf(gettext(" (non-allocating)"));
3122 }
3123
3124 /* The root vdev has the scrub/resilver stats */
3125 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3126 ZPOOL_CONFIG_VDEV_TREE);
3127 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
3128 (uint64_t **)&ps, &c);
3129
3130 /*
3131 * If you force fault a drive that's resilvering, its scan stats can
3132 * get frozen in time, giving the false impression that it's
3133 * being resilvered. That's why we check the state to see if the vdev
3134 * is healthy before reporting "resilvering" or "repairing".
3135 */
3136 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
3137 vs->vs_state == VDEV_STATE_HEALTHY) {
3138 if (vs->vs_scan_processed != 0) {
3139 (void) printf(gettext(" (%s)"),
3140 (ps->pss_func == POOL_SCAN_RESILVER) ?
3141 "resilvering" : "repairing");
3142 } else if (vs->vs_resilver_deferred) {
3143 (void) printf(gettext(" (awaiting resilver)"));
3144 }
3145 }
3146
3147 /* The top-level vdevs have the rebuild stats */
3148 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
3149 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
3150 if (vs->vs_rebuild_processed != 0) {
3151 (void) printf(gettext(" (resilvering)"));
3152 }
3153 }
3154
3155 if (cb->vcdl != NULL) {
3156 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3157 printf(" ");
3158 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
3159 }
3160 }
3161
3162 /* Display vdev initialization and trim status for leaves. */
3163 if (children == 0) {
3164 print_status_initialize(vs, cb->cb_print_vdev_init);
3165 print_status_trim(vs, cb->cb_print_vdev_trim);
3166 }
3167
3168 (void) printf("\n");
3169
3170 for (c = 0; c < children; c++) {
3171 uint64_t islog = B_FALSE, ishole = B_FALSE;
3172
3173 /* Don't print logs or holes here */
3174 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3175 &islog);
3176 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3177 &ishole);
3178 if (islog || ishole)
3179 continue;
3180 /* Only print normal classes here */
3181 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
3182 continue;
3183
3184 /* Provide vdev_rebuild_stats to children if available */
3185 if (vrs == NULL) {
3186 (void) nvlist_lookup_uint64_array(nv,
3187 ZPOOL_CONFIG_REBUILD_STATS,
3188 (uint64_t **)&vrs, &i);
3189 }
3190
3191 vname = zpool_vdev_name(g_zfs, zhp, child[c],
3192 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3193 print_status_config(zhp, cb, vname, child[c], depth + 2,
3194 isspare, vrs);
3195 free(vname);
3196 }
3197 }
3198
3199 /*
3200 * Print the configuration of an exported pool. Iterate over all vdevs in the
3201 * pool, printing out the name and status for each one.
3202 */
3203 static void
3204 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
3205 int depth)
3206 {
3207 nvlist_t **child;
3208 uint_t c, children;
3209 vdev_stat_t *vs;
3210 const char *type;
3211 char *vname;
3212
3213 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3214 if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
3215 strcmp(type, VDEV_TYPE_HOLE) == 0)
3216 return;
3217
3218 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3219 (uint64_t **)&vs, &c) == 0);
3220
3221 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
3222 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
3223
3224 if (vs->vs_aux != 0) {
3225 (void) printf(" ");
3226
3227 switch (vs->vs_aux) {
3228 case VDEV_AUX_OPEN_FAILED:
3229 (void) printf(gettext("cannot open"));
3230 break;
3231
3232 case VDEV_AUX_BAD_GUID_SUM:
3233 (void) printf(gettext("missing device"));
3234 break;
3235
3236 case VDEV_AUX_NO_REPLICAS:
3237 (void) printf(gettext("insufficient replicas"));
3238 break;
3239
3240 case VDEV_AUX_VERSION_NEWER:
3241 (void) printf(gettext("newer version"));
3242 break;
3243
3244 case VDEV_AUX_UNSUP_FEAT:
3245 (void) printf(gettext("unsupported feature(s)"));
3246 break;
3247
3248 case VDEV_AUX_ERR_EXCEEDED:
3249 (void) printf(gettext("too many errors"));
3250 break;
3251
3252 case VDEV_AUX_ACTIVE:
3253 (void) printf(gettext("currently in use"));
3254 break;
3255
3256 case VDEV_AUX_CHILDREN_OFFLINE:
3257 (void) printf(gettext("all children offline"));
3258 break;
3259
3260 case VDEV_AUX_BAD_LABEL:
3261 (void) printf(gettext("invalid label"));
3262 break;
3263
3264 default:
3265 (void) printf(gettext("corrupted data"));
3266 break;
3267 }
3268 }
3269 (void) printf("\n");
3270
3271 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
3272 &child, &children) != 0)
3273 return;
3274
3275 for (c = 0; c < children; c++) {
3276 uint64_t is_log = B_FALSE;
3277
3278 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3279 &is_log);
3280 if (is_log)
3281 continue;
3282 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
3283 continue;
3284
3285 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3286 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3287 print_import_config(cb, vname, child[c], depth + 2);
3288 free(vname);
3289 }
3290
3291 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
3292 &child, &children) == 0) {
3293 (void) printf(gettext("\tcache\n"));
3294 for (c = 0; c < children; c++) {
3295 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3296 cb->cb_name_flags);
3297 (void) printf("\t %s\n", vname);
3298 free(vname);
3299 }
3300 }
3301
3302 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
3303 &child, &children) == 0) {
3304 (void) printf(gettext("\tspares\n"));
3305 for (c = 0; c < children; c++) {
3306 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3307 cb->cb_name_flags);
3308 (void) printf("\t %s\n", vname);
3309 free(vname);
3310 }
3311 }
3312 }
3313
3314 /*
3315 * Print specialized class vdevs.
3316 *
3317 * These are recorded as top-level vdevs in the main pool child array,
3318 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
3319 * print_status_config() or print_import_config() to print the top-level
3320 * class vdevs; their children (eg mirrored slogs) are then printed
3321 * recursively, which works because only the top-level vdev is marked.
3322 */
3323 static void
3324 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
3325 const char *class)
3326 {
3327 uint_t c, children;
3328 nvlist_t **child;
3329 boolean_t printed = B_FALSE;
3330
3331 assert(zhp != NULL || !cb->cb_verbose);
3332
3333 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
3334 &children) != 0)
3335 return;
3336
3337 for (c = 0; c < children; c++) {
3338 uint64_t is_log = B_FALSE;
3339 const char *bias = NULL;
3340 const char *type = NULL;
3341
3342 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3343 &is_log);
3344
3345 if (is_log) {
3346 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
3347 } else {
3348 (void) nvlist_lookup_string(child[c],
3349 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
3350 (void) nvlist_lookup_string(child[c],
3351 ZPOOL_CONFIG_TYPE, &type);
3352 }
3353
3354 if (bias == NULL || strcmp(bias, class) != 0)
3355 continue;
3356 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
3357 continue;
3358
3359 if (!printed) {
3360 (void) printf("\t%s\t\n", gettext(class));
3361 printed = B_TRUE;
3362 }
3363
3364 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
3365 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3366 if (cb->cb_print_status)
3367 print_status_config(zhp, cb, name, child[c], 2,
3368 B_FALSE, NULL);
3369 else
3370 print_import_config(cb, name, child[c], 2);
3371 free(name);
3372 }
3373 }
3374
3375 /*
3376 * Display the status for the given pool.
3377 */
3378 static int
3379 show_import(nvlist_t *config, boolean_t report_error)
3380 {
3381 uint64_t pool_state;
3382 vdev_stat_t *vs;
3383 const char *name;
3384 uint64_t guid;
3385 uint64_t hostid = 0;
3386 const char *msgid;
3387 const char *hostname = "unknown";
3388 nvlist_t *nvroot, *nvinfo;
3389 zpool_status_t reason;
3390 zpool_errata_t errata;
3391 const char *health;
3392 uint_t vsc;
3393 const char *comment;
3394 const char *indent;
3395 char buf[2048];
3396 status_cbdata_t cb = { 0 };
3397
3398 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3399 &name) == 0);
3400 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3401 &guid) == 0);
3402 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3403 &pool_state) == 0);
3404 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3405 &nvroot) == 0);
3406
3407 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
3408 (uint64_t **)&vs, &vsc) == 0);
3409 health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
3410
3411 reason = zpool_import_status(config, &msgid, &errata);
3412
3413 /*
3414 * If we're importing using a cachefile, then we won't report any
3415 * errors unless we are in the scan phase of the import.
3416 */
3417 if (reason != ZPOOL_STATUS_OK && !report_error)
3418 return (reason);
3419
3420 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) {
3421 indent = " ";
3422 } else {
3423 comment = NULL;
3424 indent = "";
3425 }
3426
3427 (void) printf(gettext("%s pool: %s\n"), indent, name);
3428 (void) printf(gettext("%s id: %llu\n"), indent, (u_longlong_t)guid);
3429 (void) printf(gettext("%s state: %s"), indent, health);
3430 if (pool_state == POOL_STATE_DESTROYED)
3431 (void) printf(gettext(" (DESTROYED)"));
3432 (void) printf("\n");
3433
3434 if (reason != ZPOOL_STATUS_OK) {
3435 (void) printf("%s", indent);
3436 printf_color(ANSI_BOLD, gettext("status: "));
3437 }
3438 switch (reason) {
3439 case ZPOOL_STATUS_MISSING_DEV_R:
3440 case ZPOOL_STATUS_MISSING_DEV_NR:
3441 case ZPOOL_STATUS_BAD_GUID_SUM:
3442 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3443 "missing from the system.\n"));
3444 break;
3445
3446 case ZPOOL_STATUS_CORRUPT_LABEL_R:
3447 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
3448 printf_color(ANSI_YELLOW, gettext("One or more devices "
3449 "contain corrupted data.\n"));
3450 break;
3451
3452 case ZPOOL_STATUS_CORRUPT_DATA:
3453 printf_color(ANSI_YELLOW, gettext("The pool data is "
3454 "corrupted.\n"));
3455 break;
3456
3457 case ZPOOL_STATUS_OFFLINE_DEV:
3458 printf_color(ANSI_YELLOW, gettext("One or more devices "
3459 "are offlined.\n"));
3460 break;
3461
3462 case ZPOOL_STATUS_CORRUPT_POOL:
3463 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
3464 "corrupted.\n"));
3465 break;
3466
3467 case ZPOOL_STATUS_VERSION_OLDER:
3468 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3469 "a legacy on-disk version.\n"));
3470 break;
3471
3472 case ZPOOL_STATUS_VERSION_NEWER:
3473 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3474 "an incompatible version.\n"));
3475 break;
3476
3477 case ZPOOL_STATUS_FEAT_DISABLED:
3478 printf_color(ANSI_YELLOW, gettext("Some supported "
3479 "features are not enabled on the pool.\n"
3480 "\t%s(Note that they may be intentionally disabled if the\n"
3481 "\t%s'compatibility' property is set.)\n"), indent, indent);
3482 break;
3483
3484 case ZPOOL_STATUS_COMPATIBILITY_ERR:
3485 printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
3486 "the file(s) indicated by the 'compatibility'\n"
3487 "\t%sproperty.\n"), indent);
3488 break;
3489
3490 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
3491 printf_color(ANSI_YELLOW, gettext("One or more features "
3492 "are enabled on the pool despite not being\n"
3493 "\t%srequested by the 'compatibility' property.\n"),
3494 indent);
3495 break;
3496
3497 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3498 printf_color(ANSI_YELLOW, gettext("The pool uses the following "
3499 "feature(s) not supported on this system:\n"));
3500 color_start(ANSI_YELLOW);
3501 zpool_collect_unsup_feat(config, buf, 2048);
3502 (void) printf("%s", buf);
3503 color_end();
3504 break;
3505
3506 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3507 printf_color(ANSI_YELLOW, gettext("The pool can only be "
3508 "accessed in read-only mode on this system. It\n"
3509 "\t%scannot be accessed in read-write mode because it uses "
3510 "the following\n"
3511 "\t%sfeature(s) not supported on this system:\n"),
3512 indent, indent);
3513 color_start(ANSI_YELLOW);
3514 zpool_collect_unsup_feat(config, buf, 2048);
3515 (void) printf("%s", buf);
3516 color_end();
3517 break;
3518
3519 case ZPOOL_STATUS_HOSTID_ACTIVE:
3520 printf_color(ANSI_YELLOW, gettext("The pool is currently "
3521 "imported by another system.\n"));
3522 break;
3523
3524 case ZPOOL_STATUS_HOSTID_REQUIRED:
3525 printf_color(ANSI_YELLOW, gettext("The pool has the "
3526 "multihost property on. It cannot\n"
3527 "\t%sbe safely imported when the system hostid is not "
3528 "set.\n"), indent);
3529 break;
3530
3531 case ZPOOL_STATUS_HOSTID_MISMATCH:
3532 printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
3533 "by another system.\n"));
3534 break;
3535
3536 case ZPOOL_STATUS_FAULTED_DEV_R:
3537 case ZPOOL_STATUS_FAULTED_DEV_NR:
3538 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3539 "faulted.\n"));
3540 break;
3541
3542 case ZPOOL_STATUS_BAD_LOG:
3543 printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
3544 "be read.\n"));
3545 break;
3546
3547 case ZPOOL_STATUS_RESILVERING:
3548 case ZPOOL_STATUS_REBUILDING:
3549 printf_color(ANSI_YELLOW, gettext("One or more devices were "
3550 "being resilvered.\n"));
3551 break;
3552
3553 case ZPOOL_STATUS_ERRATA:
3554 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
3555 errata);
3556 break;
3557
3558 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
3559 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3560 "configured to use a non-native block size.\n"
3561 "\t%sExpect reduced performance.\n"), indent);
3562 break;
3563
3564 default:
3565 /*
3566 * No other status can be seen when importing pools.
3567 */
3568 assert(reason == ZPOOL_STATUS_OK);
3569 }
3570
3571 /*
3572 * Print out an action according to the overall state of the pool.
3573 */
3574 if (vs->vs_state != VDEV_STATE_HEALTHY ||
3575 reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) {
3576 (void) printf("%s", indent);
3577 (void) printf(gettext("action: "));
3578 }
3579 if (vs->vs_state == VDEV_STATE_HEALTHY) {
3580 if (reason == ZPOOL_STATUS_VERSION_OLDER ||
3581 reason == ZPOOL_STATUS_FEAT_DISABLED) {
3582 (void) printf(gettext("The pool can be imported using "
3583 "its name or numeric identifier, though\n"
3584 "\t%ssome features will not be available without "
3585 "an explicit 'zpool upgrade'.\n"), indent);
3586 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
3587 (void) printf(gettext("The pool can be imported using "
3588 "its name or numeric\n"
3589 "\t%sidentifier, though the file(s) indicated by "
3590 "its 'compatibility'\n"
3591 "\t%sproperty cannot be parsed at this time.\n"),
3592 indent, indent);
3593 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
3594 (void) printf(gettext("The pool can be imported using "
3595 "its name or numeric identifier and\n"
3596 "\t%sthe '-f' flag.\n"), indent);
3597 } else if (reason == ZPOOL_STATUS_ERRATA) {
3598 switch (errata) {
3599 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
3600 (void) printf(gettext("The pool can be "
3601 "imported using its name or numeric "
3602 "identifier,\n"
3603 "\t%showever there is a compatibility "
3604 "issue which should be corrected\n"
3605 "\t%sby running 'zpool scrub'\n"),
3606 indent, indent);
3607 break;
3608
3609 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
3610 (void) printf(gettext("The pool cannot be "
3611 "imported with this version of ZFS due to\n"
3612 "\t%san active asynchronous destroy. "
3613 "Revert to an earlier version\n"
3614 "\t%sand allow the destroy to complete "
3615 "before updating.\n"), indent, indent);
3616 break;
3617
3618 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
3619 (void) printf(gettext("Existing encrypted "
3620 "datasets contain an on-disk "
3621 "incompatibility, which\n"
3622 "\t%sneeds to be corrected. Backup these "
3623 "datasets to new encrypted datasets\n"
3624 "\t%sand destroy the old ones.\n"),
3625 indent, indent);
3626 break;
3627
3628 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
3629 (void) printf(gettext("Existing encrypted "
3630 "snapshots and bookmarks contain an "
3631 "on-disk\n"
3632 "\t%sincompatibility. This may cause "
3633 "on-disk corruption if they are used\n"
3634 "\t%swith 'zfs recv'. To correct the "
3635 "issue, enable the bookmark_v2 feature.\n"
3636 "\t%sNo additional action is needed if "
3637 "there are no encrypted snapshots or\n"
3638 "\t%sbookmarks. If preserving the "
3639 "encrypted snapshots and bookmarks is\n"
3640 "\t%srequired, use a non-raw send to "
3641 "backup and restore them. Alternately,\n"
3642 "\t%sthey may be removed to resolve the "
3643 "incompatibility.\n"), indent, indent,
3644 indent, indent, indent, indent);
3645 break;
3646 default:
3647 /*
3648 * All errata must contain an action message.
3649 */
3650 assert(errata == ZPOOL_ERRATA_NONE);
3651 }
3652 } else {
3653 (void) printf(gettext("The pool can be imported using "
3654 "its name or numeric identifier.\n"));
3655 }
3656 } else if (vs->vs_state == VDEV_STATE_DEGRADED) {
3657 (void) printf(gettext("The pool can be imported despite "
3658 "missing or damaged devices. The\n"
3659 "\t%sfault tolerance of the pool may be compromised if "
3660 "imported.\n"), indent);
3661 } else {
3662 switch (reason) {
3663 case ZPOOL_STATUS_VERSION_NEWER:
3664 (void) printf(gettext("The pool cannot be imported. "
3665 "Access the pool on a system running newer\n"
3666 "\t%ssoftware, or recreate the pool from "
3667 "backup.\n"), indent);
3668 break;
3669 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3670 (void) printf(gettext("The pool cannot be imported. "
3671 "Access the pool on a system that supports\n"
3672 "\t%sthe required feature(s), or recreate the pool "
3673 "from backup.\n"), indent);
3674 break;
3675 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3676 (void) printf(gettext("The pool cannot be imported in "
3677 "read-write mode. Import the pool with\n"
3678 "\t%s'-o readonly=on', access the pool on a system "
3679 "that supports the\n"
3680 "\t%srequired feature(s), or recreate the pool "
3681 "from backup.\n"), indent, indent);
3682 break;
3683 case ZPOOL_STATUS_MISSING_DEV_R:
3684 case ZPOOL_STATUS_MISSING_DEV_NR:
3685 case ZPOOL_STATUS_BAD_GUID_SUM:
3686 (void) printf(gettext("The pool cannot be imported. "
3687 "Attach the missing\n"
3688 "\t%sdevices and try again.\n"), indent);
3689 break;
3690 case ZPOOL_STATUS_HOSTID_ACTIVE:
3691 VERIFY0(nvlist_lookup_nvlist(config,
3692 ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
3693
3694 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3695 hostname = fnvlist_lookup_string(nvinfo,
3696 ZPOOL_CONFIG_MMP_HOSTNAME);
3697
3698 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3699 hostid = fnvlist_lookup_uint64(nvinfo,
3700 ZPOOL_CONFIG_MMP_HOSTID);
3701
3702 (void) printf(gettext("The pool must be exported from "
3703 "%s (hostid=%"PRIx64")\n"
3704 "\t%sbefore it can be safely imported.\n"),
3705 hostname, hostid, indent);
3706 break;
3707 case ZPOOL_STATUS_HOSTID_REQUIRED:
3708 (void) printf(gettext("Set a unique system hostid with "
3709 "the zgenhostid(8) command.\n"));
3710 break;
3711 default:
3712 (void) printf(gettext("The pool cannot be imported due "
3713 "to damaged devices or data.\n"));
3714 }
3715 }
3716
3717 /* Print the comment attached to the pool. */
3718 if (comment != NULL)
3719 (void) printf(gettext("comment: %s\n"), comment);
3720
3721 /*
3722 * If the state is "closed" or "can't open", and the aux state
3723 * is "corrupt data":
3724 */
3725 if ((vs->vs_state == VDEV_STATE_CLOSED ||
3726 vs->vs_state == VDEV_STATE_CANT_OPEN) &&
3727 vs->vs_aux == VDEV_AUX_CORRUPT_DATA) {
3728 if (pool_state == POOL_STATE_DESTROYED)
3729 (void) printf(gettext("\t%sThe pool was destroyed, "
3730 "but can be imported using the '-Df' flags.\n"),
3731 indent);
3732 else if (pool_state != POOL_STATE_EXPORTED)
3733 (void) printf(gettext("\t%sThe pool may be active on "
3734 "another system, but can be imported using\n"
3735 "\t%sthe '-f' flag.\n"), indent, indent);
3736 }
3737
3738 if (msgid != NULL) {
3739 (void) printf(gettext("%s see: "
3740 "https://openzfs.github.io/openzfs-docs/msg/%s\n"),
3741 indent, msgid);
3742 }
3743
3744 (void) printf(gettext("%sconfig:\n\n"), indent);
3745
3746 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
3747 VDEV_NAME_TYPE_ID);
3748 if (cb.cb_namewidth < 10)
3749 cb.cb_namewidth = 10;
3750
3751 print_import_config(&cb, name, nvroot, 0);
3752
3753 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
3754 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
3755 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
3756
3757 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
3758 (void) printf(gettext("\n\t%sAdditional devices are known to "
3759 "be part of this pool, though their\n"
3760 "\t%sexact configuration cannot be determined.\n"),
3761 indent, indent);
3762 }
3763 return (0);
3764 }
3765
3766 static boolean_t
3767 zfs_force_import_required(nvlist_t *config)
3768 {
3769 uint64_t state;
3770 uint64_t hostid = 0;
3771 nvlist_t *nvinfo;
3772
3773 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3774 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3775
3776 /*
3777 * The hostid on LOAD_INFO comes from the MOS label via
3778 * spa_tryimport(). If it's not there then we're likely talking to an
3779 * older kernel, so use the top one, which will be from the label
3780 * discovered in zpool_find_import(), or if a cachefile is in use, the
3781 * local hostid.
3782 */
3783 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
3784 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
3785 &hostid);
3786
3787 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3788 return (B_TRUE);
3789
3790 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3791 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3792 ZPOOL_CONFIG_MMP_STATE);
3793
3794 if (mmp_state != MMP_STATE_INACTIVE)
3795 return (B_TRUE);
3796 }
3797
3798 return (B_FALSE);
3799 }
3800
3801 /*
3802 * Perform the import for the given configuration. This passes the heavy
3803 * lifting off to zpool_import_props(), and then mounts the datasets contained
3804 * within the pool.
3805 */
3806 static int
3807 do_import(nvlist_t *config, const char *newname, const char *mntopts,
3808 nvlist_t *props, int flags, uint_t mntthreads)
3809 {
3810 int ret = 0;
3811 int ms_status = 0;
3812 zpool_handle_t *zhp;
3813 const char *name;
3814 uint64_t version;
3815
3816 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
3817 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3818
3819 if (!SPA_VERSION_IS_SUPPORTED(version)) {
3820 (void) fprintf(stderr, gettext("cannot import '%s': pool "
3821 "is formatted using an unsupported ZFS version\n"), name);
3822 return (1);
3823 } else if (zfs_force_import_required(config) &&
3824 !(flags & ZFS_IMPORT_ANY_HOST)) {
3825 mmp_state_t mmp_state = MMP_STATE_INACTIVE;
3826 nvlist_t *nvinfo;
3827
3828 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3829 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
3830 mmp_state = fnvlist_lookup_uint64(nvinfo,
3831 ZPOOL_CONFIG_MMP_STATE);
3832
3833 if (mmp_state == MMP_STATE_ACTIVE) {
3834 const char *hostname = "<unknown>";
3835 uint64_t hostid = 0;
3836
3837 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3838 hostname = fnvlist_lookup_string(nvinfo,
3839 ZPOOL_CONFIG_MMP_HOSTNAME);
3840
3841 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3842 hostid = fnvlist_lookup_uint64(nvinfo,
3843 ZPOOL_CONFIG_MMP_HOSTID);
3844
3845 (void) fprintf(stderr, gettext("cannot import '%s': "
3846 "pool is imported on %s (hostid: "
3847 "0x%"PRIx64")\nExport the pool on the other "
3848 "system, then run 'zpool import'.\n"),
3849 name, hostname, hostid);
3850 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
3851 (void) fprintf(stderr, gettext("Cannot import '%s': "
3852 "pool has the multihost property on and the\n"
3853 "system's hostid is not set. Set a unique hostid "
3854 "with the zgenhostid(8) command.\n"), name);
3855 } else {
3856 const char *hostname = "<unknown>";
3857 time_t timestamp = 0;
3858 uint64_t hostid = 0;
3859
3860 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
3861 hostname = fnvlist_lookup_string(nvinfo,
3862 ZPOOL_CONFIG_HOSTNAME);
3863 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
3864 hostname = fnvlist_lookup_string(config,
3865 ZPOOL_CONFIG_HOSTNAME);
3866
3867 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
3868 timestamp = fnvlist_lookup_uint64(config,
3869 ZPOOL_CONFIG_TIMESTAMP);
3870
3871 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
3872 hostid = fnvlist_lookup_uint64(nvinfo,
3873 ZPOOL_CONFIG_HOSTID);
3874 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
3875 hostid = fnvlist_lookup_uint64(config,
3876 ZPOOL_CONFIG_HOSTID);
3877
3878 (void) fprintf(stderr, gettext("cannot import '%s': "
3879 "pool was previously in use from another system.\n"
3880 "Last accessed by %s (hostid=%"PRIx64") at %s"
3881 "The pool can be imported, use 'zpool import -f' "
3882 "to import the pool.\n"), name, hostname,
3883 hostid, ctime(&timestamp));
3884 }
3885
3886 return (1);
3887 }
3888
3889 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
3890 return (1);
3891
3892 if (newname != NULL)
3893 name = newname;
3894
3895 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
3896 return (1);
3897
3898 /*
3899 * Loading keys is best effort. We don't want to return immediately
3900 * if it fails but we do want to give the error to the caller.
3901 */
3902 if (flags & ZFS_IMPORT_LOAD_KEYS &&
3903 zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
3904 ret = 1;
3905
3906 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
3907 !(flags & ZFS_IMPORT_ONLY)) {
3908 ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads);
3909 if (ms_status == EZFS_SHAREFAILED) {
3910 (void) fprintf(stderr, gettext("Import was "
3911 "successful, but unable to share some datasets\n"));
3912 } else if (ms_status == EZFS_MOUNTFAILED) {
3913 (void) fprintf(stderr, gettext("Import was "
3914 "successful, but unable to mount some datasets\n"));
3915 }
3916 }
3917
3918 zpool_close(zhp);
3919 return (ret);
3920 }
3921
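/*
 * Bundle of do_import() arguments for one pool; used by 'zpool import -a'
 * to dispatch each import to a thread pool (see import_pools()).
 */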
3922 typedef struct import_parameters {
3923 nvlist_t *ip_config;
3924 const char *ip_mntopts;
3925 nvlist_t *ip_props;
3926 int ip_flags;
3927 uint_t ip_mntthreads;
3928 int *ip_err;
3929 } import_parameters_t;
3930
3931 static void
3932 do_import_task(void *arg)
3933 {
3934 import_parameters_t *ip = arg;
3935 *ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts,
3936 ip->ip_props, ip->ip_flags, ip->ip_mntthreads);
3937 free(ip);
3938 }
3939
3940
3941 static int
3942 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
3943 char *orig_name, char *new_name, importargs_t *import)
3944 {
3945 nvlist_t *config = NULL;
3946 nvlist_t *found_config = NULL;
3947 uint64_t pool_state;
3948 boolean_t pool_specified = (import->poolname != NULL ||
3949 import->guid != 0);
3950 uint_t npools = 0;
3951
3952
3953 tpool_t *tp = NULL;
3954 if (import->do_all) {
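/*
 * Scale concurrent imports with CPU count: at least one
 * thread, at most five per online CPU.
 */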
3955 tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
3956 0, NULL);
3957 }
3958
3959 /*
3960 * At this point we have a list of import candidate configs. Even if
3961 * we were searching by pool name or guid, we still need to
3962 * post-process the list to deal with pool state and possible
3963 * duplicate names.
3964 */
3965 int err = 0;
3966 nvpair_t *elem = NULL;
3967 boolean_t first = B_TRUE;
3968 if (!pool_specified && import->do_all) {
3969 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
3970 npools++;
3971 }
3972 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3973
3974 verify(nvpair_value_nvlist(elem, &config) == 0);
3975
3976 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3977 &pool_state) == 0);
3978 if (!import->do_destroyed &&
3979 pool_state == POOL_STATE_DESTROYED)
3980 continue;
3981 if (import->do_destroyed &&
3982 pool_state != POOL_STATE_DESTROYED)
3983 continue;
3984
3985 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
3986 import->policy) == 0);
3987
3988 if (!pool_specified) {
3989 if (first)
3990 first = B_FALSE;
3991 else if (!import->do_all)
3992 (void) fputc('\n', stdout);
3993
3994 if (import->do_all) {
3995 import_parameters_t *ip = safe_malloc(
3996 sizeof (import_parameters_t));
3997
3998 ip->ip_config = config;
3999 ip->ip_mntopts = mntopts;
4000 ip->ip_props = props;
4001 ip->ip_flags = flags;
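/*
 * Split the mount worker budget evenly across all
 * pools being imported in parallel.
 */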
4002 ip->ip_mntthreads = mount_tp_nthr / npools;
4003 ip->ip_err = &err;
4004
4005 (void) tpool_dispatch(tp, do_import_task,
4006 (void *)ip);
4007 } else {
4008 /*
4009 * If we're importing from a cachefile, we
4010 * don't want to report errors until we are
4011 * in the scan phase of the import. If we
4012 * get an error, return it so the caller
4013 * can fall back to the scan phase.
4014 */
4015 if (import->cachefile && !import->scan)
4016 err = show_import(config, B_FALSE);
4017 else
4018 (void) show_import(config, B_TRUE);
4019 }
4020 } else if (import->poolname != NULL) {
4021 const char *name;
4022
4023 /*
4024 * We are searching for a pool based on name.
4025 */
4026 verify(nvlist_lookup_string(config,
4027 ZPOOL_CONFIG_POOL_NAME, &name) == 0);
4028
4029 if (strcmp(name, import->poolname) == 0) {
4030 if (found_config != NULL) {
4031 (void) fprintf(stderr, gettext(
4032 "cannot import '%s': more than "
4033 "one matching pool\n"),
4034 import->poolname);
4035 (void) fprintf(stderr, gettext(
4036 "import by numeric ID instead\n"));
4037 err = B_TRUE;
4038 }
4039 found_config = config;
4040 }
4041 } else {
4042 uint64_t guid;
4043
4044 /*
4045 * Search for a pool by guid.
4046 */
4047 verify(nvlist_lookup_uint64(config,
4048 ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
4049
4050 if (guid == import->guid)
4051 found_config = config;
4052 }
4053 }
4054 if (import->do_all) {
4055 tpool_wait(tp);
4056 tpool_destroy(tp);
4057 }
4058
4059 /*
4060 * If we were searching for a specific pool, verify that we found a
4061 * pool, and then do the import.
4062 */
4063 if (pool_specified && err == 0) {
4064 if (found_config == NULL) {
4065 (void) fprintf(stderr, gettext("cannot import '%s': "
4066 "no such pool available\n"), orig_name);
4067 err = B_TRUE;
4068 } else {
4069 err |= do_import(found_config, new_name,
4070 mntopts, props, flags, mount_tp_nthr);
4071 }
4072 }
4073
4074 /*
4075 * If we were just looking for pools, report an error if none were
4076 * found.
4077 */
4078 if (!pool_specified && first)
4079 (void) fprintf(stderr,
4080 gettext("no pools available to import\n"));
4081 return (err);
4082 }
4083
4084 typedef struct target_exists_args {
4085 const char *poolname;
4086 uint64_t poolguid;
4087 } target_exists_args_t;
4088
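/*
 * zpool_iter() callback used to detect a name or GUID collision with an
 * already-imported pool; returning nonzero stops the iteration.
 */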
4089 static int
4090 name_or_guid_exists(zpool_handle_t *zhp, void *data)
4091 {
4092 target_exists_args_t *args = data;
4093 nvlist_t *config = zpool_get_config(zhp, NULL);
4094 int found = 0;
4095
4096 if (config == NULL)
4097 return (0);
4098
4099 if (args->poolname != NULL) {
4100 const char *pool_name;
4101
4102 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4103 &pool_name) == 0);
4104 if (strcmp(pool_name, args->poolname) == 0)
4105 found = 1;
4106 } else {
4107 uint64_t pool_guid;
4108
4109 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4110 &pool_guid) == 0);
4111 if (pool_guid == args->poolguid)
4112 found = 1;
4113 }
4114 zpool_close(zhp);
4115
4116 return (found);
4117 }
4118 /*
4119 * zpool checkpoint <pool>
4120 * checkpoint --discard <pool>
4121 *
4122 * -d Discard the checkpoint from a checkpointed
4123 * --discard pool.
4124 *
4125 * -w Wait for discarding a checkpoint to complete.
4126 * --wait
4127 *
4128 * Checkpoints the specified pool, by taking a "snapshot" of its
4129 * current state. A pool can only have one checkpoint at a time.
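 *
 * Illustrative usage ('tank' is an example pool name):
 *
 *	zpool checkpoint tank		# take a checkpoint
 *	zpool checkpoint -d -w tank	# discard it and wait for completion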
4130 */
4131 int
4132 zpool_do_checkpoint(int argc, char **argv)
4133 {
4134 boolean_t discard, wait;
4135 char *pool;
4136 zpool_handle_t *zhp;
4137 int c, err;
4138
4139 struct option long_options[] = {
4140 {"discard", no_argument, NULL, 'd'},
4141 {"wait", no_argument, NULL, 'w'},
4142 {0, 0, 0, 0}
4143 };
4144
4145 discard = B_FALSE;
4146 wait = B_FALSE;
4147 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
4148 switch (c) {
4149 case 'd':
4150 discard = B_TRUE;
4151 break;
4152 case 'w':
4153 wait = B_TRUE;
4154 break;
4155 case '?':
4156 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4157 optopt);
4158 usage(B_FALSE);
4159 }
4160 }
4161
4162 if (wait && !discard) {
4163 (void) fprintf(stderr, gettext("--wait only valid when "
4164 "--discard also specified\n"));
4165 usage(B_FALSE);
4166 }
4167
4168 argc -= optind;
4169 argv += optind;
4170
4171 if (argc < 1) {
4172 (void) fprintf(stderr, gettext("missing pool argument\n"));
4173 usage(B_FALSE);
4174 }
4175
4176 if (argc > 1) {
4177 (void) fprintf(stderr, gettext("too many arguments\n"));
4178 usage(B_FALSE);
4179 }
4180
4181 pool = argv[0];
4182
4183 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
4184 /* As a special case, check for use of '/' in the name */
4185 if (strchr(pool, '/') != NULL)
4186 (void) fprintf(stderr, gettext("'zpool checkpoint' "
4187 "doesn't work on datasets. To save the state "
4188 "of a dataset from a specific point in time "
4189 "please use 'zfs snapshot'\n"));
4190 return (1);
4191 }
4192
4193 if (discard) {
4194 err = (zpool_discard_checkpoint(zhp) != 0);
4195 if (err == 0 && wait)
4196 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
4197 } else {
4198 err = (zpool_checkpoint(zhp) != 0);
4199 }
4200
4201 zpool_close(zhp);
4202
4203 return (err);
4204 }
4205
4206 #define CHECKPOINT_OPT 1024
4207
4208 /*
4209 * zpool prefetch -t <type> [<type opts>] <pool>
4210 *
4211 * Prefetches a particular type of data in the specified pool.
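 *
 * Illustrative usage ('tank' is an example pool name):
 *
 *	zpool prefetch -t ddt tank	# prime the dedup table (DDT) of 'tank'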
4212 */
4213 int
4214 zpool_do_prefetch(int argc, char **argv)
4215 {
4216 int c;
4217 char *poolname;
4218 char *typestr = NULL;
4219 zpool_prefetch_type_t type;
4220 zpool_handle_t *zhp;
4221 int err = 0;
4222
4223 while ((c = getopt(argc, argv, "t:")) != -1) {
4224 switch (c) {
4225 case 't':
4226 typestr = optarg;
4227 break;
4228 case ':':
4229 (void) fprintf(stderr, gettext("missing argument for "
4230 "'%c' option\n"), optopt);
4231 usage(B_FALSE);
4232 break;
4233 case '?':
4234 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4235 optopt);
4236 usage(B_FALSE);
4237 }
4238 }
4239 argc -= optind;
4240 argv += optind;
4241
4242 if (argc < 1) {
4243 (void) fprintf(stderr, gettext("missing pool name argument\n"));
4244 usage(B_FALSE);
4245 }
4246
4247 if (argc > 1) {
4248 (void) fprintf(stderr, gettext("too many arguments\n"));
4249 usage(B_FALSE);
4250 }
4251
4252 poolname = argv[0];
4253
4254 argc--;
4255 argv++;
4256
/* -t is required; avoid passing NULL to strcmp() when it was omitted */
if (typestr == NULL) {
	(void) fprintf(stderr, gettext("missing prefetch type\n"));
	usage(B_FALSE);
}

4257 if (strcmp(typestr, "ddt") == 0) {
4258 type = ZPOOL_PREFETCH_DDT;
4259 } else {
4260 (void) fprintf(stderr, gettext("unsupported prefetch type\n"));
4261 usage(B_FALSE);
4262 }
4263
4264 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
4265 return (1);
4266
4267 err = zpool_prefetch(zhp, type);
4268
4269 zpool_close(zhp);
4270
4271 return (err);
4272 }
4273
4274 /*
4275 * zpool import [-d dir] [-D]
4276 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4277 * [-d dir | -c cachefile | -s] [-f] -a
4278 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4279 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
4280 * [newpool]
4281 *
4282 * -c Read pool information from a cachefile instead of searching
4283 * devices. If importing from a cachefile config fails, then
4284 * fall back to searching for devices only in the directories that
4285 * exist in the cachefile.
4286 *
4287 * -d Scan in a specific directory, other than /dev/. More than
4288 * one directory can be specified using multiple '-d' options.
4289 *
4290 * -D Scan for previously destroyed pools. Combined with '-a' or a
4291 * pool name/id, import only destroyed pools.
4292 *
4293 * -R Temporarily import the pool, with all mountpoints relative to
4294 * the given root. The pool will remain exported when the machine
4295 * is rebooted.
4296 *
4297 * -V Import even in the presence of faulted vdevs. This is an
4298 * intentionally undocumented option for testing purposes, and
4299 * treats the pool configuration as complete, leaving any bad
4300 * vdevs in the FAULTED state. In other words, it does a
4301 * verbatim import.
4302 *
4303 * -f Force import, even if it appears that the pool is active.
4304 *
4305 * -F Attempt rewind if necessary.
4306 *
4307 * -n See if rewind would work, but don't actually rewind.
4308 *
4309 * -N Import the pool but don't mount datasets.
4310 *
4311 * -T Specify a starting txg to use for import. This is an
4312 * intentionally undocumented option for testing purposes.
4313 *
4314 * -a Import all pools found.
4315 *
4316 * -l Load encryption keys while importing.
4317 *
4318 * -o Set property=value and/or temporary mount options (without '=').
4319 *
4320 * -s Scan using the default search path; the libblkid cache will
4321 * not be consulted.
4322 *
4323 * --rewind-to-checkpoint
4324 * Import the pool and revert back to the checkpoint.
4325 *
4326 * The import command scans for pools to import, and imports pools based on
4327 * pool name or GUID. The pool can also be renamed as part of the import.
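 *
 * Illustrative invocations (pool names and the numeric id are examples):
 *
 *	zpool import				list importable pools
 *	zpool import -a				import all pools found
 *	zpool import -d /dev/disk/by-id tank	search a directory, import by name
 *	zpool import 9210437965905470809 tank2	import by numeric id, renaming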
4328 */
4329 int
4330 zpool_do_import(int argc, char **argv)
4331 {
4332 char **searchdirs = NULL;
4333 char *env, *envdup = NULL;
4334 int nsearch = 0;
4335 int c;
4336 int err = 0;
4337 nvlist_t *pools = NULL;
4338 boolean_t do_all = B_FALSE;
4339 boolean_t do_destroyed = B_FALSE;
4340 char *mntopts = NULL;
4341 uint64_t searchguid = 0;
4342 char *searchname = NULL;
4343 char *propval;
4344 nvlist_t *policy = NULL;
4345 nvlist_t *props = NULL;
4346 int flags = ZFS_IMPORT_NORMAL;
4347 uint32_t rewind_policy = ZPOOL_NO_REWIND;
4348 boolean_t dryrun = B_FALSE;
4349 boolean_t do_rewind = B_FALSE;
4350 boolean_t xtreme_rewind = B_FALSE;
4351 boolean_t do_scan = B_FALSE;
4352 boolean_t pool_exists = B_FALSE;
4353 uint64_t txg = -1ULL;
4354 char *cachefile = NULL;
4355 importargs_t idata = { 0 };
4356 char *endptr;
4357
4358 struct option long_options[] = {
4359 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
4360 {0, 0, 0, 0}
4361 };
4362
4363 /* check options */
4364 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
4365 long_options, NULL)) != -1) {
4366 switch (c) {
4367 case 'a':
4368 do_all = B_TRUE;
4369 break;
4370 case 'c':
4371 cachefile = optarg;
4372 break;
4373 case 'd':
4374 searchdirs = safe_realloc(searchdirs,
4375 (nsearch + 1) * sizeof (char *));
4376 searchdirs[nsearch++] = optarg;
4377 break;
4378 case 'D':
4379 do_destroyed = B_TRUE;
4380 break;
4381 case 'f':
4382 flags |= ZFS_IMPORT_ANY_HOST;
4383 break;
4384 case 'F':
4385 do_rewind = B_TRUE;
4386 break;
4387 case 'l':
4388 flags |= ZFS_IMPORT_LOAD_KEYS;
4389 break;
4390 case 'm':
4391 flags |= ZFS_IMPORT_MISSING_LOG;
4392 break;
4393 case 'n':
4394 dryrun = B_TRUE;
4395 break;
4396 case 'N':
4397 flags |= ZFS_IMPORT_ONLY;
4398 break;
4399 case 'o':
4400 if ((propval = strchr(optarg, '=')) != NULL) {
4401 *propval = '\0';
4402 propval++;
4403 if (add_prop_list(optarg, propval,
4404 &props, B_TRUE))
4405 goto error;
4406 } else {
4407 mntopts = optarg;
4408 }
4409 break;
4410 case 'R':
4411 if (add_prop_list(zpool_prop_to_name(
4412 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
4413 goto error;
4414 if (add_prop_list_default(zpool_prop_to_name(
4415 ZPOOL_PROP_CACHEFILE), "none", &props))
4416 goto error;
4417 break;
4418 case 's':
4419 do_scan = B_TRUE;
4420 break;
4421 case 't':
4422 flags |= ZFS_IMPORT_TEMP_NAME;
4423 if (add_prop_list_default(zpool_prop_to_name(
4424 ZPOOL_PROP_CACHEFILE), "none", &props))
4425 goto error;
4426 break;
4427
4428 case 'T':
4429 errno = 0;
4430 txg = strtoull(optarg, &endptr, 0);
4431 if (errno != 0 || *endptr != '\0') {
4432 (void) fprintf(stderr,
4433 gettext("invalid txg value\n"));
4434 usage(B_FALSE);
4435 }
4436 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
4437 break;
4438 case 'V':
4439 flags |= ZFS_IMPORT_VERBATIM;
4440 break;
4441 case 'X':
4442 xtreme_rewind = B_TRUE;
4443 break;
4444 case CHECKPOINT_OPT:
4445 flags |= ZFS_IMPORT_CHECKPOINT;
4446 break;
4447 case ':':
4448 (void) fprintf(stderr, gettext("missing argument for "
4449 "'%c' option\n"), optopt);
4450 usage(B_FALSE);
4451 break;
4452 case '?':
4453 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4454 optopt);
4455 usage(B_FALSE);
4456 }
4457 }
4458
4459 argc -= optind;
4460 argv += optind;
4461
4462 if (cachefile && nsearch != 0) {
4463 (void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
4464 usage(B_FALSE);
4465 }
4466
4467 if (cachefile && do_scan) {
4468 (void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
4469 usage(B_FALSE);
4470 }
4471
4472 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
4473 (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
4474 usage(B_FALSE);
4475 }
4476
4477 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
4478 (void) fprintf(stderr, gettext("-l is only meaningful during "
4479 "an import\n"));
4480 usage(B_FALSE);
4481 }
4482
4483 if ((dryrun || xtreme_rewind) && !do_rewind) {
4484 (void) fprintf(stderr,
4485 gettext("-n or -X only meaningful with -F\n"));
4486 usage(B_FALSE);
4487 }
4488 if (dryrun)
4489 rewind_policy = ZPOOL_TRY_REWIND;
4490 else if (do_rewind)
4491 rewind_policy = ZPOOL_DO_REWIND;
4492 if (xtreme_rewind)
4493 rewind_policy |= ZPOOL_EXTREME_REWIND;
4494
4495 /* In the future, we can capture further policy and include it here */
4496 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
4497 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
4498 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
4499 rewind_policy) != 0)
4500 goto error;
4501
4502 /* check argument count */
4503 if (do_all) {
4504 if (argc != 0) {
4505 (void) fprintf(stderr, gettext("too many arguments\n"));
4506 usage(B_FALSE);
4507 }
4508 } else {
4509 if (argc > 2) {
4510 (void) fprintf(stderr, gettext("too many arguments\n"));
4511 usage(B_FALSE);
4512 }
4513 }
4514
4515 /*
4516 * Check for the effective uid. We do this explicitly here because
4517 * otherwise any attempt to discover pools will silently fail.
4518 */
4519 if (argc == 0 && geteuid() != 0) {
4520 (void) fprintf(stderr, gettext("cannot "
4521 "discover pools: permission denied\n"));
4522
4523 free(searchdirs);
4524 nvlist_free(props);
4525 nvlist_free(policy);
4526 return (1);
4527 }
4528
4529 /*
4530 * Depending on the arguments given, we do one of the following:
4531 *
4532 * <none> Iterate through all pools and display information about
4533 * each one.
4534 *
4535 * -a Iterate through all pools and try to import each one.
4536 *
4537 * <id> Find the pool that corresponds to the given GUID/pool
4538 * name and import that one.
4539 *
4540 * -D The above options apply only to destroyed pools.
4541 */
4542 if (argc != 0) {
4543 char *endptr;
4544
4545 errno = 0;
4546 searchguid = strtoull(argv[0], &endptr, 10);
4547 if (errno != 0 || *endptr != '\0') {
4548 searchname = argv[0];
4549 searchguid = 0;
4550 }
4551
4552 /*
4553 * User specified a name or guid. Ensure it's unique.
4554 */
4555 target_exists_args_t search = {searchname, searchguid};
4556 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
4557 }
4558
4559 /*
4560 * Check the environment for the preferred search path.
4561 */
4562 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
4563 char *dir, *tmp = NULL;
4564
4565 envdup = strdup(env);
4566
4567 for (dir = strtok_r(envdup, ":", &tmp);
4568 dir != NULL;
4569 dir = strtok_r(NULL, ":", &tmp)) {
4570 searchdirs = safe_realloc(searchdirs,
4571 (nsearch + 1) * sizeof (char *));
4572 searchdirs[nsearch++] = dir;
4573 }
4574 }
4575
4576 idata.path = searchdirs;
4577 idata.paths = nsearch;
4578 idata.poolname = searchname;
4579 idata.guid = searchguid;
4580 idata.cachefile = cachefile;
4581 idata.scan = do_scan;
4582 idata.policy = policy;
4583 idata.do_destroyed = do_destroyed;
4584 idata.do_all = do_all;
4585
4586 libpc_handle_t lpch = {
4587 .lpc_lib_handle = g_zfs,
4588 .lpc_ops = &libzfs_config_ops,
4589 .lpc_printerr = B_TRUE
4590 };
4591 pools = zpool_search_import(&lpch, &idata);
4592
4593 if (pools != NULL && pool_exists &&
4594 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
4595 (void) fprintf(stderr, gettext("cannot import '%s': "
4596 "a pool with that name already exists\n"),
4597 argv[0]);
4598 (void) fprintf(stderr, gettext("use the form '%s "
4599 "<pool | id> <newpool>' to give it a new name\n"),
4600 "zpool import");
4601 err = 1;
4602 } else if (pools == NULL && pool_exists) {
4603 (void) fprintf(stderr, gettext("cannot import '%s': "
4604 "a pool with that name is already created/imported,\n"),
4605 argv[0]);
4606 (void) fprintf(stderr, gettext("and no additional pools "
4607 "with that name were found\n"));
4608 err = 1;
4609 } else if (pools == NULL) {
4610 if (argc != 0) {
4611 (void) fprintf(stderr, gettext("cannot import '%s': "
4612 "no such pool available\n"), argv[0]);
4613 }
4614 err = 1;
4615 }
4616
4617 if (err == 1) {
4618 free(searchdirs);
4619 free(envdup);
4620 nvlist_free(policy);
4621 nvlist_free(pools);
4622 nvlist_free(props);
4623 return (1);
4624 }
4625
4626 err = import_pools(pools, props, mntopts, flags,
4627 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata);
4628
4629 /*
4630 * If we're using the cachefile and we failed to import, then
4631 * fall back to scanning the directories for pools that match
4632 * those in the cachefile.
4633 */
4634 if (err != 0 && cachefile != NULL) {
4635 (void) printf(gettext("cachefile import failed, retrying\n"));
4636
4637 /*
4638 * We use the scan flag to gather the directories that exist
4639 * in the cachefile. If we need to fall back to searching for
4640 * the pool config, we will only search devices in these
4641 * directories.
4642 */
4643 idata.scan = B_TRUE;
4644 nvlist_free(pools);
4645 pools = zpool_search_import(&lpch, &idata);
4646
4647 err = import_pools(pools, props, mntopts, flags,
4648 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL,
4649 &idata);
4650 }
4651
4652 error:
4653 nvlist_free(props);
4654 nvlist_free(pools);
4655 nvlist_free(policy);
4656 free(searchdirs);
4657 free(envdup);
4658
4659 return (err ? 1 : 0);
4660 }
4661
4662 /*
4663 * zpool sync [-f] [pool] ...
4664 *
4665 * -f (undocumented) force an uberblock (and config, including the
4666 * zpool cache file) update.
4667 *
4668 * Sync the specified pool(s).
4669 * Without arguments "zpool sync" will sync all pools.
4670 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
4671 *
4672 */
4673 static int
4674 zpool_do_sync(int argc, char **argv)
4675 {
4676 int ret;
4677 boolean_t force = B_FALSE;
4678
4679 /* check options */
4680 while ((ret = getopt(argc, argv, "f")) != -1) {
4681 switch (ret) {
4682 case 'f':
4683 force = B_TRUE;
4684 break;
4685 case '?':
4686 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4687 optopt);
4688 usage(B_FALSE);
4689 }
4690 }
4691
4692 argc -= optind;
4693 argv += optind;
4694
4695 /* if argc == 0 we will execute zpool_sync_one on all pools */
4696 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
4697 B_FALSE, zpool_sync_one, &force);
4698
4699 return (ret);
4700 }
4701
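/*
 * Callback state threaded through the 'zpool iostat' printing routines.
 */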
4702 typedef struct iostat_cbdata {
4703 uint64_t cb_flags;
4704 int cb_namewidth;
4705 int cb_iteration;
4706 boolean_t cb_verbose;
4707 boolean_t cb_literal;
4708 boolean_t cb_scripted;
4709 zpool_list_t *cb_list;
4710 vdev_cmd_data_list_t *vcdl;
4711 vdev_cbdata_t cb_vdevs;
4712 } iostat_cbdata_t;
4713
4714 /* iostat labels */
4715 typedef struct name_and_columns {
4716 const char *name; /* Column name */
4717 unsigned int columns; /* Center name to this number of columns */
4718 } name_and_columns_t;
4719
4720 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
4721
4722 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
4723 {
4724 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
4725 {NULL}},
4726 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4727 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
4728 {NULL}},
4729 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
4730 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
4731 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
4732 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4733 {"asyncq_wait", 2}, {NULL}},
4734 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
4735 {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
4736 {"trim", 2}, {"rebuild", 2}, {NULL}},
4737 };
4738
4739 /* Shorthand - if "columns" field not set, default to 1 column */
4740 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
4741 {
4742 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
4743 {"write"}, {NULL}},
4744 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4745 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
4746 {NULL}},
4747 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
4748 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
4749 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
4750 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4751 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
4752 {NULL}},
4753 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4754 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4755 {"ind"}, {"agg"}, {NULL}},
4756 };
4757
4758 static const char *histo_to_title[] = {
4759 [IOS_L_HISTO] = "latency",
4760 [IOS_RQ_HISTO] = "req_size",
4761 };
4762
4763 /*
4764 * Return the number of labels in a null-terminated name_and_columns_t
4765 * array.
4766 * The terminating {NULL} entry is not counted.
4767 */
4768 static unsigned int
4769 label_array_len(const name_and_columns_t *labels)
4770 {
4771 int i = 0;
4772
4773 while (labels[i].name)
4774 i++;
4775
4776 return (i);
4777 }
4778
4779 /*
4780 * Return the number of strings in a null-terminated string array.
4781 * For example:
4782 *
4783 * const char foo[] = {"bar", "baz", NULL}
4784 *
4785 * returns 2
4786 */
4787 static uint64_t
4788 str_array_len(const char *array[])
4789 {
4790 uint64_t i = 0;
4791 while (array[i])
4792 i++;
4793
4794 return (i);
4795 }
4796
4797
4798 /*
4799 * Return a default column width for default/latency/queue columns. This does
4800 * not include histograms, which have their columns autosized.
4801 */
4802 static unsigned int
4803 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
4804 {
4805 unsigned long column_width = 5; /* Normal niceprint */
4806 static unsigned long widths[] = {
4807 /*
4808 * Choose some sane default column sizes for printing the
4809 * raw numbers.
4810 */
4811 [IOS_DEFAULT] = 15, /* 1PB capacity */
4812 [IOS_LATENCY] = 10, /* 1B ns = 10sec */
4813 [IOS_QUEUES] = 6, /* 1M queue entries */
4814 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4815 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4816 };
4817
4818 if (cb->cb_literal)
4819 column_width = widths[type];
4820
4821 return (column_width);
4822 }
4823
4824 /*
4825 * Print the column labels, i.e:
4826 *
4827 * capacity operations bandwidth
4828 * alloc free read write read write ...
4829 *
4830 * If force_column_width is set, use it for the column width. If not set, use
4831 * the default column width.
4832 */
4833 static void
4834 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4835 const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4836 {
4837 int i, idx, s;
4838 int text_start, rw_column_width, spaces_to_end;
4839 uint64_t flags = cb->cb_flags;
4840 uint64_t f;
4841 unsigned int column_width = force_column_width;
4842
4843 /* For each bit set in flags */
4844 for (f = flags; f; f &= ~(1ULL << idx)) {
4845 idx = lowbit64(f) - 1;
4846 if (!force_column_width)
4847 column_width = default_column_width(cb, idx);
4848 /* Print our top labels centered over "read write" label. */
4849 for (i = 0; i < label_array_len(labels[idx]); i++) {
4850 const char *name = labels[idx][i].name;
4851 /*
4852 * We treat labels[][].columns == 0 as shorthand
4853 * for one column. It makes writing out the label
4854 * tables more concise.
4855 */
4856 unsigned int columns = MAX(1, labels[idx][i].columns);
4857 unsigned int slen = strlen(name);
4858
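/*
 * Width spanned by this label: the underlying columns plus
 * the two-space gap between each pair of columns.
 */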
4859 rw_column_width = (column_width * columns) +
4860 (2 * (columns - 1));
4861
4862 text_start = (int)((rw_column_width) / columns -
4863 slen / columns);
4864 if (text_start < 0)
4865 text_start = 0;
4866
4867 printf(" "); /* Two spaces between columns */
4868
4869 /* Space from beginning of column to label */
4870 for (s = 0; s < text_start; s++)
4871 printf(" ");
4872
4873 printf("%s", name);
4874
4875 /* Print space after label to end of column */
4876 spaces_to_end = rw_column_width - text_start - slen;
4877 if (spaces_to_end < 0)
4878 spaces_to_end = 0;
4879
4880 for (s = 0; s < spaces_to_end; s++)
4881 printf(" ");
4882 }
4883 }
4884 }
4885
4886
4887 /*
4888 * print_cmd_columns - Print custom column titles from -c
4889 *
4890 * If the user specified the "zpool status|iostat -c" then print their custom
4891 * column titles in the header. For example, print_cmd_columns() would print
4892 * the " col1 col2" part of this:
4893 *
4894 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4895 * ...
4896 * capacity operations bandwidth
4897 * pool alloc free read write read write col1 col2
4898 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4899 * mypool 269K 1008M 0 0 107 946
4900 * mirror 269K 1008M 0 0 107 946
4901 * sdb - - 0 0 102 473 val1 val2
4902 * sdc - - 0 0 5 473 val1 val2
4903 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4904 */
4905 static void
4906 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4907 {
4908 int i, j;
4909 vdev_cmd_data_t *data = &vcdl->data[0];
4910
4911 if (vcdl->count == 0 || data == NULL)
4912 return;
4913
4914 /*
4915 * Each vdev cmd should have the same column names unless the user did
4916 * something weird with their cmd. Just take the column names from the
4917 * first vdev and assume it works for all of them.
4918 */
4919 for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4920 printf(" ");
4921 if (use_dashes) {
4922 for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4923 printf("-");
4924 } else {
4925 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
4926 vcdl->uniq_cols[i]);
4927 }
4928 }
4929 }
4930
4931
4932 /*
4933 * Utility function to print out a line of dashes like:
4934 *
4935 * -------------------------------- ----- ----- ----- ----- -----
4936 *
4937 * ...or a dashed named-row line like:
4938 *
4939 * logs - - - - -
4940 *
4941 * @cb: iostat data
4942 *
4943 * @force_column_width If non-zero, use the value as the column width.
4944 * Otherwise use the default column widths.
4945 *
4946 * @name: Print a dashed named-row line starting
4947 * with @name. Otherwise, print a regular
4948 * dashed line.
4949 */
4950 static void
4951 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
4952 const char *name)
4953 {
4954 int i;
4955 unsigned int namewidth;
4956 uint64_t flags = cb->cb_flags;
4957 uint64_t f;
4958 int idx;
4959 const name_and_columns_t *labels;
4960 const char *title;
4961
4962
4963 if (cb->cb_flags & IOS_ANYHISTO_M) {
4964 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4965 } else if (cb->cb_vdevs.cb_names_count) {
4966 title = "vdev";
4967 } else {
4968 title = "pool";
4969 }
4970
4971 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4972 name ? strlen(name) : 0);
4973
4974
4975 if (name) {
4976 printf("%-*s", namewidth, name);
4977 } else {
4978 for (i = 0; i < namewidth; i++)
4979 (void) printf("-");
4980 }
4981
4982 /* For each bit in flags */
4983 for (f = flags; f; f &= ~(1ULL << idx)) {
4984 unsigned int column_width;
4985 idx = lowbit64(f) - 1;
4986 if (force_column_width)
4987 column_width = force_column_width;
4988 else
4989 column_width = default_column_width(cb, idx);
4990
4991 labels = iostat_bottom_labels[idx];
4992 for (i = 0; i < label_array_len(labels); i++) {
4993 if (name)
4994 printf(" %*s-", column_width - 1, " ");
4995 else
4996 printf(" %.*s", column_width,
4997 "--------------------");
4998 }
4999 }
5000 }
5001
5002
5003 static void
5004 print_iostat_separator_impl(iostat_cbdata_t *cb,
5005 unsigned int force_column_width)
5006 {
5007 print_iostat_dashes(cb, force_column_width, NULL);
5008 }
5009
5010 static void
5011 print_iostat_separator(iostat_cbdata_t *cb)
5012 {
5013 print_iostat_separator_impl(cb, 0);
5014 }
5015
5016 static void
5017 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
5018 const char *histo_vdev_name)
5019 {
5020 unsigned int namewidth;
5021 const char *title;
5022
5023 color_start(ANSI_BOLD);
5024
5025 if (cb->cb_flags & IOS_ANYHISTO_M) {
5026 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
5027 } else if (cb->cb_vdevs.cb_names_count) {
5028 title = "vdev";
5029 } else {
5030 title = "pool";
5031 }
5032
5033 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
5034 histo_vdev_name ? strlen(histo_vdev_name) : 0);
5035
5036 if (histo_vdev_name)
5037 printf("%-*s", namewidth, histo_vdev_name);
5038 else
5039 printf("%*s", namewidth, "");
5040
5041
5042 print_iostat_labels(cb, force_column_width, iostat_top_labels);
5043 printf("\n");
5044
5045 printf("%-*s", namewidth, title);
5046
5047 print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
5048 if (cb->vcdl != NULL)
5049 print_cmd_columns(cb->vcdl, 0);
5050
5051 printf("\n");
5052
5053 print_iostat_separator_impl(cb, force_column_width);
5054
5055 if (cb->vcdl != NULL)
5056 print_cmd_columns(cb->vcdl, 1);
5057
5058 color_end();
5059
5060 printf("\n");
5061 }
5062
5063 static void
5064 print_iostat_header(iostat_cbdata_t *cb)
5065 {
5066 print_iostat_header_impl(cb, 0, NULL);
5067 }
5068
5069 /*
5070 * Prints a size string (e.g. 120M) with the suffix ("M") colored
5071 * by order of magnitude. Uses column_size to add padding.
5072 */
5073 static void
5074 print_stat_color(const char *statbuf, unsigned int column_size)
5075 {
5076 fputs(" ", stdout);
5077 size_t len = strlen(statbuf);
5078 while (len < column_size) {
5079 fputc(' ', stdout);
5080 column_size--;
5081 }
5082 if (*statbuf == '0') {
5083 color_start(ANSI_GRAY);
5084 fputc('0', stdout);
5085 } else {
5086 for (; *statbuf; statbuf++) {
5087 if (*statbuf == 'K') color_start(ANSI_GREEN);
5088 else if (*statbuf == 'M') color_start(ANSI_YELLOW);
5089 else if (*statbuf == 'G') color_start(ANSI_RED);
5090 else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
5091 else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
5092 else if (*statbuf == 'E') color_start(ANSI_CYAN);
5093 fputc(*statbuf, stdout);
5094 if (--column_size <= 0)
5095 break;
5096 }
5097 }
5098 color_end();
5099 }
5100
5101 /*
5102 * Display a single statistic.
5103 */
5104 static void
5105 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
5106 unsigned int column_size, boolean_t scripted)
5107 {
5108 char buf[64];
5109
5110 zfs_nicenum_format(value, buf, sizeof (buf), format);
5111
5112 if (scripted)
5113 printf("\t%s", buf);
5114 else
5115 print_stat_color(buf, column_size);
5116 }
5117
5118 /*
5119 * Calculate the default vdev stats
5120 *
5121 * Subtract oldvs from newvs and save the resulting deltas into calcvs;
5122 * the caller applies any scaling when the stats are printed.
5123 */
5124 static void
5125 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
5126 vdev_stat_t *calcvs)
5127 {
5128 int i;
5129
5130 memcpy(calcvs, newvs, sizeof (*calcvs));
5131 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
5132 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
5133
5134 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
5135 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
5136 }
5137
5138 /*
5139 * Internal representation of the extended iostats data.
5140 *
5141 * The extended iostat stats are exported in nvlists as either uint64_t arrays
5142 * or single uint64_t's. We make both look like arrays to make them easier
5143 * to process. In order to make single uint64_t's look like arrays, we set
5144 * __data to the stat data, and then set *data = &__data with count = 1. Then,
5145 * we can just use *data and count.
5146 */
5147 struct stat_array {
5148 uint64_t *data;
5149 uint_t count; /* Number of entries in data[] */
5150 uint64_t __data; /* Only used when data is a single uint64_t */
5151 };
5152
5153 static uint64_t
5154 stat_histo_max(struct stat_array *nva, unsigned int len)
5155 {
5156 uint64_t max = 0;
5157 int i;
5158 for (i = 0; i < len; i++)
5159 max = MAX(max, array64_max(nva[i].data, nva[i].count));
5160
5161 return (max);
5162 }
5163
5164 /*
5165 * Helper function to lookup a uint64_t array or uint64_t value and store its
5166 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
5167 * it look like a one element array to make it easier to process.
5168 */
5169 static int
5170 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
5171 struct stat_array *nva)
5172 {
5173 nvpair_t *tmp;
5174 int ret;
5175
5176 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
5177 switch (nvpair_type(tmp)) {
5178 case DATA_TYPE_UINT64_ARRAY:
5179 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
5180 break;
5181 case DATA_TYPE_UINT64:
5182 ret = nvpair_value_uint64(tmp, &nva->__data);
5183 nva->data = &nva->__data;
5184 nva->count = 1;
5185 break;
5186 default:
5187 /* Not a uint64_t */
5188 ret = EINVAL;
5189 break;
5190 }
5191
5192 return (ret);
5193 }
5194
5195 /*
5196 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
5197 * subtract them, and return the results in a newly allocated stat_array.
5198 * You must free the returned array after you are done with it with
5199 * free_calc_stats().
5200 *
5201 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
5202 * values.
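 *
 * Typical call pattern (a sketch; 'names' and 'len' come from the caller):
 *
 *	struct stat_array *nva;
 *	nva = calc_and_alloc_stats_ex(names, len, oldnv, newnv);
 *	...consume nva[0 .. len-1].data and .count...
 *	free_calc_stats(nva, len);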
5203 */
5204 static struct stat_array *
5205 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
5206 nvlist_t *newnv)
5207 {
5208 nvlist_t *oldnvx = NULL, *newnvx;
5209 struct stat_array *oldnva, *newnva, *calcnva;
5210 int i, j;
5211 unsigned int alloc_size = (sizeof (struct stat_array)) * len;
5212
5213 /* Extract our extended stats nvlist from the main list */
5214 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
5215 &newnvx) == 0);
5216 if (oldnv) {
5217 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
5218 &oldnvx) == 0);
5219 }
5220
5221 newnva = safe_malloc(alloc_size);
5222 oldnva = safe_malloc(alloc_size);
5223 calcnva = safe_malloc(alloc_size);
5224
5225 for (j = 0; j < len; j++) {
5226 verify(nvpair64_to_stat_array(newnvx, names[j],
5227 &newnva[j]) == 0);
5228 calcnva[j].count = newnva[j].count;
5229 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
5230 calcnva[j].data = safe_malloc(alloc_size);
5231 memcpy(calcnva[j].data, newnva[j].data, alloc_size);
5232
5233 if (oldnvx) {
5234 verify(nvpair64_to_stat_array(oldnvx, names[j],
5235 &oldnva[j]) == 0);
5236 for (i = 0; i < oldnva[j].count; i++)
5237 calcnva[j].data[i] -= oldnva[j].data[i];
5238 }
5239 }
5240 free(newnva);
5241 free(oldnva);
5242 return (calcnva);
5243 }
5244
5245 static void
5246 free_calc_stats(struct stat_array *nva, unsigned int len)
5247 {
5248 int i;
5249 for (i = 0; i < len; i++)
5250 free(nva[i].data);
5251
5252 free(nva);
5253 }
5254
5255 static void
5256 print_iostat_histo(struct stat_array *nva, unsigned int len,
5257 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
5258 double scale)
5259 {
5260 int i, j;
5261 char buf[6];
5262 uint64_t val;
5263 enum zfs_nicenum_format format;
5264 unsigned int buckets;
5265 unsigned int start_bucket;
5266
5267 if (cb->cb_literal)
5268 format = ZFS_NICENUM_RAW;
5269 else
5270 format = ZFS_NICENUM_1024;
5271
5272 /* All these histos are the same size, so just use nva[0].count */
5273 buckets = nva[0].count;
5274
5275 if (cb->cb_flags & IOS_RQ_HISTO_M) {
5276 /* Start at 512 - req size should never be lower than this */
5277 start_bucket = 9;
5278 } else {
5279 start_bucket = 0;
5280 }
5281
5282 for (j = start_bucket; j < buckets; j++) {
5283 /* Print histogram bucket label */
5284 if (cb->cb_flags & IOS_L_HISTO_M) {
5285 /* Ending range of this bucket */
5286 val = (1UL << (j + 1)) - 1;
5287 zfs_nicetime(val, buf, sizeof (buf));
5288 } else {
5289 /* Request size (starting range of bucket) */
5290 val = (1UL << j);
5291 zfs_nicenum(val, buf, sizeof (buf));
5292 }
5293
5294 if (cb->cb_scripted)
5295 printf("%llu", (u_longlong_t)val);
5296 else
5297 printf("%-*s", namewidth, buf);
5298
5299 /* Print the values on the line */
5300 for (i = 0; i < len; i++) {
5301 print_one_stat(nva[i].data[j] * scale, format,
5302 column_width, cb->cb_scripted);
5303 }
5304 printf("\n");
5305 }
5306 }
5307
5308 static void
5309 print_solid_separator(unsigned int length)
5310 {
5311 while (length--)
5312 printf("-");
5313 printf("\n");
5314 }
5315
5316 static void
5317 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
5318 nvlist_t *newnv, double scale, const char *name)
5319 {
5320 unsigned int column_width;
5321 unsigned int namewidth;
5322 unsigned int entire_width;
5323 enum iostat_type type;
5324 struct stat_array *nva;
5325 const char **names;
5326 unsigned int names_len;
5327
5328 /* What type of histo are we? */
5329 type = IOS_HISTO_IDX(cb->cb_flags);
5330
5331 /* Get NULL-terminated array of nvlist names for our histo */
5332 names = vsx_type_to_nvlist[type];
5333 names_len = str_array_len(names); /* num of names */
5334
5335 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
5336
5337 if (cb->cb_literal) {
5338 column_width = MAX(5,
5339 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
5340 } else {
5341 column_width = 5;
5342 }
5343
5344 namewidth = MAX(cb->cb_namewidth,
5345 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
5346
5347 /*
5348 * Calculate the entire line width of what we're printing. The
5349 * +2 is for the two spaces between columns:
5350 */
5351 /* read write */
5352 /* ----- ----- */
5353 /* |___| <---------- column_width */
5354 /* */
5355 /* |__________| <--- entire_width */
5356 /* */
5357 entire_width = namewidth + (column_width + 2) *
5358 label_array_len(iostat_bottom_labels[type]);
5359
5360 if (cb->cb_scripted)
5361 printf("%s\n", name);
5362 else
5363 print_iostat_header_impl(cb, column_width, name);
5364
5365 print_iostat_histo(nva, names_len, cb, column_width,
5366 namewidth, scale);
5367
5368 free_calc_stats(nva, names_len);
5369 if (!cb->cb_scripted)
5370 print_solid_separator(entire_width);
5371 }
5372
5373 /*
5374 * Calculate the average latency of a power-of-two latency histogram
5375 */
5376 static uint64_t
5377 single_histo_average(uint64_t *histo, unsigned int buckets)
5378 {
5379 int i;
5380 uint64_t count = 0, total = 0;
5381
5382 for (i = 0; i < buckets; i++) {
5383 /*
5384 * Our buckets are power-of-two latency ranges. Use the
5385 * midpoint latency of each bucket to calculate the average.
5386 * For example:
5387 *
5388 * Bucket Midpoint
5389 * 8ns-15ns: 12ns
5390 * 16ns-31ns: 24ns
5391 * ...
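 *
 * e.g. two I/Os in the 8ns-15ns bucket and one in the
 * 16ns-31ns bucket average to (2*12 + 1*24) / 3 = 16ns.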
5392 */
5393 if (histo[i] != 0) {
5394 total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
5395 count += histo[i];
5396 }
5397 }
5398
5399 /* Prevent divide by zero */
5400 return (count == 0 ? 0 : total / count);
5401 }
5402
5403 static void
5404 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
5405 {
5406 const char *names[] = {
5407 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
5408 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
5409 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
5410 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
5411 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
5412 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
5413 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
5414 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
5415 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
5416 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
5417 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
5418 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
5419 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
5420 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
5421 };
5422
5423 struct stat_array *nva;
5424
5425 unsigned int column_width = default_column_width(cb, IOS_QUEUES);
5426 enum zfs_nicenum_format format;
5427
5428 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
5429
5430 if (cb->cb_literal)
5431 format = ZFS_NICENUM_RAW;
5432 else
5433 format = ZFS_NICENUM_1024;
5434
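/*
 * Queue depths are exported as single values rather than histograms,
 * so each stat_array here has count == 1 and we read data[0].
 */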
5435 for (int i = 0; i < ARRAY_SIZE(names); i++) {
5436 uint64_t val = nva[i].data[0];
5437 print_one_stat(val, format, column_width, cb->cb_scripted);
5438 }
5439
5440 free_calc_stats(nva, ARRAY_SIZE(names));
5441 }
5442
5443 static void
5444 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
5445 nvlist_t *newnv)
5446 {
5447 int i;
5448 uint64_t val;
5449 const char *names[] = {
5450 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
5451 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
5452 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
5453 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
5454 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
5455 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
5456 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
5457 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
5458 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
5459 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
5460 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
5461 };
5462 struct stat_array *nva;
5463
5464 unsigned int column_width = default_column_width(cb, IOS_LATENCY);
5465 enum zfs_nicenum_format format;
5466
5467 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
5468
5469 if (cb->cb_literal)
5470 format = ZFS_NICENUM_RAWTIME;
5471 else
5472 format = ZFS_NICENUM_TIME;
5473
5474 /* Print our avg latencies on the line */
5475 for (i = 0; i < ARRAY_SIZE(names); i++) {
5476 /* Compute average latency for a latency histo */
5477 val = single_histo_average(nva[i].data, nva[i].count);
5478 print_one_stat(val, format, column_width, cb->cb_scripted);
5479 }
5480 free_calc_stats(nva, ARRAY_SIZE(names));
5481 }
5482
5483 /*
5484 * Print default statistics (capacity/operations/bandwidth)
5485 */
5486 static void
5487 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
5488 {
5489 unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
5490 enum zfs_nicenum_format format;
5491 char na; /* char to print for "not applicable" values */
5492
5493 if (cb->cb_literal) {
5494 format = ZFS_NICENUM_RAW;
5495 na = '0';
5496 } else {
5497 format = ZFS_NICENUM_1024;
5498 na = '-';
5499 }
5500
5501 /* only toplevel vdevs have capacity stats */
5502 if (vs->vs_space == 0) {
5503 if (cb->cb_scripted)
5504 printf("\t%c\t%c", na, na);
5505 else
5506 printf(" %*c %*c", column_width, na, column_width,
5507 na);
5508 } else {
5509 print_one_stat(vs->vs_alloc, format, column_width,
5510 cb->cb_scripted);
5511 print_one_stat(vs->vs_space - vs->vs_alloc, format,
5512 column_width, cb->cb_scripted);
5513 }
5514
5515 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
5516 format, column_width, cb->cb_scripted);
5517 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
5518 format, column_width, cb->cb_scripted);
5519 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
5520 format, column_width, cb->cb_scripted);
5521 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
5522 format, column_width, cb->cb_scripted);
5523 }
5524
5525 static const char *const class_name[] = {
5526 VDEV_ALLOC_BIAS_DEDUP,
5527 VDEV_ALLOC_BIAS_SPECIAL,
5528 VDEV_ALLOC_CLASS_LOGS
5529 };
5530
5531 /*
5532 * Print out all the statistics for the given vdev. This may be the
5533 * toplevel configuration or a recursive call. If 'name' is NULL, then this
5534 * is a verbose output, and we don't want to display the toplevel pool stats.
5535 *
5536 * Returns the number of stat lines printed.
5537 */
5538 static unsigned int
5539 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
5540 nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
5541 {
5542 nvlist_t **oldchild, **newchild;
5543 uint_t c, children, oldchildren;
5544 vdev_stat_t *oldvs, *newvs, *calcvs;
5545 vdev_stat_t zerovs = { 0 };
5546 char *vname;
5547 int i;
5548 int ret = 0;
5549 uint64_t tdelta;
5550 double scale;
5551
5552 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
5553 return (ret);
5554
5555 calcvs = safe_malloc(sizeof (*calcvs));
5556
5557 if (oldnv != NULL) {
5558 verify(nvlist_lookup_uint64_array(oldnv,
5559 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
5560 } else {
5561 oldvs = &zerovs;
5562 }
5563
5564 /* Do we only want to see a specific vdev? */
5565 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
5566 /* Yes we do. Is this the vdev? */
5567 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
5568 /*
5569 * This is our vdev. Since it is the only vdev we
5570 * will be displaying, make depth = 0 so that it
5571 * doesn't get indented.
5572 */
5573 depth = 0;
5574 break;
5575 }
5576 }
5577
5578 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
5579 /* Couldn't match the name */
5580 goto children;
5581 }
5582
5583
5584 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
5585 (uint64_t **)&newvs, &c) == 0);
5586
5587 /*
5588 * Print the vdev name unless we're printing a histogram. Histograms
5589 * display the vdev name in the header itself.
5590 */
5591 if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
5592 if (cb->cb_scripted) {
5593 printf("%s", name);
5594 } else {
5595 if (strlen(name) + depth > cb->cb_namewidth)
5596 (void) printf("%*s%s", depth, "", name);
5597 else
5598 (void) printf("%*s%s%*s", depth, "", name,
5599 (int)(cb->cb_namewidth - strlen(name) -
5600 depth), "");
5601 }
5602 }
5603
5604 /* Calculate our scaling factor */
5605 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
5606 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
5607 /*
5608 * If we specify printing histograms with no time interval, then
5609 * print the histogram numbers over the entire lifetime of the
5610 * vdev.
5611 */
5612 scale = 1;
5613 } else {
5614 if (tdelta == 0)
5615 scale = 1.0;
5616 else
5617 scale = (double)NANOSEC / tdelta;
5618 }
5619
5620 if (cb->cb_flags & IOS_DEFAULT_M) {
5621 calc_default_iostats(oldvs, newvs, calcvs);
5622 print_iostat_default(calcvs, cb, scale);
5623 }
5624 if (cb->cb_flags & IOS_LATENCY_M)
5625 print_iostat_latency(cb, oldnv, newnv);
5626 if (cb->cb_flags & IOS_QUEUES_M)
5627 print_iostat_queues(cb, newnv);
5628 if (cb->cb_flags & IOS_ANYHISTO_M) {
5629 printf("\n");
5630 print_iostat_histos(cb, oldnv, newnv, scale, name);
5631 }
5632
5633 if (cb->vcdl != NULL) {
5634 const char *path;
5635 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
5636 &path) == 0) {
5637 printf(" ");
5638 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
5639 }
5640 }
5641
5642 if (!(cb->cb_flags & IOS_ANYHISTO_M))
5643 printf("\n");
5644
5645 ret++;
5646
5647 children:
5648
5649 free(calcvs);
5650
5651 if (!cb->cb_verbose)
5652 return (ret);
5653
5654 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
5655 &newchild, &children) != 0)
5656 return (ret);
5657
5658 if (oldnv) {
5659 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
5660 &oldchild, &oldchildren) != 0)
5661 return (ret);
5662
5663 children = MIN(oldchildren, children);
5664 }
5665
5666 /*
5667 * print normal top-level devices
5668 */
5669 for (c = 0; c < children; c++) {
5670 uint64_t ishole = B_FALSE, islog = B_FALSE;
5671
5672 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
5673 &ishole);
5674
5675 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
5676 &islog);
5677
5678 if (ishole || islog)
5679 continue;
5680
5681 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
5682 continue;
5683
5684 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5685 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5686 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
5687 newchild[c], cb, depth + 2);
5688 free(vname);
5689 }
5690
5691 /*
5692 * print all other top-level devices
5693 */
5694 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
5695 boolean_t printed = B_FALSE;
5696
5697 for (c = 0; c < children; c++) {
5698 uint64_t islog = B_FALSE;
5699 const char *bias = NULL;
5700 const char *type = NULL;
5701
5702 (void) nvlist_lookup_uint64(newchild[c],
5703 ZPOOL_CONFIG_IS_LOG, &islog);
5704 if (islog) {
5705 bias = VDEV_ALLOC_CLASS_LOGS;
5706 } else {
5707 (void) nvlist_lookup_string(newchild[c],
5708 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
5709 (void) nvlist_lookup_string(newchild[c],
5710 ZPOOL_CONFIG_TYPE, &type);
5711 }
5712 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
5713 continue;
5714 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
5715 continue;
5716
5717 if (!printed) {
5718 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
5719 !cb->cb_scripted &&
5720 !cb->cb_vdevs.cb_names) {
5721 print_iostat_dashes(cb, 0,
5722 class_name[n]);
5723 }
5724 printf("\n");
5725 printed = B_TRUE;
5726 }
5727
5728 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5729 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5730 ret += print_vdev_stats(zhp, vname, oldnv ?
5731 oldchild[c] : NULL, newchild[c], cb, depth + 2);
5732 free(vname);
5733 }
5734 }
5735
5736 /*
5737 * Include level 2 ARC devices in iostat output
5738 */
5739 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
5740 &newchild, &children) != 0)
5741 return (ret);
5742
5743 if (oldnv) {
5744 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
5745 &oldchild, &oldchildren) != 0)
5746 return (ret);
5747
5748 children = MIN(oldchildren, children);
5749 }
5750
5751 if (children > 0) {
5752 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
5753 !cb->cb_vdevs.cb_names) {
5754 print_iostat_dashes(cb, 0, "cache");
5755 }
5756 printf("\n");
5757
5758 for (c = 0; c < children; c++) {
5759 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5760 cb->cb_vdevs.cb_name_flags);
5761 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
5762 : NULL, newchild[c], cb, depth + 2);
5763 free(vname);
5764 }
5765 }
5766
5767 return (ret);
5768 }
5769
5770 static int
5771 refresh_iostat(zpool_handle_t *zhp, void *data)
5772 {
5773 iostat_cbdata_t *cb = data;
5774 boolean_t missing;
5775
5776 /*
5777 * If the pool has disappeared, remove it from the list and continue.
5778 */
5779 if (zpool_refresh_stats(zhp, &missing) != 0)
5780 return (-1);
5781
5782 if (missing)
5783 pool_list_remove(cb->cb_list, zhp);
5784
5785 return (0);
5786 }
5787
5788 /*
5789 * Callback to print out the iostats for the given pool.
5790 */
5791 static int
5792 print_iostat(zpool_handle_t *zhp, void *data)
5793 {
5794 iostat_cbdata_t *cb = data;
5795 nvlist_t *oldconfig, *newconfig;
5796 nvlist_t *oldnvroot, *newnvroot;
5797 int ret;
5798
5799 newconfig = zpool_get_config(zhp, &oldconfig);
5800
5801 if (cb->cb_iteration == 1)
5802 oldconfig = NULL;
5803
5804 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
5805 &newnvroot) == 0);
5806
5807 if (oldconfig == NULL)
5808 oldnvroot = NULL;
5809 else
5810 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
5811 &oldnvroot) == 0);
5812
5813 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
5814 cb, 0);
5815 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
5816 !cb->cb_scripted && cb->cb_verbose &&
5817 !cb->cb_vdevs.cb_names_count) {
5818 print_iostat_separator(cb);
5819 if (cb->vcdl != NULL) {
5820 print_cmd_columns(cb->vcdl, 1);
5821 }
5822 printf("\n");
5823 }
5824
5825 return (ret);
5826 }
5827
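/*
 * Return the width of the terminal in columns. Assume 80 columns if the
 * TIOCGWINSZ ioctl fails, and an effectively unlimited width (999) when
 * stdout is not a TTY.
 */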
5828 static int
5829 get_columns(void)
5830 {
5831 struct winsize ws;
5832 int columns = 80;
5833 int error;
5834
5835 if (isatty(STDOUT_FILENO)) {
5836 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
5837 if (error == 0)
5838 columns = ws.ws_col;
5839 } else {
5840 columns = 999;
5841 }
5842
5843 return (columns);
5844 }
5845
5846 /*
5847 * Return the required length of the pool/vdev name column. The minimum
5848 * allowed width and output formatting flags must be provided.
5849 */
5850 static int
5851 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5852 {
5853 nvlist_t *config, *nvroot;
5854 int width = min_width;
5855
5856 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5857 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5858 &nvroot) == 0);
5859 size_t poolname_len = strlen(zpool_get_name(zhp));
5860 if (verbose == B_FALSE) {
5861 width = MAX(poolname_len, min_width);
5862 } else {
5863 width = MAX(poolname_len,
5864 max_width(zhp, nvroot, 0, min_width, flags));
5865 }
5866 }
5867
5868 return (width);
5869 }
5870
5871 /*
5872 * Parse the input string and get the 'interval' and 'count' values, if present.
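 * E.g. for "zpool iostat tank 2 5", the interval is 2 and the count is 5.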
5873 */
5874 static void
5875 get_interval_count(int *argcp, char **argv, float *iv,
5876 unsigned long *cnt)
5877 {
5878 float interval = 0;
5879 unsigned long count = 0;
5880 int argc = *argcp;
5881
5882 /*
5883 * Determine if the last argument is an integer or a pool name
5884 */
5885 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5886 char *end;
5887
5888 errno = 0;
5889 interval = strtof(argv[argc - 1], &end);
5890
5891 if (*end == '\0' && errno == 0) {
5892 if (interval == 0) {
5893 (void) fprintf(stderr, gettext(
5894 "interval cannot be zero\n"));
5895 usage(B_FALSE);
5896 }
5897 /*
5898 * Ignore the last parameter
5899 */
5900 argc--;
5901 } else {
5902 /*
5903 * If this is not a valid number, just plow on. The
5904 * user will get a more informative error message later
5905 * on.
5906 */
5907 interval = 0;
5908 }
5909 }
5910
5911 /*
5912 * If the last argument is also an integer, then we have both a count
5913 * and an interval.
5914 */
5915 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5916 char *end;
5917
5918 errno = 0;
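/* The number parsed on the first pass was actually the count */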
5919 count = interval;
5920 interval = strtof(argv[argc - 1], &end);
5921
5922 if (*end == '\0' && errno == 0) {
5923 if (interval == 0) {
5924 (void) fprintf(stderr, gettext(
5925 "interval cannot be zero\n"));
5926 usage(B_FALSE);
5927 }
5928
5929 /*
5930 * Ignore the last parameter
5931 */
5932 argc--;
5933 } else {
5934 interval = 0;
5935 }
5936 }
5937
5938 *iv = interval;
5939 *cnt = count;
5940 *argcp = argc;
5941 }
5942
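/*
 * Parse the argument to -T: 'u' selects Unix time, 'd' selects date(1)
 * formatting; anything else is a usage error.
 */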
5943 static void
5944 get_timestamp_arg(char c)
5945 {
5946 if (c == 'u')
5947 timestamp_fmt = UDATE;
5948 else if (c == 'd')
5949 timestamp_fmt = DDATE;
5950 else
5951 usage(B_FALSE);
5952 }
5953
5954 /*
5955 * Return stat flags that are supported by all pools by both the module and
5956 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5957 * It will get ANDed down until only the flags that are supported on all pools
5958 * remain.
5959 */
5960 static int
5961 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5962 {
5963 uint64_t *mask = data;
5964 nvlist_t *config, *nvroot, *nvx;
5965 uint64_t flags = 0;
5966 int i, j;
5967
5968 config = zpool_get_config(zhp, NULL);
5969 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5970 &nvroot) == 0);
5971
5972 /* Default stats are always supported, but for completeness.. */
5973 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5974 flags |= IOS_DEFAULT_M;
5975
5976 /* Get our extended stats nvlist from the main list */
5977 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5978 &nvx) != 0) {
5979 /*
5980 * No extended stats; they're probably running an older
5981 * module. No big deal, we support that too.
5982 */
5983 goto end;
5984 }
5985
5986 /* For each extended stat, make sure all its nvpairs are supported */
5987 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5988 if (!vsx_type_to_nvlist[j][0])
5989 continue;
5990
5991 /* Start off by assuming the flag is supported, then check */
5992 flags |= (1ULL << j);
5993 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5994 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5995 /* flag isn't supported */
5996 flags = flags & ~(1ULL << j);
5997 break;
5998 }
5999 }
6000 }
6001 end:
6002 *mask = *mask & flags;
6003 return (0);
6004 }
6005
6006 /*
6007 * Return a bitmask of stats that are supported on all pools by both the module
6008 * and zpool iostat.
6009 */
6010 static uint64_t
6011 get_stat_flags(zpool_list_t *list)
6012 {
6013 uint64_t mask = -1;
6014
6015 /*
6016 * get_stat_flags_cb() will lop off bits from "mask" until only the
6017 * flags that are supported on all pools remain.
6018 */
6019 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
6020 return (mask);
6021 }
6022
6023 /*
6024 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
6025 */
6026 static int
6027 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
6028 {
6029 uint64_t guid;
6030 vdev_cbdata_t *cb = cb_data;
6031 zpool_handle_t *zhp = zhp_data;
6032
6033 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
6034 return (0);
6035
6036 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
6037 }
6038
6039 /*
6040 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
6041 */
6042 static int
6043 is_vdev(zpool_handle_t *zhp, void *cb_data)
6044 {
6045 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
6046 }
6047
6048 /*
6049 * Check if vdevs are in a pool
6050 *
6051 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
6052 * return 0. If pool_name is NULL, then search all pools.
6053 */
6054 static int
6055 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
6056 vdev_cbdata_t *cb)
6057 {
6058 char **tmp_name;
6059 int ret = 0;
6060 int i;
6061 int pool_count = 0;
6062
6063 if ((argc == 0) || !*argv)
6064 return (0);
6065
6066 if (pool_name)
6067 pool_count = 1;
6068
6069 /* Temporarily hijack cb_names for a second... */
6070 tmp_name = cb->cb_names;
6071
6072 /* Go through our list of prospective vdev names */
6073 for (i = 0; i < argc; i++) {
6074 cb->cb_names = argv + i;
6075
6076 /* Is this name a vdev in our pools? */
6077 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
6078 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
6079 if (!ret) {
6080 /* No match */
6081 break;
6082 }
6083 }
6084
6085 cb->cb_names = tmp_name;
6086
6087 return (ret);
6088 }
6089
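/* Callback for is_pool(): returns 1 when this pool's name matches *data */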
6090 static int
6091 is_pool_cb(zpool_handle_t *zhp, void *data)
6092 {
6093 char *name = data;
6094 if (strcmp(name, zpool_get_name(zhp)) == 0)
6095 return (1);
6096
6097 return (0);
6098 }
6099
6100 /*
6101 * Do we have a pool named *name? If so, return 1, otherwise 0.
6102 */
6103 static int
6104 is_pool(char *name)
6105 {
6106 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
6107 is_pool_cb, name));
6108 }
6109
6110 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
6111 static int
6112 are_all_pools(int argc, char **argv)
6113 {
6114 if ((argc == 0) || !*argv)
6115 return (0);
6116
6117 while (--argc >= 0)
6118 if (!is_pool(argv[argc]))
6119 return (0);
6120
6121 return (1);
6122 }
6123
6124 /*
6125 * Helper function to print out vdev/pool names we can't resolve. Used for an
6126 * error message.
6127 */
6128 static void
6129 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
6130 vdev_cbdata_t *cb)
6131 {
6132 int i;
6133 char *name;
6134 char *str;
6135 for (i = 0; i < argc; i++) {
6136 name = argv[i];
6137
6138 if (is_pool(name))
6139 str = gettext("pool");
6140 else if (are_vdevs_in_pool(1, &name, pool_name, cb))
6141 str = gettext("vdev in this pool");
6142 else if (are_vdevs_in_pool(1, &name, NULL, cb))
6143 str = gettext("vdev in another pool");
6144 else
6145 str = gettext("unknown");
6146
6147 fprintf(stderr, "\t%s (%s)\n", name, str);
6148 }
6149 }
6150
6151 /*
6152 * Same as get_interval_count(), but with additional checks to not misinterpret
6153 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
6154 * cb.cb_vdevs.cb_name_flags.
6155 */
6156 static void
6157 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
6158 unsigned long *count, iostat_cbdata_t *cb)
6159 {
6160 char **tmpargv = argv;
6161 int argc_for_interval = 0;
6162
6163 /* Is the last arg an interval value? Or a guid? */
6164 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
6165 &cb->cb_vdevs)) {
6166 /*
6167 * The last arg is not a guid, so it's probably an
6168 * interval value.
6169 */
6170 argc_for_interval++;
6171
6172 if (*argc >= 2 &&
6173 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
6174 &cb->cb_vdevs)) {
6175 /*
6176 * The 2nd to last arg is not a guid, so it's probably
6177 * an interval value.
6178 */
6179 argc_for_interval++;
6180 }
6181 }
6182
6183 /* Point to our list of possible intervals */
6184 tmpargv = &argv[*argc - argc_for_interval];
6185
6186 *argc = *argc - argc_for_interval;
6187 get_interval_count(&argc_for_interval, tmpargv,
6188 interval, count);
6189 }
6190
6191 /*
6192 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
6193 * if we were unable to determine its size.
6194 */
6195 static int
6196 terminal_height(void)
6197 {
6198 struct winsize win;
6199
6200 if (isatty(STDOUT_FILENO) == 0)
6201 return (-1);
6202
6203 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
6204 return (win.ws_row);
6205
6206 return (-1);
6207 }
6208
6209 /*
6210 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
6211 * print the result.
6212 *
6213 * name: Short name of the script ('iostat').
6214 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
6215 */
6216 static void
6217 print_zpool_script_help(char *name, char *path)
6218 {
6219 char *argv[] = {path, (char *)"-h", NULL};
6220 char **lines = NULL;
6221 int lines_cnt = 0;
6222 int rc;
6223
6224 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
6225 &lines_cnt);
6226 if (rc != 0 || lines == NULL || lines_cnt <= 0) {
6227 if (lines != NULL)
6228 libzfs_free_str_array(lines, lines_cnt);
6229 return;
6230 }
6231
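/* Print each non-blank line of the help text, prefixed by the script name */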
6232 for (int i = 0; i < lines_cnt; i++)
6233 if (!is_blank_str(lines[i]))
6234 printf(" %-14s %s\n", name, lines[i]);
6235
6236 libzfs_free_str_array(lines, lines_cnt);
6237 }
6238
6239 /*
6240 * Go through the zpool status/iostat -c scripts in the user's path, run their
6241 * help option (-h), and print out the results.
6242 */
6243 static void
6244 print_zpool_dir_scripts(char *dirpath)
6245 {
6246 DIR *dir;
6247 struct dirent *ent;
6248 char fullpath[MAXPATHLEN];
6249 struct stat dir_stat;
6250
6251 if ((dir = opendir(dirpath)) != NULL) {
6252 /* print all the files and directories within directory */
6253 while ((ent = readdir(dir)) != NULL) {
6254 if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
6255 dirpath, ent->d_name) >= sizeof (fullpath)) {
6256 (void) fprintf(stderr,
6257 gettext("internal error: "
6258 "ZPOOL_SCRIPTS_PATH too large.\n"));
6259 exit(1);
6260 }
6261
6262 /* Print the scripts */
6263 if (stat(fullpath, &dir_stat) == 0)
6264 if (dir_stat.st_mode & S_IXUSR &&
6265 S_ISREG(dir_stat.st_mode))
6266 print_zpool_script_help(ent->d_name,
6267 fullpath);
6268 }
6269 closedir(dir);
6270 }
6271 }
6272
6273 /*
6274 * Print out help text for all zpool status/iostat -c scripts.
6275 */
6276 static void
6277 print_zpool_script_list(const char *subcommand)
6278 {
6279 char *dir, *sp, *tmp;
6280
6281 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
6282
6283 sp = zpool_get_cmd_search_path();
6284 if (sp == NULL)
6285 return;
6286
6287 for (dir = strtok_r(sp, ":", &tmp);
6288 dir != NULL;
6289 dir = strtok_r(NULL, ":", &tmp))
6290 print_zpool_dir_scripts(dir);
6291
6292 free(sp);
6293 }
6294
6295 /*
6296 * Set the minimum pool/vdev name column width. The width must be at least 10,
6297 * but may be as large as the column width - 42 so it still fits on one line.
6298 * NOTE: 42 is the width of the default capacity/operations/bandwidth output
6299 */
6300 static int
6301 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
6302 {
6303 iostat_cbdata_t *cb = data;
6304 int width, available_width;
6305
6306 /*
6307 * get_namewidth() returns the maximum width of any name in that column
6308 * for any pool/vdev/device line that will be output.
6309 */
6310 width = get_namewidth(zhp, cb->cb_namewidth,
6311 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
6312
6313 /*
6314 * The width we are calculating is the width of the header and also the
6315 * padding width for names that are less than maximum width. The stats
6316 * take up 42 characters, so the width available for names is:
6317 */
6318 available_width = get_columns() - 42;
6319
6320 /*
6321 * If the maximum width fits on a screen, then great! Make everything
6322 * line up by justifying all lines to the same width. If that max
6323 * width is larger than what's available, the name plus stats won't fit
6324 * on one line, and justifying to that width would cause every line to
6325 * wrap on the screen. We only want lines with long names to wrap.
6326 * Limit the padding to what won't wrap.
6327 */
6328 if (width > available_width)
6329 width = available_width;
6330
6331 /*
6332 * And regardless of whatever the screen width is (get_columns can
6333 * return 0 if the width is not known or less than 42 for a narrow
6334 * terminal) have the width be a minimum of 10.
6335 */
6336 if (width < 10)
6337 width = 10;
6338
6339 /* Save the calculated width */
6340 cb->cb_namewidth = width;
6341
6342 return (0);
6343 }
6344
6345 /*
6346 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
6347 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
6348 * [interval [count]]
6349 *
6350 * -c CMD For each vdev, run command CMD
6351 * -g Display guid for individual vdev name.
6352 * -L Follow links when resolving vdev path name.
6353 * -P Display full path for vdev name.
6354 * -v Display statistics for individual vdevs
6355 * -h Display help
6356 * -p Display values in parsable (exact) format.
6357 * -H Scripted mode. Don't display headers, and separate properties
6358 * by a single tab.
6359 * -l Display average latency
6360 * -q Display queue depths
6361 * -w Display latency histograms
6362 * -r Display request size histogram
6363 * -T Display a timestamp in date(1) or Unix format
6364 * -n Only print headers once
6365 *
6366 * This command can be tricky because we want to be able to deal with pool
6367 * creation/destruction as well as vdev configuration changes. The bulk of this
6368 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
6369 * on pool_list_update() to detect the addition of new pools. Configuration
6370 * changes are all handled within libzfs.
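 *
 * Example: "zpool iostat -v tank 5 3" prints per-vdev statistics for the
 * pool "tank" every 5 seconds, 3 times ("tank" is an illustrative pool
 * name).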
6371 */
6372 int
6373 zpool_do_iostat(int argc, char **argv)
6374 {
6375 int c;
6376 int ret;
6377 int npools;
6378 float interval = 0;
6379 unsigned long count = 0;
6380 int winheight = 24;
6381 zpool_list_t *list;
6382 boolean_t verbose = B_FALSE;
6383 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
6384 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
6385 boolean_t omit_since_boot = B_FALSE;
6386 boolean_t guid = B_FALSE;
6387 boolean_t follow_links = B_FALSE;
6388 boolean_t full_name = B_FALSE;
6389 boolean_t headers_once = B_FALSE;
6390 iostat_cbdata_t cb = { 0 };
6391 char *cmd = NULL;
6392
6393 /* Used for printing error message */
6394 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
6395 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
6396
6397 uint64_t unsupported_flags;
6398
6399 /* check options */
6400 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
6401 switch (c) {
6402 case 'c':
6403 if (cmd != NULL) {
6404 fprintf(stderr,
6405 gettext("Can't set -c flag twice\n"));
6406 exit(1);
6407 }
6408
6409 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
6410 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
6411 fprintf(stderr, gettext(
6412 "Can't run -c, disabled by "
6413 "ZPOOL_SCRIPTS_ENABLED.\n"));
6414 exit(1);
6415 }
6416
6417 if ((getuid() <= 0 || geteuid() <= 0) &&
6418 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
6419 fprintf(stderr, gettext(
6420 "Can't run -c with root privileges "
6421 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
6422 exit(1);
6423 }
6424 cmd = optarg;
6425 verbose = B_TRUE;
6426 break;
6427 case 'g':
6428 guid = B_TRUE;
6429 break;
6430 case 'L':
6431 follow_links = B_TRUE;
6432 break;
6433 case 'P':
6434 full_name = B_TRUE;
6435 break;
6436 case 'T':
6437 get_timestamp_arg(*optarg);
6438 break;
6439 case 'v':
6440 verbose = B_TRUE;
6441 break;
6442 case 'p':
6443 parsable = B_TRUE;
6444 break;
6445 case 'l':
6446 latency = B_TRUE;
6447 break;
6448 case 'q':
6449 queues = B_TRUE;
6450 break;
6451 case 'H':
6452 scripted = B_TRUE;
6453 break;
6454 case 'w':
6455 l_histo = B_TRUE;
6456 break;
6457 case 'r':
6458 rq_histo = B_TRUE;
6459 break;
6460 case 'y':
6461 omit_since_boot = B_TRUE;
6462 break;
6463 case 'n':
6464 headers_once = B_TRUE;
6465 break;
6466 case 'h':
6467 usage(B_FALSE);
6468 break;
6469 case '?':
6470 if (optopt == 'c') {
6471 print_zpool_script_list("iostat");
6472 exit(0);
6473 } else {
6474 fprintf(stderr,
6475 gettext("invalid option '%c'\n"), optopt);
6476 }
6477 usage(B_FALSE);
6478 }
6479 }
6480
6481 argc -= optind;
6482 argv += optind;
6483
6484 cb.cb_literal = parsable;
6485 cb.cb_scripted = scripted;
6486
6487 if (guid)
6488 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
6489 if (follow_links)
6490 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6491 if (full_name)
6492 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
6493 cb.cb_iteration = 0;
6494 cb.cb_namewidth = 0;
6495 cb.cb_verbose = verbose;
6496
6497 /* Get our interval and count values (if any) */
6498 if (guid) {
6499 get_interval_count_filter_guids(&argc, argv, &interval,
6500 &count, &cb);
6501 } else {
6502 get_interval_count(&argc, argv, &interval, &count);
6503 }
6504
6505 if (argc == 0) {
6506 /* No args, so just print the defaults. */
6507 } else if (are_all_pools(argc, argv)) {
6508 /* All the args are pool names */
6509 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
6510 /* All the args are vdevs */
6511 cb.cb_vdevs.cb_names = argv;
6512 cb.cb_vdevs.cb_names_count = argc;
6513 argc = 0; /* No pools to process */
6514 } else if (are_all_pools(1, argv)) {
6515 /* The first arg is a pool name */
6516 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
6517 &cb.cb_vdevs)) {
6518 /* ...and the rest are vdev names */
6519 cb.cb_vdevs.cb_names = argv + 1;
6520 cb.cb_vdevs.cb_names_count = argc - 1;
6521 argc = 1; /* One pool to process */
6522 } else {
6523 fprintf(stderr, gettext("Expected either a list of "));
6524 fprintf(stderr, gettext("pools, or a list of vdevs in"));
6525 fprintf(stderr, " \"%s\", ", argv[0]);
6526 fprintf(stderr, gettext("but got:\n"));
6527 error_list_unresolved_vdevs(argc - 1, argv + 1,
6528 argv[0], &cb.cb_vdevs);
6529 fprintf(stderr, "\n");
6530 usage(B_FALSE);
6531 return (1);
6532 }
6533 } else {
6534 /*
6535 * The args don't make sense. The first arg isn't a pool name,
6536 * nor are all the args vdevs.
6537 */
6538 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
6539 fprintf(stderr, "\n");
6540 return (1);
6541 }
6542
6543 if (cb.cb_vdevs.cb_names_count != 0) {
6544 /*
6545 * If user specified vdevs, it implies verbose.
6546 */
6547 cb.cb_verbose = B_TRUE;
6548 }
6549
6550 /*
6551 * Construct the list of all interesting pools.
6552 */
6553 ret = 0;
6554 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
6555 &ret)) == NULL)
6556 return (1);
6557
6558 if (pool_list_count(list) == 0 && argc != 0) {
6559 pool_list_free(list);
6560 return (1);
6561 }
6562
6563 if (pool_list_count(list) == 0 && interval == 0) {
6564 pool_list_free(list);
6565 (void) fprintf(stderr, gettext("no pools available\n"));
6566 return (1);
6567 }
6568
6569 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
6570 pool_list_free(list);
6571 (void) fprintf(stderr,
6572 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
6573 usage(B_FALSE);
6574 return (1);
6575 }
6576
6577 if (l_histo && rq_histo) {
6578 pool_list_free(list);
6579 (void) fprintf(stderr,
6580 gettext("Only one of [-r|-w] can be passed at a time\n"));
6581 usage(B_FALSE);
6582 return (1);
6583 }
6584
6585 /*
6586 * Enter the main iostat loop.
6587 */
6588 cb.cb_list = list;
6589
6590 if (l_histo) {
6591 /*
6592 * Histogram tables look out of place when you try to display
6593 * them with the other stats, so make a rule that you can only
6594 * print histograms by themselves.
6595 */
6596 cb.cb_flags = IOS_L_HISTO_M;
6597 } else if (rq_histo) {
6598 cb.cb_flags = IOS_RQ_HISTO_M;
6599 } else {
6600 cb.cb_flags = IOS_DEFAULT_M;
6601 if (latency)
6602 cb.cb_flags |= IOS_LATENCY_M;
6603 if (queues)
6604 cb.cb_flags |= IOS_QUEUES_M;
6605 }
6606
6607 /*
6608 * See if the module supports all the stats we want to display.
6609 */
6610 unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
6611 if (unsupported_flags) {
6612 uint64_t f;
6613 int idx;
6614 fprintf(stderr,
6615 gettext("The loaded zfs module doesn't support:"));
6616
6617 /* for each bit set in unsupported_flags */
6618 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
6619 idx = lowbit64(f) - 1;
6620 fprintf(stderr, " -%c", flag_to_arg[idx]);
6621 }
6622
6623 fprintf(stderr, ". Try running a newer module.\n");
6624 pool_list_free(list);
6625
6626 return (1);
6627 }
6628
6629 for (;;) {
6630 if ((npools = pool_list_count(list)) == 0)
6631 (void) fprintf(stderr, gettext("no pools available\n"));
6632 else {
6633 /*
6634 * If this is the first iteration and -y was supplied
6635 * we skip any printing.
6636 */
6637 boolean_t skip = (omit_since_boot &&
6638 cb.cb_iteration == 0);
6639
6640 /*
6641 * Refresh all statistics. This is done as an
6642 * explicit step before calculating the maximum name
6643 * width, so that any configuration changes are
6644 * properly accounted for.
6645 */
6646 (void) pool_list_iter(list, B_FALSE, refresh_iostat,
6647 &cb);
6648
6649 /*
6650 * Iterate over all pools to determine the maximum width
6651 * for the pool / device name column across all pools.
6652 */
6653 cb.cb_namewidth = 0;
6654 (void) pool_list_iter(list, B_FALSE,
6655 get_namewidth_iostat, &cb);
6656
6657 if (timestamp_fmt != NODATE)
6658 print_timestamp(timestamp_fmt);
6659
6660 if (cmd != NULL && cb.cb_verbose &&
6661 !(cb.cb_flags & IOS_ANYHISTO_M)) {
6662 cb.vcdl = all_pools_for_each_vdev_run(argc,
6663 argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
6664 cb.cb_vdevs.cb_names_count,
6665 cb.cb_vdevs.cb_name_flags);
6666 } else {
6667 cb.vcdl = NULL;
6668 }
6669
6670
6671 /*
6672 * Check terminal size so we can print headers
6673 * even when terminal window has its height
6674 * changed.
6675 */
6676 winheight = terminal_height();
6677 /*
6678 * Are we connected to a TTY? If not, headers_once
6679 * should be true, to avoid breaking scripts.
6680 */
6681 if (winheight < 0)
6682 headers_once = B_TRUE;
6683
6684 /*
6685 * Print the header on the first iteration we don't skip, when
6686 * exactly one of skip and verbose is set, or periodically.
6687 *
6688 * The histogram code explicitly prints its header on
6689 * every vdev, so skip this for histograms.
6690 */
6691 if (((++cb.cb_iteration == 1 && !skip) ||
6692 (skip != verbose) ||
6693 (!headers_once &&
6694 (cb.cb_iteration % winheight) == 0)) &&
6695 (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
6696 !cb.cb_scripted)
6697 print_iostat_header(&cb);
6698
6699 if (skip) {
6700 (void) fflush(stdout);
6701 (void) fsleep(interval);
6702 continue;
6703 }
6704
6705 pool_list_iter(list, B_FALSE, print_iostat, &cb);
6706
6707 /*
6708 * If there's more than one pool, and we're not in
6709 * verbose mode (which prints a separator for us),
6710 * then print a separator.
6711 *
6712 * In addition, if we're printing specific vdevs then
6713 * we also want an ending separator.
6714 */
6715 if (((npools > 1 && !verbose &&
6716 !(cb.cb_flags & IOS_ANYHISTO_M)) ||
6717 (!(cb.cb_flags & IOS_ANYHISTO_M) &&
6718 cb.cb_vdevs.cb_names_count)) &&
6719 !cb.cb_scripted) {
6720 print_iostat_separator(&cb);
6721 if (cb.vcdl != NULL)
6722 print_cmd_columns(cb.vcdl, 1);
6723 printf("\n");
6724 }
6725
6726 if (cb.vcdl != NULL)
6727 free_vdev_cmd_data_list(cb.vcdl);
6728
6729 }
6730
6731 if (interval == 0)
6732 break;
6733
6734 if (count != 0 && --count == 0)
6735 break;
6736
6737 (void) fflush(stdout);
6738 (void) fsleep(interval);
6739 }
6740
6741 pool_list_free(list);
6742
6743 return (ret);
6744 }
6745
6746 typedef struct list_cbdata {
6747 boolean_t cb_verbose;
6748 int cb_name_flags;
6749 int cb_namewidth;
6750 boolean_t cb_json;
6751 boolean_t cb_scripted;
6752 zprop_list_t *cb_proplist;
6753 boolean_t cb_literal;
6754 nvlist_t *cb_jsobj;
6755 boolean_t cb_json_as_int;
6756 boolean_t cb_json_pool_key_guid;
6757 } list_cbdata_t;
6758
6759
6760 /*
6761 * Given a list of columns to display, output appropriate headers for each one.
6762 */
6763 static void
6764 print_header(list_cbdata_t *cb)
6765 {
6766 zprop_list_t *pl = cb->cb_proplist;
6767 char headerbuf[ZPOOL_MAXPROPLEN];
6768 const char *header;
6769 boolean_t first = B_TRUE;
6770 boolean_t right_justify;
6771 size_t width = 0;
6772
6773 for (; pl != NULL; pl = pl->pl_next) {
6774 width = pl->pl_width;
6775 if (first && cb->cb_verbose) {
6776 /*
6777 * Reset the width to accommodate the verbose listing
6778 * of devices.
6779 */
6780 width = cb->cb_namewidth;
6781 }
6782
6783 if (!first)
6784 (void) fputs(" ", stdout);
6785 else
6786 first = B_FALSE;
6787
6788 right_justify = B_FALSE;
6789 if (pl->pl_prop != ZPROP_USERPROP) {
6790 header = zpool_prop_column_name(pl->pl_prop);
6791 right_justify = zpool_prop_align_right(pl->pl_prop);
6792 } else {
6793 int i;
6794
6795 for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
6796 headerbuf[i] = toupper(pl->pl_user_prop[i]);
6797 headerbuf[i] = '\0';
6798 header = headerbuf;
6799 }
6800
6801 if (pl->pl_next == NULL && !right_justify)
6802 (void) fputs(header, stdout);
6803 else if (right_justify)
6804 (void) printf("%*s", (int)width, header);
6805 else
6806 (void) printf("%-*s", (int)width, header);
6807 }
6808
6809 (void) fputc('\n', stdout);
6810 }
6811
6812 /*
6813 * Given a pool and a list of properties, print out all the properties according
6814 * to the described layout. Used by zpool_do_list().
6815 */
6816 static void
6817 collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
6818 {
6819 zprop_list_t *pl = cb->cb_proplist;
6820 boolean_t first = B_TRUE;
6821 char property[ZPOOL_MAXPROPLEN];
6822 const char *propstr;
6823 boolean_t right_justify;
6824 size_t width;
6825 zprop_source_t sourcetype = ZPROP_SRC_NONE;
6826 nvlist_t *item, *d, *props;
6827 item = d = props = NULL;
6828
6829 if (cb->cb_json) {
6830 item = fnvlist_alloc();
6831 props = fnvlist_alloc();
6832 d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools");
6833 if (d == NULL) {
6834 fprintf(stderr, "pools obj not found.\n");
6835 exit(1);
6836 }
6837 fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int);
6838 }
6839
6840 for (; pl != NULL; pl = pl->pl_next) {
6841
6842 width = pl->pl_width;
6843 if (first && cb->cb_verbose) {
6844 /*
6845 * Reset the width to accommodate the verbose listing
6846 * of devices.
6847 */
6848 width = cb->cb_namewidth;
6849 }
6850
6851 if (!cb->cb_json && !first) {
6852 if (cb->cb_scripted)
6853 (void) fputc('\t', stdout);
6854 else
6855 (void) fputs(" ", stdout);
6856 } else {
6857 first = B_FALSE;
6858 }
6859
6860 right_justify = B_FALSE;
6861 if (pl->pl_prop != ZPROP_USERPROP) {
6862 if (zpool_get_prop(zhp, pl->pl_prop, property,
6863 sizeof (property), &sourcetype,
6864 cb->cb_literal) != 0)
6865 propstr = "-";
6866 else
6867 propstr = property;
6868
6869 right_justify = zpool_prop_align_right(pl->pl_prop);
6870 } else if ((zpool_prop_feature(pl->pl_user_prop) ||
6871 zpool_prop_unsupported(pl->pl_user_prop)) &&
6872 zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
6873 sizeof (property)) == 0) {
6874 propstr = property;
6875 sourcetype = ZPROP_SRC_LOCAL;
6876 } else if (zfs_prop_user(pl->pl_user_prop) &&
6877 zpool_get_userprop(zhp, pl->pl_user_prop, property,
6878 sizeof (property), &sourcetype) == 0) {
6879 propstr = property;
6880 } else {
6881 propstr = "-";
6882 }
6883
6884 if (cb->cb_json) {
6885 if (pl->pl_prop == ZPOOL_PROP_NAME)
6886 continue;
6887 const char *prop_name;
6888 if (pl->pl_prop != ZPROP_USERPROP)
6889 prop_name = zpool_prop_to_name(pl->pl_prop);
6890 else
6891 prop_name = pl->pl_user_prop;
6892 (void) zprop_nvlist_one_property(
6893 prop_name, propstr,
6894 sourcetype, NULL, NULL, props, cb->cb_json_as_int);
6895 } else {
6896 /*
6897 * If this is being called in scripted mode, or if this
6898 * is the last column and it is left-justified, don't
6899 * include a width format specifier.
6900 */
6901 if (cb->cb_scripted || (pl->pl_next == NULL &&
6902 !right_justify))
6903 (void) fputs(propstr, stdout);
6904 else if (right_justify)
6905 (void) printf("%*s", (int)width, propstr);
6906 else
6907 (void) printf("%-*s", (int)width, propstr);
6908 }
6909 }
6910
6911 if (cb->cb_json) {
6912 fnvlist_add_nvlist(item, "properties", props);
6913 if (cb->cb_json_pool_key_guid) {
6914 char pool_guid[256];
6915 uint64_t guid = fnvlist_lookup_uint64(
6916 zpool_get_config(zhp, NULL),
6917 ZPOOL_CONFIG_POOL_GUID);
6918 snprintf(pool_guid, 256, "%llu",
6919 (u_longlong_t)guid);
6920 fnvlist_add_nvlist(d, pool_guid, item);
6921 } else {
6922 fnvlist_add_nvlist(d, zpool_get_name(zhp),
6923 item);
6924 }
6925 fnvlist_free(props);
6926 fnvlist_free(item);
6927 } else
6928 (void) fputc('\n', stdout);
6929 }
6930
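/*
 * Format a single vdev/pool property value and either print it in the
 * requested column layout or add it to the JSON properties nvlist.
 */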
6931 static void
6932 collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,
6933 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format,
6934 boolean_t json, nvlist_t *nvl, boolean_t as_int)
6935 {
6936 char propval[64];
6937 boolean_t fixed;
6938 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6939
6940 switch (prop) {
6941 case ZPOOL_PROP_SIZE:
6942 case ZPOOL_PROP_EXPANDSZ:
6943 case ZPOOL_PROP_CHECKPOINT:
6944 case ZPOOL_PROP_DEDUPRATIO:
6945 case ZPOOL_PROP_DEDUPCACHED:
6946 if (value == 0)
6947 (void) strlcpy(propval, "-", sizeof (propval));
6948 else
6949 zfs_nicenum_format(value, propval, sizeof (propval),
6950 format);
6951 break;
6952 case ZPOOL_PROP_FRAGMENTATION:
6953 if (value == ZFS_FRAG_INVALID) {
6954 (void) strlcpy(propval, "-", sizeof (propval));
6955 } else if (format == ZFS_NICENUM_RAW) {
6956 (void) snprintf(propval, sizeof (propval), "%llu",
6957 (unsigned long long)value);
6958 } else {
6959 (void) snprintf(propval, sizeof (propval), "%llu%%",
6960 (unsigned long long)value);
6961 }
6962 break;
6963 case ZPOOL_PROP_CAPACITY:
6964 /* capacity value is in parts-per-10,000 (aka permyriad) */
6965 if (format == ZFS_NICENUM_RAW)
6966 (void) snprintf(propval, sizeof (propval), "%llu",
6967 (unsigned long long)value / 100);
6968 else
6969 (void) snprintf(propval, sizeof (propval),
6970 value < 1000 ? "%1.2f%%" : value < 10000 ?
6971 "%2.1f%%" : "%3.0f%%", value / 100.0);
6972 break;
6973 case ZPOOL_PROP_HEALTH:
6974 width = 8;
6975 (void) strlcpy(propval, str, sizeof (propval));
6976 break;
6977 default:
6978 zfs_nicenum_format(value, propval, sizeof (propval), format);
6979 }
6980
6981 if (!valid)
6982 (void) strlcpy(propval, "-", sizeof (propval));
6983
6984 if (json) {
6985 zprop_nvlist_one_property(zpool_prop_to_name(prop), propval,
6986 ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);
6987 } else {
6988 if (scripted)
6989 (void) printf("\t%s", propval);
6990 else
6991 (void) printf(" %*s", (int)width, propval);
6992 }
6993 }
6994
6995 /*
6996 * print static default line per vdev
6997 * not compatible with '-o' <proplist> option
6998 */
6999 static void
7000 collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
7001 list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)
7002 {
7003 nvlist_t **child;
7004 vdev_stat_t *vs;
7005 uint_t c, children = 0;
7006 char *vname;
7007 boolean_t scripted = cb->cb_scripted;
7008 uint64_t islog = B_FALSE;
7009 nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;
7010 props = ent = ch = obj = sp = l2c = NULL;
7011 const char *dashes = "%-*s - - - - "
7012 "- - - - -\n";
7013
7014 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
7015 (uint64_t **)&vs, &c) == 0);
7016
7017 if (name != NULL) {
7018 boolean_t toplevel = (vs->vs_space != 0);
7019 uint64_t cap;
7020 enum zfs_nicenum_format format;
7021 const char *state;
7022
7023 if (cb->cb_literal)
7024 format = ZFS_NICENUM_RAW;
7025 else
7026 format = ZFS_NICENUM_1024;
7027
7028 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
7029 return;
7030
7031 if (cb->cb_json) {
7032 props = fnvlist_alloc();
7033 ent = fnvlist_alloc();
7034 fill_vdev_info(ent, zhp, (char *)name, B_FALSE,
7035 cb->cb_json_as_int);
7036 } else {
7037 if (scripted)
7038 (void) printf("\t%s", name);
7039 else if (strlen(name) + depth > cb->cb_namewidth)
7040 (void) printf("%*s%s", depth, "", name);
7041 else
7042 (void) printf("%*s%s%*s", depth, "", name,
7043 (int)(cb->cb_namewidth - strlen(name) -
7044 depth), "");
7045 }
7046
7047 /*
7048 * Print the properties for the individual vdevs. Some
7049 * properties are only applicable to toplevel vdevs. The
7050 * 'toplevel' boolean value is passed to collect_vdev_prop()
7051 * to indicate that the value is valid.
7052 */
7053 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) {
7054 collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
7055 scripted, B_TRUE, format, cb->cb_json, props,
7056 cb->cb_json_as_int);
7057 } else {
7058 collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
7059 scripted, toplevel, format, cb->cb_json, props,
7060 cb->cb_json_as_int);
7061 }
7062 collect_vdev_prop(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
7063 scripted, toplevel, format, cb->cb_json, props,
7064 cb->cb_json_as_int);
7065 collect_vdev_prop(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
7066 NULL, scripted, toplevel, format, cb->cb_json, props,
7067 cb->cb_json_as_int);
7068 collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
7069 vs->vs_checkpoint_space, NULL, scripted, toplevel, format,
7070 cb->cb_json, props, cb->cb_json_as_int);
7071 collect_vdev_prop(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
7072 scripted, B_TRUE, format, cb->cb_json, props,
7073 cb->cb_json_as_int);
7074 collect_vdev_prop(ZPOOL_PROP_FRAGMENTATION,
7075 vs->vs_fragmentation, NULL, scripted,
7076 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
7077 format, cb->cb_json, props, cb->cb_json_as_int);
7078 cap = (vs->vs_space == 0) ? 0 :
7079 (vs->vs_alloc * 10000 / vs->vs_space);
7080 collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap, NULL,
7081 scripted, toplevel, format, cb->cb_json, props,
7082 cb->cb_json_as_int);
7083 collect_vdev_prop(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
7084 scripted, toplevel, format, cb->cb_json, props,
7085 cb->cb_json_as_int);
7086 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
7087 if (isspare) {
7088 if (vs->vs_aux == VDEV_AUX_SPARED)
7089 state = "INUSE";
7090 else if (vs->vs_state == VDEV_STATE_HEALTHY)
7091 state = "AVAIL";
7092 }
7093 collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state, scripted,
7094 B_TRUE, format, cb->cb_json, props, cb->cb_json_as_int);
7095
7096 if (cb->cb_json) {
7097 fnvlist_add_nvlist(ent, "properties", props);
7098 fnvlist_free(props);
7099 } else
7100 (void) fputc('\n', stdout);
7101 }
7102
7103 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
7104 &child, &children) != 0) {
7105 if (cb->cb_json) {
7106 fnvlist_add_nvlist(item, name, ent);
7107 fnvlist_free(ent);
7108 }
7109 return;
7110 }
7111
7112 if (cb->cb_json) {
7113 ch = fnvlist_alloc();
7114 }
7115
7116 /* list the normal vdevs first */
7117 for (c = 0; c < children; c++) {
7118 uint64_t ishole = B_FALSE;
7119
7120 if (nvlist_lookup_uint64(child[c],
7121 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
7122 continue;
7123
7124 if (nvlist_lookup_uint64(child[c],
7125 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
7126 continue;
7127
7128 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
7129 continue;
7130
7131 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7132 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
7133
7134 if (name == NULL || cb->cb_json != B_TRUE)
7135 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7136 B_FALSE, item);
7137 else if (cb->cb_json) {
7138 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7139 B_FALSE, ch);
7140 }
7141 free(vname);
7142 }
7143
7144 if (cb->cb_json) {
7145 if (!nvlist_empty(ch))
7146 fnvlist_add_nvlist(ent, "vdevs", ch);
7147 fnvlist_free(ch);
7148 }
7149
7150 /* list the classes: 'logs', 'dedup', and 'special' */
7151 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
7152 boolean_t printed = B_FALSE;
7153 if (cb->cb_json)
7154 obj = fnvlist_alloc();
7155 for (c = 0; c < children; c++) {
7156 const char *bias = NULL;
7157 const char *type = NULL;
7158
7159 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
7160 &islog) == 0 && islog) {
7161 bias = VDEV_ALLOC_CLASS_LOGS;
7162 } else {
7163 (void) nvlist_lookup_string(child[c],
7164 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
7165 (void) nvlist_lookup_string(child[c],
7166 ZPOOL_CONFIG_TYPE, &type);
7167 }
7168 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
7169 continue;
7170 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
7171 continue;
7172
7173 if (!printed && !cb->cb_json) {
7174 /* LINTED E_SEC_PRINTF_VAR_FMT */
7175 (void) printf(dashes, cb->cb_namewidth,
7176 class_name[n]);
7177 printed = B_TRUE;
7178 }
7179 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7180 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
7181 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7182 B_FALSE, obj);
7183 free(vname);
7184 }
7185 if (cb->cb_json) {
7186 if (!nvlist_empty(obj))
7187 fnvlist_add_nvlist(item, class_name[n], obj);
7188 fnvlist_free(obj);
7189 }
7190 }
7191
7192 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
7193 &child, &children) == 0 && children > 0) {
7194 if (cb->cb_json) {
7195 l2c = fnvlist_alloc();
7196 } else {
7197 /* LINTED E_SEC_PRINTF_VAR_FMT */
7198 (void) printf(dashes, cb->cb_namewidth, "cache");
7199 }
7200 for (c = 0; c < children; c++) {
7201 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7202 cb->cb_name_flags);
7203 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7204 B_FALSE, l2c);
7205 free(vname);
7206 }
7207 if (cb->cb_json) {
7208 if (!nvlist_empty(l2c))
7209 fnvlist_add_nvlist(item, "l2cache", l2c);
7210 fnvlist_free(l2c);
7211 }
7212 }
7213
7214 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
7215 &children) == 0 && children > 0) {
7216 if (cb->cb_json) {
7217 sp = fnvlist_alloc();
7218 } else {
7219 /* LINTED E_SEC_PRINTF_VAR_FMT */
7220 (void) printf(dashes, cb->cb_namewidth, "spare");
7221 }
7222 for (c = 0; c < children; c++) {
7223 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7224 cb->cb_name_flags);
7225 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7226 B_TRUE, sp);
7227 free(vname);
7228 }
7229 if (cb->cb_json) {
7230 if (!nvlist_empty(sp))
7231 fnvlist_add_nvlist(item, "spares", sp);
7232 fnvlist_free(sp);
7233 }
7234 }
7235
7236 if (name != NULL && cb->cb_json) {
7237 fnvlist_add_nvlist(item, name, ent);
7238 fnvlist_free(ent);
7239 }
7240 }
7241
7242 /*
7243 * Generic callback function to list a pool.
7244 */
7245 static int
7246 list_callback(zpool_handle_t *zhp, void *data)
7247 {
7248 nvlist_t *p, *d, *nvdevs;
7249 uint64_t guid;
7250 char pool_guid[256];
7251 const char *pool_name = zpool_get_name(zhp);
7252 list_cbdata_t *cbp = data;
7253 p = d = nvdevs = NULL;
7254
7255 collect_pool(zhp, cbp);
7256
7257 if (cbp->cb_verbose) {
7258 nvlist_t *config, *nvroot;
7259 config = zpool_get_config(zhp, NULL);
7260 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
7261 &nvroot) == 0);
7262 if (cbp->cb_json) {
7263 d = fnvlist_lookup_nvlist(cbp->cb_jsobj,
7264 "pools");
7265 if (cbp->cb_json_pool_key_guid) {
7266 guid = fnvlist_lookup_uint64(config,
7267 ZPOOL_CONFIG_POOL_GUID);
7268 snprintf(pool_guid, 256, "%llu",
7269 (u_longlong_t)guid);
7270 p = fnvlist_lookup_nvlist(d, pool_guid);
7271 } else {
7272 p = fnvlist_lookup_nvlist(d, pool_name);
7273 }
7274 nvdevs = fnvlist_alloc();
7275 }
7276 collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs);
7277 if (cbp->cb_json) {
7278 fnvlist_add_nvlist(p, "vdevs", nvdevs);
7279 if (cbp->cb_json_pool_key_guid)
7280 fnvlist_add_nvlist(d, pool_guid, p);
7281 else
7282 fnvlist_add_nvlist(d, pool_name, p);
7283 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
7284 fnvlist_free(nvdevs);
7285 }
7286 }
7287
7288 return (0);
7289 }
7290
7291 /*
7292 * Set the minimum pool/vdev name column width. The width must be at least 9,
7293 * but may be as large as needed.
7294 */
7295 static int
7296 get_namewidth_list(zpool_handle_t *zhp, void *data)
7297 {
7298 list_cbdata_t *cb = data;
7299 int width;
7300
7301 width = get_namewidth(zhp, cb->cb_namewidth,
7302 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
7303
7304 if (width < 9)
7305 width = 9;
7306
7307 cb->cb_namewidth = width;
7308
7309 return (0);
7310 }
7311
7312 /*
7313 * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
7314 *
7315 * -g Display guid for individual vdev name.
7316 * -H Scripted mode. Don't display headers, and separate properties
7317 * by a single tab.
7318 * -L Follow links when resolving vdev path name.
7319 * -o List of properties to display. Defaults to
7320 * "name,size,allocated,free,expandsize,fragmentation,capacity,"
7321 * "dedupratio,health,altroot"
7322 * -p Display values in parsable (exact) format.
7323 * -P Display full path for vdev name.
7324 * -T Display a timestamp in date(1) or Unix format
7325 * -j Display the output in JSON format
7326 * --json-int Display the numbers as integer instead of strings.
7327 * --json-pool-key-guid Set pool GUID as key for pool objects.
7328 *
7329 * List all pools in the system, whether or not they're healthy. Output space
7330 * statistics for each one, as well as health status summary.
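 *
 * Example: "zpool list -H -o name,size,health" prints one tab-separated
 * name/size/health line per pool.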
7331 */
7332 int
7333 zpool_do_list(int argc, char **argv)
7334 {
7335 int c;
7336 int ret = 0;
7337 list_cbdata_t cb = { 0 };
7338 static char default_props[] =
7339 "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
7340 "capacity,dedupratio,health,altroot";
7341 char *props = default_props;
7342 float interval = 0;
7343 unsigned long count = 0;
7344 zpool_list_t *list;
7345 boolean_t first = B_TRUE;
7346 nvlist_t *data = NULL;
7347 current_prop_type = ZFS_TYPE_POOL;
7348
7349 struct option long_options[] = {
7350 {"json", no_argument, NULL, 'j'},
7351 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
7352 {"json-pool-key-guid", no_argument, NULL,
7353 ZPOOL_OPTION_POOL_KEY_GUID},
7354 {0, 0, 0, 0}
7355 };
7356
7357 /* check options */
7358 while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options,
7359 NULL)) != -1) {
7360 switch (c) {
7361 case 'g':
7362 cb.cb_name_flags |= VDEV_NAME_GUID;
7363 break;
7364 case 'H':
7365 cb.cb_scripted = B_TRUE;
7366 break;
7367 case 'L':
7368 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
7369 break;
7370 case 'o':
7371 props = optarg;
7372 break;
7373 case 'P':
7374 cb.cb_name_flags |= VDEV_NAME_PATH;
7375 break;
7376 case 'p':
7377 cb.cb_literal = B_TRUE;
7378 break;
7379 case 'j':
7380 cb.cb_json = B_TRUE;
7381 break;
7382 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
7383 cb.cb_json_as_int = B_TRUE;
7384 cb.cb_literal = B_TRUE;
7385 break;
7386 case ZPOOL_OPTION_POOL_KEY_GUID:
7387 cb.cb_json_pool_key_guid = B_TRUE;
7388 break;
7389 case 'T':
7390 get_timestamp_arg(*optarg);
7391 break;
7392 case 'v':
7393 cb.cb_verbose = B_TRUE;
7394 cb.cb_namewidth = 8; /* 8 until precalc is avail */
7395 break;
7396 case ':':
7397 (void) fprintf(stderr, gettext("missing argument for "
7398 "'%c' option\n"), optopt);
7399 usage(B_FALSE);
7400 break;
7401 case '?':
7402 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7403 optopt);
7404 usage(B_FALSE);
7405 }
7406 }
7407
7408 argc -= optind;
7409 argv += optind;
7410
7411 if (!cb.cb_json && cb.cb_json_as_int) {
7412 (void) fprintf(stderr, gettext("'--json-int' only works with"
7413 " '-j' option\n"));
7414 usage(B_FALSE);
7415 }
7416
7417 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
7418 (void) fprintf(stderr, gettext("'json-pool-key-guid' only"
7419 " works with '-j' option\n"));
7420 usage(B_FALSE);
7421 }
7422
7423 get_interval_count(&argc, argv, &interval, &count);
7424
7425 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
7426 usage(B_FALSE);
7427
7428 for (;;) {
7429 if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
7430 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
7431 return (1);
7432
7433 if (pool_list_count(list) == 0)
7434 break;
7435
7436 if (cb.cb_json) {
7437 cb.cb_jsobj = zpool_json_schema(0, 1);
7438 data = fnvlist_alloc();
7439 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
7440 fnvlist_free(data);
7441 }
7442
7443 cb.cb_namewidth = 0;
7444 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
7445
7446 if (timestamp_fmt != NODATE) {
7447 if (cb.cb_json) {
7448 if (cb.cb_json_as_int) {
7449 fnvlist_add_uint64(cb.cb_jsobj, "time",
7450 time(NULL));
7451 } else {
7452 char ts[128];
7453 get_timestamp(timestamp_fmt, ts, 128);
7454 fnvlist_add_string(cb.cb_jsobj, "time",
7455 ts);
7456 }
7457 } else
7458 print_timestamp(timestamp_fmt);
7459 }
7460
7461 if (!cb.cb_scripted && (first || cb.cb_verbose) &&
7462 !cb.cb_json) {
7463 print_header(&cb);
7464 first = B_FALSE;
7465 }
7466 ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
7467
7468 if (ret == 0 && cb.cb_json)
7469 zcmd_print_json(cb.cb_jsobj);
7470 else if (ret != 0 && cb.cb_json)
7471 nvlist_free(cb.cb_jsobj);
7472
7473 if (interval == 0)
7474 break;
7475
7476 if (count != 0 && --count == 0)
7477 break;
7478
7479 pool_list_free(list);
7480
7481 (void) fflush(stdout);
7482 (void) fsleep(interval);
7483 }
7484
7485 if (argc == 0 && !cb.cb_scripted && !cb.cb_json &&
7486 pool_list_count(list) == 0) {
7487 (void) printf(gettext("no pools available\n"));
7488 ret = 0;
7489 }
7490
7491 pool_list_free(list);
7492 zprop_free_list(cb.cb_proplist);
7493 return (ret);
7494 }
7495
7496 static int
7497 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
7498 {
7499 boolean_t force = B_FALSE;
7500 boolean_t rebuild = B_FALSE;
7501 boolean_t wait = B_FALSE;
7502 int c;
7503 nvlist_t *nvroot;
7504 char *poolname, *old_disk, *new_disk;
7505 zpool_handle_t *zhp;
7506 nvlist_t *props = NULL;
7507 char *propval;
7508 int ret;
7509
7510 /* check options */
7511 while ((c = getopt(argc, argv, "fo:sw")) != -1) {
7512 switch (c) {
7513 case 'f':
7514 force = B_TRUE;
7515 break;
7516 case 'o':
7517 if ((propval = strchr(optarg, '=')) == NULL) {
7518 (void) fprintf(stderr, gettext("missing "
7519 "'=' for -o option\n"));
7520 usage(B_FALSE);
7521 }
7522 *propval = '\0';
7523 propval++;
7524
7525 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
7526 (add_prop_list(optarg, propval, &props, B_TRUE)))
7527 usage(B_FALSE);
7528 break;
7529 case 's':
7530 rebuild = B_TRUE;
7531 break;
7532 case 'w':
7533 wait = B_TRUE;
7534 break;
7535 case '?':
7536 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7537 optopt);
7538 usage(B_FALSE);
7539 }
7540 }
7541
7542 argc -= optind;
7543 argv += optind;
7544
7545 /* get pool name and check number of arguments */
7546 if (argc < 1) {
7547 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7548 usage(B_FALSE);
7549 }
7550
7551 poolname = argv[0];
7552
7553 if (argc < 2) {
7554 (void) fprintf(stderr,
7555 gettext("missing <device> specification\n"));
7556 usage(B_FALSE);
7557 }
7558
7559 old_disk = argv[1];
7560
7561 if (argc < 3) {
7562 if (!replacing) {
7563 (void) fprintf(stderr,
7564 gettext("missing <new_device> specification\n"));
7565 usage(B_FALSE);
7566 }
7567 new_disk = old_disk;
7568 argc -= 1;
7569 argv += 1;
7570 } else {
7571 new_disk = argv[2];
7572 argc -= 2;
7573 argv += 2;
7574 }
7575
7576 if (argc > 1) {
7577 (void) fprintf(stderr, gettext("too many arguments\n"));
7578 usage(B_FALSE);
7579 }
7580
7581 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
7582 nvlist_free(props);
7583 return (1);
7584 }
7585
7586 if (zpool_get_config(zhp, NULL) == NULL) {
7587 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
7588 poolname);
7589 zpool_close(zhp);
7590 nvlist_free(props);
7591 return (1);
7592 }
7593
7594 /* unless manually specified use "ashift" pool property (if set) */
7595 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
7596 int intval;
7597 zprop_source_t src;
7598 char strval[ZPOOL_MAXPROPLEN];
7599
7600 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
7601 if (src != ZPROP_SRC_DEFAULT) {
7602 (void) sprintf(strval, "%" PRId32, intval);
7603 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
7604 &props, B_TRUE) == 0);
7605 }
7606 }
7607
7608 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
7609 argc, argv);
7610 if (nvroot == NULL) {
7611 zpool_close(zhp);
7612 nvlist_free(props);
7613 return (1);
7614 }
7615
7616 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
7617 rebuild);
7618
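/*
 * If -w was given, block until the activity we just kicked off (a
 * replace, a mirror resilver, or a raidz expansion) completes.
 */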
7619 if (ret == 0 && wait) {
7620 zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
7621 char raidz_prefix[] = "raidz";
7622 if (replacing) {
7623 activity = ZPOOL_WAIT_REPLACE;
7624 } else if (strncmp(old_disk,
7625 raidz_prefix, strlen(raidz_prefix)) == 0) {
7626 activity = ZPOOL_WAIT_RAIDZ_EXPAND;
7627 }
7628 ret = zpool_wait(zhp, activity);
7629 }
7630
7631 nvlist_free(props);
7632 nvlist_free(nvroot);
7633 zpool_close(zhp);
7634
7635 return (ret);
7636 }
7637
7638 /*
7639 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
7640 *
7641 * -f Force attach, even if <new_device> appears to be in use.
7642 * -s Use sequential instead of healing reconstruction for resilver.
7643 * -o Set property=value.
7644 * -w Wait for replacing to complete before returning
7645 *
7646 * Replace <device> with <new_device>.
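 * Example: "zpool replace -w tank sda sdb" (illustrative pool/device names).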
7647 */
7648 int
7649 zpool_do_replace(int argc, char **argv)
7650 {
7651 return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
7652 }
7653
7654 /*
7655 * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
7656 *
7657 * -f Force attach, even if <new_device> appears to be in use.
7658 * -s Use sequential instead of healing reconstruction for resilver.
7659 * -o Set property=value.
7660 * -w Wait for resilvering (mirror) or expansion (raidz) to complete
7661 * before returning.
7662 *
7663 * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
7664 * mirror or raidz. If <device> is not part of a mirror, then <device> will
7665 * be transformed into a mirror of <device> and <new_device>. When a mirror
7666 * is involved, <new_device> will begin life with a DTL of [0, now], and will
7667 * immediately begin to resilver itself. For the raidz case, an expansion will
7668 * commence and reflow the raidz data across all the disks including the
7669 * <new_device>.
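 *
 * Example: "zpool attach tank sda sdb" attaches sdb so that sda and sdb
 * form a mirror (illustrative pool/device names).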
7670 */
7671 int
7672 zpool_do_attach(int argc, char **argv)
7673 {
7674 return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
7675 }
7676
7677 /*
7678 * zpool detach [-f] <pool> <device>
7679 *
7680 * -f Force detach of <device>, even if DTLs argue against it
7681 * (not supported yet)
7682 *
7683 * Detach a device from a mirror. The operation will be refused if <device>
7684 * is the last device in the mirror, or if the DTLs indicate that this device
7685 * has the only valid copy of some data.
7686 */
7687 int
7688 zpool_do_detach(int argc, char **argv)
7689 {
7690 int c;
7691 char *poolname, *path;
7692 zpool_handle_t *zhp;
7693 int ret;
7694
7695 /* check options */
7696 while ((c = getopt(argc, argv, "")) != -1) {
7697 switch (c) {
7698 case '?':
7699 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7700 optopt);
7701 usage(B_FALSE);
7702 }
7703 }
7704
7705 argc -= optind;
7706 argv += optind;
7707
7708 /* get pool name and check number of arguments */
7709 if (argc < 1) {
7710 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7711 usage(B_FALSE);
7712 }
7713
7714 if (argc < 2) {
7715 (void) fprintf(stderr,
7716 gettext("missing <device> specification\n"));
7717 usage(B_FALSE);
7718 }
7719
7720 poolname = argv[0];
7721 path = argv[1];
7722
7723 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7724 return (1);
7725
7726 ret = zpool_vdev_detach(zhp, path);
7727
7728 zpool_close(zhp);
7729
7730 return (ret);
7731 }
7732
7733 /*
7734 * zpool split [-gLnP] [-o prop=val] ...
7735 * [-o mntopt] ...
7736 * [-R altroot] <pool> <newpool> [<device> ...]
7737 *
7738 * -g Display guid for individual vdev name.
7739 * -L Follow links when resolving vdev path name.
7740 * -n Do not split the pool, but display the resulting layout if
7741 * it were to be split.
7742 * -o Set property=value, or set mount options.
7743 * -P Display full path for vdev name.
7744 * -R Mount the split-off pool under an alternate root.
7745 * -l Load encryption keys while importing.
7746 *
7747 * Splits the named pool and gives it the new pool name. Devices to be split
7748 * off may be listed, provided that no more than one device is specified
7749 * per top-level vdev mirror. The newly split pool is left in an exported
7750 * state unless -R is specified.
7751 *
7752 * Restrictions: the top-level of the pool pool must only be made up of
7753 * mirrors; all devices in the pool must be healthy; no device may be
7754 * undergoing a resilvering operation.
7755 */
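/*
 * Illustrative usage (names are hypothetical):
 *
 *	# zpool split -R /mnt/new tank tank2 sdb
 *
 * splits sdb off from its mirror in tank into a new pool tank2 and
 * imports it under the alternate root /mnt/new.
 */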
int
zpool_do_split(int argc, char **argv)
{
	char *srcpool, *newpool, *propval;
	char *mntopts = NULL;
	splitflags_t flags;
	int c, ret = 0;
	int ms_status = 0;
	boolean_t loadkeys = B_FALSE;
	zpool_handle_t *zhp;
	nvlist_t *config, *props = NULL;

	flags.dryrun = B_FALSE;
	flags.import = B_FALSE;
	flags.name_flags = 0;

	/* check options */
	while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
		switch (c) {
		case 'g':
			flags.name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'R':
			flags.import = B_TRUE;
			if (add_prop_list(
			    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
			    &props, B_TRUE) != 0) {
				nvlist_free(props);
				usage(B_FALSE);
			}
			break;
		case 'l':
			loadkeys = B_TRUE;
			break;
		case 'n':
			flags.dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) != NULL) {
				*propval = '\0';
				propval++;
				if (add_prop_list(optarg, propval,
				    &props, B_TRUE) != 0) {
					nvlist_free(props);
					usage(B_FALSE);
				}
			} else {
				mntopts = optarg;
			}
			break;
		case 'P':
			flags.name_flags |= VDEV_NAME_PATH;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
			break;
		}
	}

	if (!flags.import && mntopts != NULL) {
		(void) fprintf(stderr, gettext("setting mntopts is only "
		    "valid when importing the pool\n"));
		usage(B_FALSE);
	}

	if (!flags.import && loadkeys) {
		(void) fprintf(stderr, gettext("loading keys is only "
		    "valid when importing the pool\n"));
		usage(B_FALSE);
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("Missing pool name\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("Missing new pool name\n"));
		usage(B_FALSE);
	}

	srcpool = argv[0];
	newpool = argv[1];

	argc -= 2;
	argv += 2;

	if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
		nvlist_free(props);
		return (1);
	}

	config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
	if (config == NULL) {
		ret = 1;
	} else {
		if (flags.dryrun) {
			(void) printf(gettext("would create '%s' with the "
			    "following layout:\n\n"), newpool);
			print_vdev_tree(NULL, newpool, config, 0, "",
			    flags.name_flags);
			print_vdev_tree(NULL, "dedup", config, 0,
			    VDEV_ALLOC_BIAS_DEDUP, 0);
			print_vdev_tree(NULL, "special", config, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, 0);
		}
	}

	zpool_close(zhp);

	if (ret != 0 || flags.dryrun || !flags.import) {
		nvlist_free(config);
		nvlist_free(props);
		return (ret);
	}

	/*
	 * The split was successful. Now we need to open the new
	 * pool and import it.
	 */
	if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
		nvlist_free(config);
		nvlist_free(props);
		return (1);
	}

	if (loadkeys) {
		ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
		if (ret != 0)
			ret = 1;
	}

	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
		ms_status = zpool_enable_datasets(zhp, mntopts, 0,
		    mount_tp_nthr);
		if (ms_status == EZFS_SHAREFAILED) {
			(void) fprintf(stderr, gettext("Split was successful, "
			    "datasets are mounted but sharing of some datasets "
			    "has failed\n"));
		} else if (ms_status == EZFS_MOUNTFAILED) {
			(void) fprintf(stderr, gettext("Split was successful"
			    ", but some datasets could not be mounted\n"));
			(void) fprintf(stderr, gettext("Try doing '%s' with a "
			    "different altroot\n"), "zpool import");
		}
	}
	zpool_close(zhp);
	nvlist_free(config);
	nvlist_free(props);

	return (ret);
}

/*
 * zpool online [-e] [--power] <pool> <device> ...
 *
 *	-e	Expand the device to use all available space.
 *	--power	Power on the enclosure slot to the drive (if possible).
 */
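/*
 * Illustrative usage (names are hypothetical):
 *
 *	# zpool online -e tank sda
 *
 * brings sda back online and expands it to use any grown capacity.
 */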
int
zpool_do_online(int argc, char **argv)
{
	int c, i;
	char *poolname;
	zpool_handle_t *zhp;
	int ret = 0;
	vdev_state_t newstate;
	int flags = 0;
	boolean_t is_power_on = B_FALSE;
	struct option long_options[] = {
		{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
		switch (c) {
		case 'e':
			flags |= ZFS_ONLINE_EXPAND;
			break;
		case ZPOOL_OPTION_POWER:
			is_power_on = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
		is_power_on = B_TRUE;

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing device name\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
		(void) fprintf(stderr, gettext("failed to open pool "
		    "\"%s\""), poolname);
		return (1);
	}

	for (i = 1; i < argc; i++) {
		vdev_state_t oldstate;
		boolean_t avail_spare, l2cache;
		int rc;

		if (is_power_on) {
			rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
			if (rc == ENOTSUP) {
				(void) fprintf(stderr,
				    gettext("Power control not supported\n"));
			}
			if (rc != 0)
				return (rc);
		}

		nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
		    &l2cache, NULL);
		if (tgt == NULL) {
			ret = 1;
			(void) fprintf(stderr, gettext("couldn't find device "
			    "\"%s\" in pool \"%s\"\n"), argv[i], poolname);
			continue;
		}
		uint_t vsc;
		oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
		    ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
		if ((rc = zpool_vdev_online(zhp, argv[i], flags,
		    &newstate)) == 0) {
			if (newstate != VDEV_STATE_HEALTHY) {
				(void) printf(gettext("warning: device '%s' "
				    "onlined, but remains in faulted state\n"),
				    argv[i]);
				if (newstate == VDEV_STATE_FAULTED)
					(void) printf(gettext("use 'zpool "
					    "clear' to restore a faulted "
					    "device\n"));
				else
					(void) printf(gettext("use 'zpool "
					    "replace' to replace devices "
					    "that are no longer present\n"));
				if ((flags & ZFS_ONLINE_EXPAND)) {
					(void) printf(gettext("%s: failed "
					    "to expand usable space on "
					    "unhealthy device '%s'\n"),
					    (oldstate >= VDEV_STATE_DEGRADED ?
					    "error" : "warning"), argv[i]);
					if (oldstate >= VDEV_STATE_DEGRADED) {
						ret = 1;
						break;
					}
				}
			}
		} else {
			(void) fprintf(stderr, gettext("Failed to online "
			    "\"%s\" in pool \"%s\": %d\n"),
			    argv[i], poolname, rc);
			ret = 1;
		}
	}

	zpool_close(zhp);

	return (ret);
}

/*
 * zpool offline [-ft]|[--power] <pool> <device> ...
 *
 *	-f	Force the device into a faulted state.
 *
 *	-t	Only take the device off-line temporarily. The offline/faulted
 *		state will not be persistent across reboots.
 *
 *	--power	Power off the enclosure slot to the drive (if possible).
 */
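/*
 * Illustrative usage (names are hypothetical):
 *
 *	# zpool offline -t tank sda
 *
 * takes sda offline until the next import or reboot.
 */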
int
zpool_do_offline(int argc, char **argv)
{
	int c, i;
	char *poolname;
	zpool_handle_t *zhp;
	int ret = 0;
	boolean_t istmp = B_FALSE;
	boolean_t fault = B_FALSE;
	boolean_t is_power_off = B_FALSE;

	struct option long_options[] = {
		{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
		switch (c) {
		case 'f':
			fault = B_TRUE;
			break;
		case 't':
			istmp = B_TRUE;
			break;
		case ZPOOL_OPTION_POWER:
			is_power_off = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	if (is_power_off && fault) {
		(void) fprintf(stderr,
		    gettext("--power and -f cannot be used together\n"));
		usage(B_FALSE);
		return (1);
	}

	if (is_power_off && istmp) {
		(void) fprintf(stderr,
		    gettext("--power and -t cannot be used together\n"));
		usage(B_FALSE);
		return (1);
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing device name\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
		(void) fprintf(stderr, gettext("failed to open pool "
		    "\"%s\""), poolname);
		return (1);
	}

	for (i = 1; i < argc; i++) {
		uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
		if (is_power_off) {
			/*
			 * Note: we have to power off first, then set REMOVED,
			 * or else zpool_vdev_set_removed_state() returns
			 * EAGAIN.
			 */
			ret = zpool_power_off(zhp, argv[i]);
			if (ret != 0) {
				(void) fprintf(stderr, "%s %s %d\n",
				    gettext("unable to power off slot for"),
				    argv[i], ret);
			}
			zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);

		} else if (fault) {
			vdev_aux_t aux;
			if (istmp == B_FALSE) {
				/* Force the fault to persist across imports */
				aux = VDEV_AUX_EXTERNAL_PERSIST;
			} else {
				aux = VDEV_AUX_EXTERNAL;
			}

			if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
				ret = 1;
		} else {
			if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
				ret = 1;
		}
	}

	zpool_close(zhp);

	return (ret);
}

/*
 * zpool clear [-nFX]|[--power] <pool> [device]
 *
 *	-F	Attempt a rewind if necessary to open the pool.
 *	-n	With -F, do not rewind; only report whether a rewind
 *		would succeed.
 *	-X	Turn on extreme rewind (used with -F).
 *	--power	Power on the devices' slots before clearing errors
 *		(if possible).
 *
 * Clear all errors associated with a pool or a particular device.
 */
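/*
 * Illustrative usage (names are hypothetical):
 *
 *	# zpool clear tank		clear all device errors in tank
 *	# zpool clear -F tank		clear errors, rewinding if needed
 */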
int
zpool_do_clear(int argc, char **argv)
{
	int c;
	int ret = 0;
	boolean_t dryrun = B_FALSE;
	boolean_t do_rewind = B_FALSE;
	boolean_t xtreme_rewind = B_FALSE;
	boolean_t is_power_on = B_FALSE;
	uint32_t rewind_policy = ZPOOL_NO_REWIND;
	nvlist_t *policy = NULL;
	zpool_handle_t *zhp;
	char *pool, *device;

	struct option long_options[] = {
		{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "FnX", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'F':
			do_rewind = B_TRUE;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'X':
			xtreme_rewind = B_TRUE;
			break;
		case ZPOOL_OPTION_POWER:
			is_power_on = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
		is_power_on = B_TRUE;

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name\n"));
		usage(B_FALSE);
	}

	if (argc > 2) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	if ((dryrun || xtreme_rewind) && !do_rewind) {
		(void) fprintf(stderr,
		    gettext("-n or -X only meaningful with -F\n"));
		usage(B_FALSE);
	}
	if (dryrun)
		rewind_policy = ZPOOL_TRY_REWIND;
	else if (do_rewind)
		rewind_policy = ZPOOL_DO_REWIND;
	if (xtreme_rewind)
		rewind_policy |= ZPOOL_EXTREME_REWIND;

	/* In future, further rewind policy choices can be passed along here */
	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
	    rewind_policy) != 0) {
		return (1);
	}

	pool = argv[0];
	device = argc == 2 ? argv[1] : NULL;

	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
		nvlist_free(policy);
		return (1);
	}

	if (is_power_on) {
		if (device == NULL) {
			zpool_power_on_pool_and_wait_for_devices(zhp);
		} else {
			zpool_power_on_and_disk_wait(zhp, device);
		}
	}

	if (zpool_clear(zhp, device, policy) != 0)
		ret = 1;

	zpool_close(zhp);

	nvlist_free(policy);

	return (ret);
}

/*
 * zpool reguid [-g <guid>] <pool>
 */
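/*
 * Illustrative usage (the pool name and GUID are hypothetical):
 *
 *	# zpool reguid tank			pick a new random GUID
 *	# zpool reguid -g 1234567890 tank	set a specific GUID
 */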
int
zpool_do_reguid(int argc, char **argv)
{
	uint64_t guid;
	uint64_t *guidp = NULL;
	int c;
	char *endptr;
	char *poolname;
	zpool_handle_t *zhp;
	int ret = 0;

	/* check options */
	while ((c = getopt(argc, argv, "g:")) != -1) {
		switch (c) {
		case 'g':
			errno = 0;
			guid = strtoull(optarg, &endptr, 10);
			if (errno != 0 || *endptr != '\0') {
				(void) fprintf(stderr,
				    gettext("invalid GUID: %s\n"), optarg);
				usage(B_FALSE);
			}
			guidp = &guid;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name\n"));
		usage(B_FALSE);
	}

	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];
	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	ret = zpool_set_guid(zhp, guidp);

	zpool_close(zhp);
	return (ret);
}

/*
 * zpool reopen [-n] <pool>
 *
 *	-n	Do not restart an in-progress scrub operation.
 *
 * Reopen the pool so that the kernel can update the sizes of all vdevs.
 */
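/*
 * Illustrative usage (the pool name is hypothetical):
 *
 *	# zpool reopen -n tank
 *
 * reopens every vdev in tank without restarting a running scrub.
 */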
int
zpool_do_reopen(int argc, char **argv)
{
	int c;
	int ret = 0;
	boolean_t scrub_restart = B_TRUE;

	/* check options */
	while ((c = getopt(argc, argv, "n")) != -1) {
		switch (c) {
		case 'n':
			scrub_restart = B_FALSE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* if argc == 0 we will execute zpool_reopen_one on all pools */
	ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, zpool_reopen_one, &scrub_restart);

	return (ret);
}

typedef struct scrub_cbdata {
	int cb_type;
	pool_scrub_cmd_t cb_scrub_cmd;
} scrub_cbdata_t;

static boolean_t
zpool_has_checkpoint(zpool_handle_t *zhp)
{
	nvlist_t *config, *nvroot;

	config = zpool_get_config(zhp, NULL);

	if (config != NULL) {
		pool_checkpoint_stat_t *pcs = NULL;
		uint_t c;

		nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);

		if (pcs == NULL || pcs->pcs_state == CS_NONE)
			return (B_FALSE);

		assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
		    pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
		return (B_TRUE);
	}

	return (B_FALSE);
}

static int
scrub_callback(zpool_handle_t *zhp, void *data)
{
	scrub_cbdata_t *cb = data;
	int err;

	/*
	 * Ignore faulted pools.
	 */
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		(void) fprintf(stderr, gettext("cannot scan '%s': pool is "
		    "currently unavailable\n"), zpool_get_name(zhp));
		return (1);
	}

	err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);

	if (err == 0 && zpool_has_checkpoint(zhp) &&
	    cb->cb_type == POOL_SCAN_SCRUB) {
		(void) printf(gettext("warning: will not scrub state that "
		    "belongs to the checkpoint of pool '%s'\n"),
		    zpool_get_name(zhp));
	}

	return (err != 0);
}

static int
wait_callback(zpool_handle_t *zhp, void *data)
{
	zpool_wait_activity_t *act = data;
	return (zpool_wait(zhp, *act));
}

/*
 * zpool scrub [-e | -s | -p | -C] [-w] <pool> ...
 *
 *	-e	Only scrub blocks in the error log.
 *	-s	Stop. Stops any in-progress scrub.
 *	-p	Pause. Pause in-progress scrub.
 *	-w	Wait. Blocks until scrub has completed.
 *	-C	Scrub from last saved txg.
 */
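/*
 * Illustrative usage (the pool name is hypothetical):
 *
 *	# zpool scrub -w tank		scrub tank and wait for completion
 *	# zpool scrub -p tank		pause an in-progress scrub
 */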
int
zpool_do_scrub(int argc, char **argv)
{
	int c;
	scrub_cbdata_t cb;
	boolean_t wait = B_FALSE;
	int error;

	cb.cb_type = POOL_SCAN_SCRUB;
	cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;

	boolean_t is_error_scrub = B_FALSE;
	boolean_t is_pause = B_FALSE;
	boolean_t is_stop = B_FALSE;
	boolean_t is_txg_continue = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "spweC")) != -1) {
		switch (c) {
		case 'e':
			is_error_scrub = B_TRUE;
			break;
		case 's':
			is_stop = B_TRUE;
			break;
		case 'p':
			is_pause = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case 'C':
			is_txg_continue = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	if (is_pause && is_stop) {
		(void) fprintf(stderr, gettext("invalid option "
		    "combination: -s and -p are mutually exclusive\n"));
		usage(B_FALSE);
	} else if (is_pause && is_txg_continue) {
		(void) fprintf(stderr, gettext("invalid option "
		    "combination: -p and -C are mutually exclusive\n"));
		usage(B_FALSE);
	} else if (is_stop && is_txg_continue) {
		(void) fprintf(stderr, gettext("invalid option "
		    "combination: -s and -C are mutually exclusive\n"));
		usage(B_FALSE);
	} else if (is_error_scrub && is_txg_continue) {
		(void) fprintf(stderr, gettext("invalid option "
		    "combination: -e and -C are mutually exclusive\n"));
		usage(B_FALSE);
	} else {
		if (is_error_scrub)
			cb.cb_type = POOL_SCAN_ERRORSCRUB;

		if (is_pause) {
			cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
		} else if (is_stop) {
			cb.cb_type = POOL_SCAN_NONE;
		} else if (is_txg_continue) {
			cb.cb_scrub_cmd = POOL_SCRUB_FROM_LAST_TXG;
		} else {
			cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
		}
	}

	if (wait && (cb.cb_type == POOL_SCAN_NONE ||
	    cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
		(void) fprintf(stderr, gettext("invalid option combination: "
		    "-w cannot be used with -p or -s\n"));
		usage(B_FALSE);
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, scrub_callback, &cb);

	if (wait && !error) {
		zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
		error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
		    B_FALSE, wait_callback, &act);
	}

	return (error);
}

/*
 * zpool resilver <pool> ...
 *
 * Restarts any in-progress resilver
 */
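/*
 * Illustrative usage (the pool name is hypothetical):
 *
 *	# zpool resilver tank
 *
 * restarts any resilver currently running in tank from the beginning.
 */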
int
zpool_do_resilver(int argc, char **argv)
{
	int c;
	scrub_cbdata_t cb;

	cb.cb_type = POOL_SCAN_RESILVER;
	cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;

	/* check options */
	while ((c = getopt(argc, argv, "")) != -1) {
		switch (c) {
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, scrub_callback, &cb));
}

/*
 * zpool trim [-d] [-r <rate>] [-c | -s] [-w] <pool> [<device> ...]
 *
 *	-c	Cancel. Ends any in-progress trim.
 *	-d	Secure trim. Requires kernel and device support.
 *	-r <rate>	Sets the TRIM rate in bytes (per second). Supports
 *			adding a multiplier suffix such as 'k' or 'm'.
 *	-s	Suspend. TRIM can then be restarted with no flags.
 *	-w	Wait. Blocks until trimming has completed.
 */
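/*
 * Illustrative usage (names are hypothetical):
 *
 *	# zpool trim tank			trim all leaf vdevs in tank
 *	# zpool trim -d -r 100M tank sda	secure trim sda at 100 MB/s
 */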
int
zpool_do_trim(int argc, char **argv)
{
	struct option long_options[] = {
		{"cancel", no_argument, NULL, 'c'},
		{"secure", no_argument, NULL, 'd'},
		{"rate", required_argument, NULL, 'r'},
		{"suspend", no_argument, NULL, 's'},
		{"wait", no_argument, NULL, 'w'},
		{0, 0, 0, 0}
	};

	pool_trim_func_t cmd_type = POOL_TRIM_START;
	uint64_t rate = 0;
	boolean_t secure = B_FALSE;
	boolean_t wait = B_FALSE;

	int c;
	while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'c':
			if (cmd_type != POOL_TRIM_START &&
			    cmd_type != POOL_TRIM_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_TRIM_CANCEL;
			break;
		case 'd':
			if (cmd_type != POOL_TRIM_START) {
				(void) fprintf(stderr, gettext("-d cannot be "
				    "combined with the -c or -s options\n"));
				usage(B_FALSE);
			}
			secure = B_TRUE;
			break;
		case 'r':
			if (cmd_type != POOL_TRIM_START) {
				(void) fprintf(stderr, gettext("-r cannot be "
				    "combined with the -c or -s options\n"));
				usage(B_FALSE);
			}
			if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
				(void) fprintf(stderr, "%s: %s\n",
				    gettext("invalid value for rate"),
				    libzfs_error_description(g_zfs));
				usage(B_FALSE);
			}
			break;
		case 's':
			if (cmd_type != POOL_TRIM_START &&
			    cmd_type != POOL_TRIM_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_TRIM_SUSPEND;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
		return (-1);
	}

	if (wait && (cmd_type != POOL_TRIM_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c or "
		    "-s\n"));
		usage(B_FALSE);
	}

	char *poolname = argv[0];
	zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
	if (zhp == NULL)
		return (-1);

	trimflags_t trim_flags = {
		.secure = secure,
		.rate = rate,
		.wait = wait,
	};

	nvlist_t *vdevs = fnvlist_alloc();
	if (argc == 1) {
		/* no individual leaf vdevs specified, so add them all */
		nvlist_t *config = zpool_get_config(zhp, NULL);
		nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
		    ZPOOL_CONFIG_VDEV_TREE);
		zpool_collect_leaves(zhp, nvroot, vdevs);
		trim_flags.fullpool = B_TRUE;
	} else {
		trim_flags.fullpool = B_FALSE;
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
	}

	int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);

	fnvlist_free(vdevs);
	zpool_close(zhp);

	return (error);
}

/*
 * Converts a total number of seconds to a human readable string broken
 * down into days/hours/minutes/seconds.
 */
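/*
 * Illustrative results (buf must be large enough, e.g. 32 bytes):
 *
 *	secs_to_dhms(90061, buf)	->  "1 days 01:01:01"
 *	secs_to_dhms(3661, buf)		->  "01:01:01"
 */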
static void
secs_to_dhms(uint64_t total, char *buf)
{
	uint64_t days = total / 60 / 60 / 24;
	uint64_t hours = (total / 60 / 60) % 24;
	uint64_t mins = (total / 60) % 60;
	uint64_t secs = (total % 60);

	if (days > 0) {
		(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
		    (u_longlong_t)days, (u_longlong_t)hours,
		    (u_longlong_t)mins, (u_longlong_t)secs);
	} else {
		(void) sprintf(buf, "%02llu:%02llu:%02llu",
		    (u_longlong_t)hours, (u_longlong_t)mins,
		    (u_longlong_t)secs);
	}
}

/*
 * Print out detailed error scrub status.
 */
static void
print_err_scrub_status(pool_scan_stat_t *ps)
{
	time_t start, end, pause;
	uint64_t total_secs_left;
	uint64_t secs_left, mins_left, hours_left, days_left;
	uint64_t examined, to_be_examined;

	if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
		return;
	}

	(void) printf(gettext(" scrub: "));

	start = ps->pss_error_scrub_start;
	end = ps->pss_error_scrub_end;
	pause = ps->pss_pass_error_scrub_pause;
	examined = ps->pss_error_scrub_examined;
	to_be_examined = ps->pss_error_scrub_to_be_examined;

	assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);

	if (ps->pss_error_scrub_state == DSS_FINISHED) {
		total_secs_left = end - start;
		days_left = total_secs_left / 60 / 60 / 24;
		hours_left = (total_secs_left / 60 / 60) % 24;
		mins_left = (total_secs_left / 60) % 60;
		secs_left = (total_secs_left % 60);

		(void) printf(gettext("scrubbed %llu error blocks in %llu days "
		    "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
		    (u_longlong_t)days_left, (u_longlong_t)hours_left,
		    (u_longlong_t)mins_left, (u_longlong_t)secs_left,
		    ctime(&end));

		return;
	} else if (ps->pss_error_scrub_state == DSS_CANCELED) {
		(void) printf(gettext("error scrub canceled on %s"),
		    ctime(&end));
		return;
	}
	assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);

	/* Error scrub is in progress. */
	if (pause == 0) {
		(void) printf(gettext("error scrub in progress since %s"),
		    ctime(&start));
	} else {
		(void) printf(gettext("error scrub paused since %s"),
		    ctime(&pause));
		(void) printf(gettext("\terror scrub started on %s"),
		    ctime(&start));
	}

	double fraction_done = (double)examined / (to_be_examined + examined);
	(void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
	    " blocks"), 100 * fraction_done, (u_longlong_t)examined);

	(void) printf("\n");
}

/*
 * Print out detailed scrub status.
 */
static void
print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
{
	time_t start, end, pause;
	uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
	uint64_t elapsed, scan_rate, issue_rate;
	double fraction_done;
	char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
	char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];

	printf(" ");
	printf_color(ANSI_BOLD, gettext("scan:"));
	printf(" ");

	/* If there's never been a scan, there's not much to say. */
	if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
	    ps->pss_func >= POOL_SCAN_FUNCS) {
		(void) printf(gettext("none requested\n"));
		return;
	}

	start = ps->pss_start_time;
	end = ps->pss_end_time;
	pause = ps->pss_pass_scrub_pause;

	zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));

	int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
	int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
	assert(is_resilver || is_scrub);

	/* Scan is finished or canceled. */
	if (ps->pss_state == DSS_FINISHED) {
		secs_to_dhms(end - start, time_buf);

		if (is_scrub) {
			(void) printf(gettext("scrub repaired %s "
			    "in %s with %llu errors on %s"), processed_buf,
			    time_buf, (u_longlong_t)ps->pss_errors,
			    ctime(&end));
		} else if (is_resilver) {
			(void) printf(gettext("resilvered %s "
			    "in %s with %llu errors on %s"), processed_buf,
			    time_buf, (u_longlong_t)ps->pss_errors,
			    ctime(&end));
		}
		return;
	} else if (ps->pss_state == DSS_CANCELED) {
		if (is_scrub) {
			(void) printf(gettext("scrub canceled on %s"),
			    ctime(&end));
		} else if (is_resilver) {
			(void) printf(gettext("resilver canceled on %s"),
			    ctime(&end));
		}
		return;
	}

	assert(ps->pss_state == DSS_SCANNING);

	/* Scan is in progress. Resilvers can't be paused. */
	if (is_scrub) {
		if (pause == 0) {
			(void) printf(gettext("scrub in progress since %s"),
			    ctime(&start));
		} else {
			(void) printf(gettext("scrub paused since %s"),
			    ctime(&pause));
			(void) printf(gettext("\tscrub started on %s"),
			    ctime(&start));
		}
	} else if (is_resilver) {
		(void) printf(gettext("resilver in progress since %s"),
		    ctime(&start));
	}

	scanned = ps->pss_examined;
	pass_scanned = ps->pss_pass_exam;
	issued = ps->pss_issued;
	pass_issued = ps->pss_pass_issued;
	total_s = ps->pss_to_examine;
	total_i = ps->pss_to_examine - ps->pss_skipped;

	/* we are only done with a block once we have issued the IO for it */
	fraction_done = (double)issued / total_i;

	/* elapsed time for this pass, rounding up to 1 if it's 0 */
	elapsed = time(NULL) - ps->pss_pass_start;
	elapsed -= ps->pss_pass_scrub_spent_paused;
	elapsed = (elapsed != 0) ? elapsed : 1;

	scan_rate = pass_scanned / elapsed;
	issue_rate = pass_issued / elapsed;

	/* format all of the numbers we will be reporting */
	zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
	zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
	zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
	zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));

	/* do not print estimated time if we have a paused scrub */
	(void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
	if (pause == 0 && scan_rate > 0) {
		zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
		(void) printf(gettext(" at %s/s"), srate_buf);
	}
	(void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
	if (pause == 0 && issue_rate > 0) {
		zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
		(void) printf(gettext(" at %s/s"), irate_buf);
	}
	(void) printf(gettext("\n"));

	if (is_resilver) {
		(void) printf(gettext("\t%s resilvered, %.2f%% done"),
		    processed_buf, 100 * fraction_done);
	} else if (is_scrub) {
		(void) printf(gettext("\t%s repaired, %.2f%% done"),
		    processed_buf, 100 * fraction_done);
	}

	if (pause == 0) {
		/*
		 * Only provide an estimate iff:
		 * 1) we haven't yet issued all we expected, and
		 * 2) the issue rate exceeds 10 MB/s, and
		 * 3) it's either:
		 *    a) a resilver which has started repairs, or
		 *    b) a scrub which has entered the issue phase.
		 */
		if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
		    ((is_resilver && ps->pss_processed > 0) ||
		    (is_scrub && issued > 0))) {
			secs_to_dhms((total_i - issued) / issue_rate, time_buf);
			(void) printf(gettext(", %s to go\n"), time_buf);
		} else {
			(void) printf(gettext(", no estimated "
			    "completion time\n"));
		}
	} else {
		(void) printf(gettext("\n"));
	}
}

static void
print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
{
	if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
		return;

	printf(" ");
	printf_color(ANSI_BOLD, gettext("scan:"));
	printf(" ");

	uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
	uint64_t bytes_issued = vrs->vrs_bytes_issued;
	uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
	uint64_t bytes_est_s = vrs->vrs_bytes_est;
	uint64_t bytes_est_i = vrs->vrs_bytes_est;
	if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
		bytes_est_i -= vrs->vrs_pass_bytes_skipped;
	uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
	    (vrs->vrs_pass_time_ms + 1)) * 1000;
	uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
	    (vrs->vrs_pass_time_ms + 1)) * 1000;
	double scan_pct = MIN((double)bytes_scanned * 100 /
	    (bytes_est_s + 1), 100);

	/* Format all of the numbers we will be reporting */
	char bytes_scanned_buf[7], bytes_issued_buf[7];
	char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
	char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
	zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
	    sizeof (bytes_scanned_buf));
	zfs_nicebytes(bytes_issued, bytes_issued_buf,
	    sizeof (bytes_issued_buf));
	zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
	    sizeof (bytes_rebuilt_buf));
	zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
	zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));

	time_t start = vrs->vrs_start_time;
	time_t end = vrs->vrs_end_time;

	/* Rebuild is finished or canceled. */
	if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
		secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
		(void) printf(gettext("resilvered (%s) %s in %s "
		    "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
		    time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
		return;
	} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
		(void) printf(gettext("resilver (%s) canceled on %s"),
		    vdev_name, ctime(&end));
		return;
	} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
		(void) printf(gettext("resilver (%s) in progress since %s"),
		    vdev_name, ctime(&start));
	}

	assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);

	(void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
	    bytes_est_s_buf);
	if (scan_rate > 0) {
		zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
		(void) printf(gettext(" at %s/s"), scan_rate_buf);
	}
	(void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
	    bytes_est_i_buf);
	if (issue_rate > 0) {
		zfs_nicebytes(issue_rate, issue_rate_buf,
		    sizeof (issue_rate_buf));
		(void) printf(gettext(" at %s/s"), issue_rate_buf);
	}
	(void) printf(gettext("\n"));

	(void) printf(gettext("\t%s resilvered, %.2f%% done"),
	    bytes_rebuilt_buf, scan_pct);

	if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
		if (bytes_est_s >= bytes_scanned &&
		    scan_rate >= 10 * 1024 * 1024) {
			secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
			    time_buf);
			(void) printf(gettext(", %s to go\n"), time_buf);
		} else {
			(void) printf(gettext(", no estimated "
			    "completion time\n"));
		}
	} else {
		(void) printf(gettext("\n"));
	}
}

/*
 * Print rebuild status for top-level vdevs.
 */
static void
print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	for (uint_t c = 0; c < children; c++) {
		vdev_rebuild_stat_t *vrs;
		uint_t i;

		if (nvlist_lookup_uint64_array(child[c],
		    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
			char *name = zpool_vdev_name(g_zfs, zhp,
			    child[c], VDEV_NAME_TYPE_ID);
			print_rebuild_status_impl(vrs, i, name);
			free(name);
		}
	}
}

/*
 * As we don't scrub checkpointed blocks, we want to warn the user that we
 * skipped scanning some blocks if a checkpoint exists or existed at any
 * time during the scan. If a sequential instead of healing reconstruction
 * was performed then the blocks were reconstructed. However, their checksums
 * have not been verified so we still print the warning.
 */
static void
print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
{
	if (ps == NULL || pcs == NULL)
		return;

	if (pcs->pcs_state == CS_NONE ||
	    pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
		return;

	assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);

	if (ps->pss_state == DSS_NONE)
		return;

	if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
	    ps->pss_end_time < pcs->pcs_start_time)
		return;

	if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
		(void) printf(gettext(" scan warning: skipped blocks "
		    "that are only referenced by the checkpoint.\n"));
	} else {
		assert(ps->pss_state == DSS_SCANNING);
		(void) printf(gettext(" scan warning: skipping blocks "
		    "that are only referenced by the checkpoint.\n"));
	}
}

/*
 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
 * the last completed (or cancelled) rebuild.
 */
static boolean_t
check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
{
	nvlist_t **child;
	uint_t children;
	boolean_t rebuilding = B_FALSE;
	uint64_t end_time = 0;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	for (uint_t c = 0; c < children; c++) {
		vdev_rebuild_stat_t *vrs;
		uint_t i;

		if (nvlist_lookup_uint64_array(child[c],
		    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {

			if (vrs->vrs_end_time > end_time)
				end_time = vrs->vrs_end_time;

			if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
				rebuilding = B_TRUE;
				end_time = 0;
				break;
			}
		}
	}

	if (rebuild_end_time != NULL)
		*rebuild_end_time = end_time;

	return (rebuilding);
}

static void
vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    int depth, boolean_t isspare, char *parent, nvlist_t *item)
{
	nvlist_t *vds, **child, *ch = NULL;
	uint_t vsc, children;
	vdev_stat_t *vs;
	char *vname;
	uint64_t notpresent;
	const char *type, *path;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;
	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
		return;

	if (cb->cb_print_unhealthy && depth > 0 &&
	    for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
		return;
	}
	vname = zpool_vdev_name(g_zfs, zhp, nv,
	    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
	vds = fnvlist_alloc();
	fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int);
	if (cb->cb_flat_vdevs && parent != NULL) {
		fnvlist_add_string(vds, "parent", parent);
	}

	if (isspare) {
		if (vs->vs_aux == VDEV_AUX_SPARED) {
			fnvlist_add_string(vds, "state", "INUSE");
			used_by_other(zhp, nv, vds);
		} else if (vs->vs_state == VDEV_STATE_HEALTHY)
			fnvlist_add_string(vds, "state", "AVAIL");
	} else {
		if (vs->vs_alloc) {
			nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_space) {
			nice_num_str_nvlist(vds, "total_space", vs->vs_space,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_dspace) {
			nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_rsize) {
			nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_esize) {
			nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_self_healed) {
			nice_num_str_nvlist(vds, "self_healed",
			    vs->vs_self_healed, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		}
		if (vs->vs_pspace) {
			nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(vds, "checksum_errors",
		    vs->vs_checksum_errors, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICENUM_1024);
		if (vs->vs_scan_processed) {
			nice_num_str_nvlist(vds, "scan_processed",
			    vs->vs_scan_processed, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		}
		if (vs->vs_checkpoint_space) {
			nice_num_str_nvlist(vds, "checkpoint_space",
			    vs->vs_checkpoint_space, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		}
		if (vs->vs_resilver_deferred) {
			nice_num_str_nvlist(vds, "resilver_deferred",
			    vs->vs_resilver_deferred, B_TRUE,
			    cb->cb_json_as_int, ZFS_NICENUM_1024);
		}
		if (children == 0) {
			nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
		}
		if (cb->cb_print_power) {
			if (children == 0) {
				/* Only leaf vdevs have physical slots */
				switch (zpool_power_current_state(zhp, (char *)
				    fnvlist_lookup_string(nv,
				    ZPOOL_CONFIG_PATH))) {
				case 0:
					fnvlist_add_string(vds, "power_state",
					    "off");
					break;
				case 1:
					fnvlist_add_string(vds, "power_state",
					    "on");
					break;
				default:
					fnvlist_add_string(vds, "power_state",
					    "-");
				}
			} else {
				fnvlist_add_string(vds, "power_state", "-");
			}
		}
	}

	if (cb->cb_print_dio_verify) {
		nice_num_str_nvlist(vds, "dio_verify_errors",
		    vs->vs_dio_verify_errors, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICENUM_1024);
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &notpresent) == 0) {
		nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
		    1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		fnvlist_add_string(vds, "was",
		    fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));
	} else if (vs->vs_aux != VDEV_AUX_NONE) {
		fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);
	} else if (children == 0 && !isspare &&
	    getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
	    VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
	    vs->vs_configured_ashift < vs->vs_physical_ashift) {
		nice_num_str_nvlist(vds, "configured_ashift",
		    vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,
		    ZFS_NICENUM_1024);
		nice_num_str_nvlist(vds, "physical_ashift",
		    vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,
		    ZFS_NICENUM_1024);
	}
	if (vs->vs_scan_removing != 0) {
		nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
	} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
		nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
	}

	if (cb->vcdl != NULL) {
		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
			zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp),
			    path, vds);
		}
	}

	if (children == 0) {
		if (cb->cb_print_vdev_init) {
			if (vs->vs_initialize_state != 0) {
				uint64_t st = vs->vs_initialize_state;
				fnvlist_add_string(vds, "init_state",
				    vdev_init_state_str[st]);
				nice_num_str_nvlist(vds, "initialized",
				    vs->vs_initialize_bytes_done,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICENUM_BYTES);
				nice_num_str_nvlist(vds, "to_initialize",
				    vs->vs_initialize_bytes_est,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICENUM_BYTES);
				nice_num_str_nvlist(vds, "init_time",
				    vs->vs_initialize_action_time,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICE_TIMESTAMP);
				nice_num_str_nvlist(vds, "init_errors",
				    vs->vs_initialize_errors,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICENUM_1024);
			} else {
				fnvlist_add_string(vds, "init_state",
				    "UNINITIALIZED");
			}
		}
		if (cb->cb_print_vdev_trim) {
			if (vs->vs_trim_notsup == 0) {
				if (vs->vs_trim_state != 0) {
					uint64_t st = vs->vs_trim_state;
					fnvlist_add_string(vds, "trim_state",
					    vdev_trim_state_str[st]);
					nice_num_str_nvlist(vds, "trimmed",
					    vs->vs_trim_bytes_done,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(vds, "to_trim",
					    vs->vs_trim_bytes_est,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(vds, "trim_time",
					    vs->vs_trim_action_time,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					nice_num_str_nvlist(vds, "trim_errors",
					    vs->vs_trim_errors,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_1024);
				} else
					fnvlist_add_string(vds, "trim_state",
					    "UNTRIMMED");
			}
			nice_num_str_nvlist(vds, "trim_notsup",
			    vs->vs_trim_notsup, B_TRUE,
			    cb->cb_json_as_int, ZFS_NICENUM_1024);
		}
	} else {
		ch = fnvlist_alloc();
	}

	if (cb->cb_flat_vdevs && children == 0) {
		fnvlist_add_nvlist(item, vname, vds);
	}

	for (int c = 0; c < children; c++) {
		uint64_t islog = B_FALSE, ishole = B_FALSE;
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &islog);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &ishole);
		if (islog || ishole)
			continue;
		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;
		if (cb->cb_flat_vdevs) {
			vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
			    vname, item);
		}
		vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
		    vname, ch);
	}

	if (ch != NULL) {
		if (!nvlist_empty(ch))
			fnvlist_add_nvlist(vds, "vdevs", ch);
		fnvlist_free(ch);
	}
	fnvlist_add_nvlist(item, vname, vds);
	fnvlist_free(vds);
	free(vname);
}

static void
class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    const char *class, nvlist_t *item)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *class_obj = NULL;

	if (!cb->cb_flat_vdevs)
		class_obj = fnvlist_alloc();

	assert(zhp != NULL || !cb->cb_verbose);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE;
		const char *bias = NULL;
		const char *type = NULL;
		char *name = zpool_vdev_name(g_zfs, zhp, child[c],
		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);

		if (is_log) {
			bias = (char *)VDEV_ALLOC_CLASS_LOGS;
		} else {
			(void) nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			(void) nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_TYPE, &type);
		}

		if (bias == NULL || strcmp(bias, class) != 0)
			continue;
		if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
			continue;

		if (cb->cb_flat_vdevs) {
			vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
			    NULL, item);
		} else {
			vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
			    NULL, class_obj);
		}
		free(name);
	}
	if (!cb->cb_flat_vdevs) {
		if (!nvlist_empty(class_obj))
			fnvlist_add_nvlist(item, class, class_obj);
		fnvlist_free(class_obj);
	}
}

static void
l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    nvlist_t *item)
{
	nvlist_t *l2c = NULL, **l2cache;
	uint_t nl2cache;
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		if (nl2cache == 0)
			return;
		if (!cb->cb_flat_vdevs)
			l2c = fnvlist_alloc();
		for (int i = 0; i < nl2cache; i++) {
			if (cb->cb_flat_vdevs) {
				vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
				    B_FALSE, NULL, item);
			} else {
				vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
				    B_FALSE, NULL, l2c);
			}
		}
	}
	if (!cb->cb_flat_vdevs) {
		if (!nvlist_empty(l2c))
			fnvlist_add_nvlist(item, "l2cache", l2c);
		fnvlist_free(l2c);
	}
}

static void
spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    nvlist_t *item)
{
	nvlist_t *sp = NULL, **spares;
	uint_t nspares;
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (nspares == 0)
			return;
		if (!cb->cb_flat_vdevs)
			sp = fnvlist_alloc();
		for (int i = 0; i < nspares; i++) {
			if (cb->cb_flat_vdevs) {
				vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
				    NULL, item);
			} else {
				vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
				    NULL, sp);
			}
		}
	}
	if (!cb->cb_flat_vdevs) {
		if (!nvlist_empty(sp))
			fnvlist_add_nvlist(item, "spares", sp);
		fnvlist_free(sp);
	}
}

static void
errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
{
	uint64_t nerr;
	nvlist_t *config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
	    &nerr) == 0) {
		nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		if (nerr != 0 && cb->cb_verbose) {
			nvlist_t *nverrlist = NULL;
			if (zpool_get_errlog(zhp, &nverrlist) == 0) {
				int i = 0;
				int count = 0;
				size_t len = MAXPATHLEN * 2;
				nvpair_t *elem = NULL;

				for (nvpair_t *pair =
				    nvlist_next_nvpair(nverrlist, NULL);
				    pair != NULL;
				    pair = nvlist_next_nvpair(nverrlist, pair))
					count++;
				char **errl = (char **)malloc(
				    count * sizeof (char *));

				while ((elem = nvlist_next_nvpair(nverrlist,
				    elem)) != NULL) {
					nvlist_t *nv;
					uint64_t dsobj, obj;

					verify(nvpair_value_nvlist(elem,
					    &nv) == 0);
					verify(nvlist_lookup_uint64(nv,
					    ZPOOL_ERR_DATASET, &dsobj) == 0);
					verify(nvlist_lookup_uint64(nv,
					    ZPOOL_ERR_OBJECT, &obj) == 0);
					errl[i] = safe_malloc(len);
					zpool_obj_to_path(zhp, dsobj, obj,
					    errl[i++], len);
				}
				nvlist_free(nverrlist);
				fnvlist_add_string_array(item, "errlist",
				    (const char **)errl, count);
				for (int i = 0; i < count; ++i)
					free(errl[i]);
				free(errl);
			} else
				fnvlist_add_string(item, "errlist",
				    strerror(errno));
		}
	}
}

static void
ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item)
{
	nice_num_str_nvlist(item, "blocks", dds->dds_blocks,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
	nice_num_str_nvlist(item, "logical_size", dds->dds_lsize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
	nice_num_str_nvlist(item, "physical_size", dds->dds_psize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
	nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
	nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
	nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
	nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
	nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
}
9587
9588 static void
dedup_stats_nvlist(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t * item)9589 dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
9590 {
9591 nvlist_t *config;
9592 if (cb->cb_dedup_stats) {
9593 ddt_histogram_t *ddh;
9594 ddt_stat_t *dds;
9595 ddt_object_t *ddo;
9596 nvlist_t *ddt_stat, *ddt_obj, *dedup;
9597 uint_t c;
9598 uint64_t cspace_prop;
9599
9600 config = zpool_get_config(zhp, NULL);
9601 if (nvlist_lookup_uint64_array(config,
9602 ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0)
9603 return;
9604
9605 dedup = fnvlist_alloc();
9606 ddt_obj = fnvlist_alloc();
9607 nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count,
9608 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9609 if (ddo->ddo_count == 0) {
9610 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
9611 ddt_obj);
9612 fnvlist_add_nvlist(item, "dedup_stats", dedup);
9613 fnvlist_free(ddt_obj);
9614 fnvlist_free(dedup);
9615 return;
9616 } else {
9617 nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace,
9618 cb->cb_literal, cb->cb_json_as_int,
9619 ZFS_NICENUM_1024);
9620 nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace,
9621 cb->cb_literal, cb->cb_json_as_int,
9622 ZFS_NICENUM_1024);
9623 /*
9624 * Squash cached size into in-core size to handle race.
9625 * Only include cached size if it is available.
9626 */
9627 cspace_prop = zpool_get_prop_int(zhp,
9628 ZPOOL_PROP_DEDUPCACHED, NULL);
9629 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
9630 nice_num_str_nvlist(dedup, "cspace", cspace_prop,
9631 cb->cb_literal, cb->cb_json_as_int,
9632 ZFS_NICENUM_1024);
9633 }
9634
9635 ddt_stat = fnvlist_alloc();
9636 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
9637 (uint64_t **)&dds, &c) == 0) {
9638 nvlist_t *total = fnvlist_alloc();
9639 if (dds->dds_blocks == 0)
9640 fnvlist_add_string(total, "blocks", "0");
9641 else
9642 ddt_stats_nvlist(dds, cb, total);
9643 fnvlist_add_nvlist(ddt_stat, "total", total);
9644 fnvlist_free(total);
9645 }
9646 if (nvlist_lookup_uint64_array(config,
9647 ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) {
9648 nvlist_t *hist = fnvlist_alloc();
9649 nvlist_t *entry = NULL;
9650 char buf[16];
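/* Emit only the non-empty histogram buckets, keyed by bucket index. */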
9651 for (int h = 0; h < 64; h++) {
9652 if (ddh->ddh_stat[h].dds_blocks != 0) {
9653 entry = fnvlist_alloc();
9654 ddt_stats_nvlist(&ddh->ddh_stat[h], cb,
9655 entry);
9656 snprintf(buf, 16, "%d", h);
9657 fnvlist_add_nvlist(hist, buf, entry);
9658 fnvlist_free(entry);
9659 }
9660 }
9661 if (!nvlist_empty(hist))
9662 fnvlist_add_nvlist(ddt_stat, "histogram", hist);
9663 fnvlist_free(hist);
9664 }
9665
9666 if (!nvlist_empty(ddt_obj)) {
9667 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
9668 ddt_obj);
9669 }
9670 fnvlist_free(ddt_obj);
9671 if (!nvlist_empty(ddt_stat)) {
9672 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS,
9673 ddt_stat);
9674 }
9675 fnvlist_free(ddt_stat);
9676 if (!nvlist_empty(dedup))
9677 fnvlist_add_nvlist(item, "dedup_stats", dedup);
9678 fnvlist_free(dedup);
9679 }
9680 }
9681
9682 static void
9683 raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9684 nvlist_t *nvroot, nvlist_t *item)
9685 {
9686 uint_t c;
9687 pool_raidz_expand_stat_t *pres = NULL;
9688 if (nvlist_lookup_uint64_array(nvroot,
9689 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) {
9690 nvlist_t **child;
9691 uint_t children;
9692 nvlist_t *nv = fnvlist_alloc();
9693 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9694 &child, &children) == 0);
9695 assert(pres->pres_expanding_vdev < children);
9696 char *name =
9697 zpool_vdev_name(g_zfs, zhp,
9698 child[pres->pres_expanding_vdev], 0);
9699 fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int);
9700 fnvlist_add_string(nv, "state",
9701 pool_scan_state_str[pres->pres_state]);
9702 nice_num_str_nvlist(nv, "expanding_vdev",
9703 pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int,
9704 ZFS_NICENUM_1024);
9705 nice_num_str_nvlist(nv, "start_time", pres->pres_start_time,
9706 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9707 nice_num_str_nvlist(nv, "end_time", pres->pres_end_time,
9708 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9709 nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow,
9710 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9711 nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed,
9712 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9713 nice_num_str_nvlist(nv, "waiting_for_resilver",
9714 pres->pres_waiting_for_resilver, B_TRUE,
9715 cb->cb_json_as_int, ZFS_NICENUM_1024);
9716 fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv);
9717 fnvlist_free(nv);
9718 free(name);
9719 }
9720 }
9721
9722 static void
9723 checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb,
9724 nvlist_t *item)
9725 {
9726 uint_t c;
9727 pool_checkpoint_stat_t *pcs = NULL;
9728 if (nvlist_lookup_uint64_array(nvroot,
9729 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) {
9730 nvlist_t *nv = fnvlist_alloc();
9731 fnvlist_add_string(nv, "state",
9732 checkpoint_state_str[pcs->pcs_state]);
9733 nice_num_str_nvlist(nv, "start_time",
9734 pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int,
9735 ZFS_NICE_TIMESTAMP);
9736 nice_num_str_nvlist(nv, "space",
9737 pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int,
9738 ZFS_NICENUM_BYTES);
9739 fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv);
9740 fnvlist_free(nv);
9741 }
9742 }
9743
9744 static void
9745 removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9746 nvlist_t *nvroot, nvlist_t *item)
9747 {
9748 uint_t c;
9749 pool_removal_stat_t *prs = NULL;
9750 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS,
9751 (uint64_t **)&prs, &c) == 0) {
9752 if (prs->prs_state != DSS_NONE) {
9753 nvlist_t **child;
9754 uint_t children;
9755 verify(nvlist_lookup_nvlist_array(nvroot,
9756 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
9757 assert(prs->prs_removing_vdev < children);
9758 char *vdev_name = zpool_vdev_name(g_zfs, zhp,
9759 child[prs->prs_removing_vdev], B_TRUE);
9760 nvlist_t *nv = fnvlist_alloc();
9761 fill_vdev_info(nv, zhp, vdev_name, B_FALSE,
9762 cb->cb_json_as_int);
9763 fnvlist_add_string(nv, "state",
9764 pool_scan_state_str[prs->prs_state]);
9765 nice_num_str_nvlist(nv, "removing_vdev",
9766 prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int,
9767 ZFS_NICENUM_1024);
9768 nice_num_str_nvlist(nv, "start_time",
9769 prs->prs_start_time, cb->cb_literal,
9770 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9771 nice_num_str_nvlist(nv, "end_time", prs->prs_end_time,
9772 cb->cb_literal, cb->cb_json_as_int,
9773 ZFS_NICE_TIMESTAMP);
9774 nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy,
9775 cb->cb_literal, cb->cb_json_as_int,
9776 ZFS_NICENUM_BYTES);
9777 nice_num_str_nvlist(nv, "copied", prs->prs_copied,
9778 cb->cb_literal, cb->cb_json_as_int,
9779 ZFS_NICENUM_BYTES);
9780 nice_num_str_nvlist(nv, "mapping_memory",
9781 prs->prs_mapping_memory, cb->cb_literal,
9782 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9783 fnvlist_add_nvlist(item,
9784 ZPOOL_CONFIG_REMOVAL_STATS, nv);
9785 fnvlist_free(nv);
9786 free(vdev_name);
9787 }
9788 }
9789 }
9790
9791 static void
9792 scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9793 nvlist_t *nvroot, nvlist_t *item)
9794 {
9795 pool_scan_stat_t *ps = NULL;
9796 uint_t c;
9797 nvlist_t *scan = fnvlist_alloc();
9798 nvlist_t **child;
9799 uint_t children;
9800
9801 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
9802 (uint64_t **)&ps, &c) == 0) {
9803 fnvlist_add_string(scan, "function",
9804 pool_scan_func_str[ps->pss_func]);
9805 fnvlist_add_string(scan, "state",
9806 pool_scan_state_str[ps->pss_state]);
9807 nice_num_str_nvlist(scan, "start_time", ps->pss_start_time,
9808 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9809 nice_num_str_nvlist(scan, "end_time", ps->pss_end_time,
9810 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9811 nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine,
9812 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9813 nice_num_str_nvlist(scan, "examined", ps->pss_examined,
9814 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9815 nice_num_str_nvlist(scan, "skipped", ps->pss_skipped,
9816 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9817 nice_num_str_nvlist(scan, "processed", ps->pss_processed,
9818 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9819 nice_num_str_nvlist(scan, "errors", ps->pss_errors,
9820 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9821 nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam,
9822 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9823 nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start,
9824 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9825 nice_num_str_nvlist(scan, "scrub_pause",
9826 ps->pss_pass_scrub_pause, cb->cb_literal,
9827 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9828 nice_num_str_nvlist(scan, "scrub_spent_paused",
9829 ps->pss_pass_scrub_spent_paused,
9830 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9831 nice_num_str_nvlist(scan, "issued_bytes_per_scan",
9832 ps->pss_pass_issued, cb->cb_literal,
9833 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9834 nice_num_str_nvlist(scan, "issued", ps->pss_issued,
9835 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9836 if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
9837 ps->pss_error_scrub_start > ps->pss_start_time) {
9838 fnvlist_add_string(scan, "err_scrub_func",
9839 pool_scan_func_str[ps->pss_error_scrub_func]);
9840 fnvlist_add_string(scan, "err_scrub_state",
9841 pool_scan_state_str[ps->pss_error_scrub_state]);
9842 nice_num_str_nvlist(scan, "err_scrub_start_time",
9843 ps->pss_error_scrub_start,
9844 cb->cb_literal, cb->cb_json_as_int,
9845 ZFS_NICE_TIMESTAMP);
9846 nice_num_str_nvlist(scan, "err_scrub_end_time",
9847 ps->pss_error_scrub_end,
9848 cb->cb_literal, cb->cb_json_as_int,
9849 ZFS_NICE_TIMESTAMP);
9850 nice_num_str_nvlist(scan, "err_scrub_examined",
9851 ps->pss_error_scrub_examined,
9852 cb->cb_literal, cb->cb_json_as_int,
9853 ZFS_NICENUM_1024);
9854 nice_num_str_nvlist(scan, "err_scrub_to_examine",
9855 ps->pss_error_scrub_to_be_examined,
9856 cb->cb_literal, cb->cb_json_as_int,
9857 ZFS_NICENUM_1024);
9858 nice_num_str_nvlist(scan, "err_scrub_pause",
9859 ps->pss_pass_error_scrub_pause,
9860 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9861 }
9862 }
9863
9864 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9865 &child, &children) == 0) {
9866 vdev_rebuild_stat_t *vrs;
9867 uint_t i;
9868 char *name;
9869 nvlist_t *nv;
9870 nvlist_t *rebuild = fnvlist_alloc();
9871 uint64_t st;
9872 for (uint_t c = 0; c < children; c++) {
9873 if (nvlist_lookup_uint64_array(child[c],
9874 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs,
9875 &i) == 0) {
9876 if (vrs->vrs_state != VDEV_REBUILD_NONE) {
9877 nv = fnvlist_alloc();
9878 name = zpool_vdev_name(g_zfs, zhp,
9879 child[c], VDEV_NAME_TYPE_ID);
9880 fill_vdev_info(nv, zhp, name, B_FALSE,
9881 cb->cb_json_as_int);
9882 st = vrs->vrs_state;
9883 fnvlist_add_string(nv, "state",
9884 vdev_rebuild_state_str[st]);
9885 nice_num_str_nvlist(nv, "start_time",
9886 vrs->vrs_start_time, cb->cb_literal,
9887 cb->cb_json_as_int,
9888 ZFS_NICE_TIMESTAMP);
9889 nice_num_str_nvlist(nv, "end_time",
9890 vrs->vrs_end_time, cb->cb_literal,
9891 cb->cb_json_as_int,
9892 ZFS_NICE_TIMESTAMP);
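/*
 * vrs_scan_time_ms is in milliseconds; scale by 1000000 so the
 * value is in the nanoseconds that the ZFS_NICENUM_TIME
 * formatter expects.
 */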
9893 nice_num_str_nvlist(nv, "scan_time",
9894 vrs->vrs_scan_time_ms * 1000000,
9895 cb->cb_literal, cb->cb_json_as_int,
9896 ZFS_NICENUM_TIME);
9897 nice_num_str_nvlist(nv, "scanned",
9898 vrs->vrs_bytes_scanned,
9899 cb->cb_literal, cb->cb_json_as_int,
9900 ZFS_NICENUM_BYTES);
9901 nice_num_str_nvlist(nv, "issued",
9902 vrs->vrs_bytes_issued,
9903 cb->cb_literal, cb->cb_json_as_int,
9904 ZFS_NICENUM_BYTES);
9905 nice_num_str_nvlist(nv, "rebuilt",
9906 vrs->vrs_bytes_rebuilt,
9907 cb->cb_literal, cb->cb_json_as_int,
9908 ZFS_NICENUM_BYTES);
9909 nice_num_str_nvlist(nv, "to_scan",
9910 vrs->vrs_bytes_est, cb->cb_literal,
9911 cb->cb_json_as_int,
9912 ZFS_NICENUM_BYTES);
9913 nice_num_str_nvlist(nv, "errors",
9914 vrs->vrs_errors, cb->cb_literal,
9915 cb->cb_json_as_int,
9916 ZFS_NICENUM_1024);
9917 nice_num_str_nvlist(nv, "pass_time",
9918 vrs->vrs_pass_time_ms * 1000000,
9919 cb->cb_literal, cb->cb_json_as_int,
9920 ZFS_NICENUM_TIME);
9921 nice_num_str_nvlist(nv, "pass_scanned",
9922 vrs->vrs_pass_bytes_scanned,
9923 cb->cb_literal, cb->cb_json_as_int,
9924 ZFS_NICENUM_BYTES);
9925 nice_num_str_nvlist(nv, "pass_issued",
9926 vrs->vrs_pass_bytes_issued,
9927 cb->cb_literal, cb->cb_json_as_int,
9928 ZFS_NICENUM_BYTES);
9929 nice_num_str_nvlist(nv, "pass_skipped",
9930 vrs->vrs_pass_bytes_skipped,
9931 cb->cb_literal, cb->cb_json_as_int,
9932 ZFS_NICENUM_BYTES);
9933 fnvlist_add_nvlist(rebuild, name, nv);
9934 free(name);
9935 }
9936 }
9937 }
9938 if (!nvlist_empty(rebuild))
9939 fnvlist_add_nvlist(scan, "rebuild_stats", rebuild);
9940 fnvlist_free(rebuild);
9941 }
9942
9943 if (!nvlist_empty(scan))
9944 fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan);
9945 fnvlist_free(scan);
9946 }
9947
9948 /*
9949 * Print the scan status.
9950 */
9951 static void
9952 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
9953 {
9954 uint64_t rebuild_end_time = 0, resilver_end_time = 0;
9955 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
9956 boolean_t have_errorscrub = B_FALSE;
9957 boolean_t active_resilver = B_FALSE;
9958 pool_checkpoint_stat_t *pcs = NULL;
9959 pool_scan_stat_t *ps = NULL;
9960 uint_t c;
9961 time_t scrub_start = 0, errorscrub_start = 0;
9962
9963 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
9964 (uint64_t **)&ps, &c) == 0) {
9965 if (ps->pss_func == POOL_SCAN_RESILVER) {
9966 resilver_end_time = ps->pss_end_time;
9967 active_resilver = (ps->pss_state == DSS_SCANNING);
9968 }
9969
9970 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
9971 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
9972 scrub_start = ps->pss_start_time;
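/*
 * The scan stats arrive as a packed array of uint64_t, so dividing
 * the field offset by sizeof (uint64_t) yields its array index; the
 * check below verifies the array is long enough to carry the newer
 * error-scrub fields before reading them.
 */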
9973 if (c > offsetof(pool_scan_stat_t,
9974 pss_pass_error_scrub_pause) / 8) {
9975 have_errorscrub = (ps->pss_error_scrub_func ==
9976 POOL_SCAN_ERRORSCRUB);
9977 errorscrub_start = ps->pss_error_scrub_start;
9978 }
9979 }
9980
9981 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
9982 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
9983
9984 /* Always print the scrub status when available. */
9985 if (have_scrub && scrub_start > errorscrub_start)
9986 print_scan_scrub_resilver_status(ps);
9987 else if (have_errorscrub && errorscrub_start >= scrub_start)
9988 print_err_scrub_status(ps);
9989
9990 /*
9991 * When there is an active resilver or rebuild print its status.
9992 * Otherwise print the status of the last resilver or rebuild.
9993 */
9994 if (active_resilver || (!active_rebuild && have_resilver &&
9995 resilver_end_time && resilver_end_time > rebuild_end_time)) {
9996 print_scan_scrub_resilver_status(ps);
9997 } else if (active_rebuild || (!active_resilver && have_rebuild &&
9998 rebuild_end_time && rebuild_end_time > resilver_end_time)) {
9999 print_rebuild_status(zhp, nvroot);
10000 }
10001
10002 (void) nvlist_lookup_uint64_array(nvroot,
10003 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
10004 print_checkpoint_scan_warning(ps, pcs);
10005 }
10006
10007 /*
10008 * Print out detailed removal status.
10009 */
10010 static void
10011 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
10012 {
10013 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
10014 time_t start, end;
10015 nvlist_t *config, *nvroot;
10016 nvlist_t **child;
10017 uint_t children;
10018 char *vdev_name;
10019
10020 if (prs == NULL || prs->prs_state == DSS_NONE)
10021 return;
10022
10023 /*
10024 * Determine name of vdev.
10025 */
10026 config = zpool_get_config(zhp, NULL);
10027 nvroot = fnvlist_lookup_nvlist(config,
10028 ZPOOL_CONFIG_VDEV_TREE);
10029 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
10030 &child, &children) == 0);
10031 assert(prs->prs_removing_vdev < children);
10032 vdev_name = zpool_vdev_name(g_zfs, zhp,
10033 child[prs->prs_removing_vdev], B_TRUE);
10034
10035 printf_color(ANSI_BOLD, gettext("remove: "));
10036
10037 start = prs->prs_start_time;
10038 end = prs->prs_end_time;
10039 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
10040
10041 /*
10042 * Removal is finished or canceled.
10043 */
10044 if (prs->prs_state == DSS_FINISHED) {
10045 uint64_t minutes_taken = (end - start) / 60;
10046
10047 (void) printf(gettext("Removal of vdev %llu copied %s "
10048 "in %lluh%um, completed on %s"),
10049 (u_longlong_t)prs->prs_removing_vdev,
10050 copied_buf,
10051 (u_longlong_t)(minutes_taken / 60),
10052 (uint_t)(minutes_taken % 60),
10053 ctime((time_t *)&end));
10054 } else if (prs->prs_state == DSS_CANCELED) {
10055 (void) printf(gettext("Removal of %s canceled on %s"),
10056 vdev_name, ctime(&end));
10057 } else {
10058 uint64_t copied, total, elapsed, rate, mins_left, hours_left;
10059 double fraction_done;
10060
10061 assert(prs->prs_state == DSS_SCANNING);
10062
10063 /*
10064 * Removal is in progress.
10065 */
10066 (void) printf(gettext(
10067 "Evacuation of %s in progress since %s"),
10068 vdev_name, ctime(&start));
10069
10070 copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
10071 total = prs->prs_to_copy;
10072 fraction_done = (double)copied / total;
10073
10074 /* elapsed time for this pass */
10075 elapsed = time(NULL) - prs->prs_start_time;
10076 elapsed = elapsed > 0 ? elapsed : 1;
10077 rate = copied / elapsed;
10078 rate = rate > 0 ? rate : 1;
10079 mins_left = ((total - copied) / rate) / 60;
10080 hours_left = mins_left / 60;
10081
10082 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
10083 zfs_nicenum(total, total_buf, sizeof (total_buf));
10084 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
10085
10086 /*
10087 * do not print estimated time if hours_left is more than
10088 * 30 days
10089 */
10090 (void) printf(gettext(
10091 "\t%s copied out of %s at %s/s, %.2f%% done"),
10092 examined_buf, total_buf, rate_buf, 100 * fraction_done);
10093 if (hours_left < (30 * 24)) {
10094 (void) printf(gettext(", %lluh%um to go\n"),
10095 (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
10096 } else {
10097 (void) printf(gettext(
10098 ", (copy is slow, no estimated time)\n"));
10099 }
10100 }
10101 free(vdev_name);
10102
10103 if (prs->prs_mapping_memory > 0) {
10104 char mem_buf[7];
10105 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
10106 (void) printf(gettext(
10107 "\t%s memory used for removed device mappings\n"),
10108 mem_buf);
10109 }
10110 }
10111
10112 /*
10113 * Print out detailed raidz expansion status.
10114 */
10115 static void
10116 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
10117 {
10118 char copied_buf[7];
10119
10120 if (pres == NULL || pres->pres_state == DSS_NONE)
10121 return;
10122
10123 /*
10124 * Determine name of vdev.
10125 */
10126 nvlist_t *config = zpool_get_config(zhp, NULL);
10127 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
10128 ZPOOL_CONFIG_VDEV_TREE);
10129 nvlist_t **child;
10130 uint_t children;
10131 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
10132 &child, &children) == 0);
10133 assert(pres->pres_expanding_vdev < children);
10134
10135 printf_color(ANSI_BOLD, gettext("expand: "));
10136
10137 time_t start = pres->pres_start_time;
10138 time_t end = pres->pres_end_time;
10139 char *vname =
10140 zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
10141 zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));
10142
10143 /*
10144 * Expansion is finished or canceled.
10145 */
10146 if (pres->pres_state == DSS_FINISHED) {
10147 char time_buf[32];
10148 secs_to_dhms(end - start, time_buf);
10149
10150 (void) printf(gettext("expanded %s-%u copied %s in %s, "
10151 "on %s"), vname, (int)pres->pres_expanding_vdev,
10152 copied_buf, time_buf, ctime((time_t *)&end));
10153 } else {
10154 char examined_buf[7], total_buf[7], rate_buf[7];
10155 uint64_t copied, total, elapsed, rate, secs_left;
10156 double fraction_done;
10157
10158 assert(pres->pres_state == DSS_SCANNING);
10159
10160 /*
10161 * Expansion is in progress.
10162 */
10163 (void) printf(gettext(
10164 "expansion of %s-%u in progress since %s"),
10165 vname, (int)pres->pres_expanding_vdev, ctime(&start));
10166
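/*
 * As with device removal above, clamp copied, elapsed, and rate to
 * at least 1 to avoid dividing by zero in the estimates.
 */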
10167 copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
10168 total = pres->pres_to_reflow;
10169 fraction_done = (double)copied / total;
10170
10171 /* elapsed time for this pass */
10172 elapsed = time(NULL) - pres->pres_start_time;
10173 elapsed = elapsed > 0 ? elapsed : 1;
10174 rate = copied / elapsed;
10175 rate = rate > 0 ? rate : 1;
10176 secs_left = (total - copied) / rate;
10177
10178 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
10179 zfs_nicenum(total, total_buf, sizeof (total_buf));
10180 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
10181
10182 /*
10183 * do not print estimated time if secs_left is more than
10184 * 30 days
10185 */
10186 (void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
10187 examined_buf, total_buf, rate_buf, 100 * fraction_done);
10188 if (pres->pres_waiting_for_resilver) {
10189 (void) printf(gettext(", paused for resilver or "
10190 "clear\n"));
10191 } else if (secs_left < (30 * 24 * 3600)) {
10192 char time_buf[32];
10193 secs_to_dhms(secs_left, time_buf);
10194 (void) printf(gettext(", %s to go\n"), time_buf);
10195 } else {
10196 (void) printf(gettext(
10197 ", (copy is slow, no estimated time)\n"));
10198 }
10199 }
10200 free(vname);
10201 }

10202 static void
10203 print_checkpoint_status(pool_checkpoint_stat_t *pcs)
10204 {
10205 time_t start;
10206 char space_buf[7];
10207
10208 if (pcs == NULL || pcs->pcs_state == CS_NONE)
10209 return;
10210
10211 (void) printf(gettext("checkpoint: "));
10212
10213 start = pcs->pcs_start_time;
10214 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
10215
10216 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
10217 char *date = ctime(&start);
10218
10219 /*
10220 * ctime() adds a newline at the end of the generated
10221 * string, thus the weird format specifier and the
10222 * strlen() call used to chop it off from the output.
10223 */
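/*
 * For example, "Thu Jan  1 00:00:00 2025\n" (25 characters) is
 * printed as the 24-character "Thu Jan  1 00:00:00 2025".
 */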
10224 (void) printf(gettext("created %.*s, consumes %s\n"),
10225 (int)(strlen(date) - 1), date, space_buf);
10226 return;
10227 }
10228
10229 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
10230
10231 (void) printf(gettext("discarding, %s remaining.\n"),
10232 space_buf);
10233 }
10234
10235 static void
10236 print_error_log(zpool_handle_t *zhp)
10237 {
10238 nvlist_t *nverrlist = NULL;
10239 nvpair_t *elem;
10240 char *pathname;
10241 size_t len = MAXPATHLEN * 2;
10242
10243 if (zpool_get_errlog(zhp, &nverrlist) != 0)
10244 return;
10245
10246 (void) printf("errors: Permanent errors have been "
10247 "detected in the following files:\n\n");
10248
10249 pathname = safe_malloc(len);
10250 elem = NULL;
10251 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
10252 nvlist_t *nv;
10253 uint64_t dsobj, obj;
10254
10255 verify(nvpair_value_nvlist(elem, &nv) == 0);
10256 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
10257 &dsobj) == 0);
10258 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
10259 &obj) == 0);
10260 zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
10261 (void) printf("%7s %s\n", "", pathname);
10262 }
10263 free(pathname);
10264 nvlist_free(nverrlist);
10265 }
10266
10267 static void
10268 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
10269 uint_t nspares)
10270 {
10271 uint_t i;
10272 char *name;
10273
10274 if (nspares == 0)
10275 return;
10276
10277 (void) printf(gettext("\tspares\n"));
10278
10279 for (i = 0; i < nspares; i++) {
10280 name = zpool_vdev_name(g_zfs, zhp, spares[i],
10281 cb->cb_name_flags);
10282 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
10283 free(name);
10284 }
10285 }
10286
10287 static void
10288 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
10289 uint_t nl2cache)
10290 {
10291 uint_t i;
10292 char *name;
10293
10294 if (nl2cache == 0)
10295 return;
10296
10297 (void) printf(gettext("\tcache\n"));
10298
10299 for (i = 0; i < nl2cache; i++) {
10300 name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
10301 cb->cb_name_flags);
10302 print_status_config(zhp, cb, name, l2cache[i], 2,
10303 B_FALSE, NULL);
10304 free(name);
10305 }
10306 }
10307
10308 static void
10309 print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal)
10310 {
10311 ddt_histogram_t *ddh;
10312 ddt_stat_t *dds;
10313 ddt_object_t *ddo;
10314 uint_t c;
10315 /* Extra space provided for literal display */
10316 char dspace[32], mspace[32], cspace[32];
10317 uint64_t cspace_prop;
10318 enum zfs_nicenum_format format;
10319 zprop_source_t src;
10320
10321 /*
10322 * If the pool was faulted then we may not have been able to
10323 * obtain the config. Otherwise, if we have anything in the dedup
10324 * table, continue processing the stats.
10325 */
10326 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
10327 (uint64_t **)&ddo, &c) != 0)
10328 return;
10329
10330 (void) printf("\n");
10331 (void) printf(gettext(" dedup: "));
10332 if (ddo->ddo_count == 0) {
10333 (void) printf(gettext("no DDT entries\n"));
10334 return;
10335 }
10336
10337 /*
10338 * Squash cached size into in-core size to handle race.
10339 * Only include cached size if it is available.
10340 */
10341 cspace_prop = zpool_get_prop_int(zhp, ZPOOL_PROP_DEDUPCACHED, &src);
10342 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
10343 format = literal ? ZFS_NICENUM_RAW : ZFS_NICENUM_1024;
10344 zfs_nicenum_format(cspace_prop, cspace, sizeof (cspace), format);
10345 zfs_nicenum_format(ddo->ddo_dspace, dspace, sizeof (dspace), format);
10346 zfs_nicenum_format(ddo->ddo_mspace, mspace, sizeof (mspace), format);
10347 (void) printf("DDT entries %llu, size %s on disk, %s in core",
10348 (u_longlong_t)ddo->ddo_count,
10349 dspace,
10350 mspace);
10351 if (src != ZPROP_SRC_DEFAULT) {
10352 (void) printf(", %s cached (%.02f%%)",
10353 cspace,
10354 (double)cspace_prop / (double)ddo->ddo_mspace * 100.0);
10355 }
10356 (void) printf("\n");
10357
10358 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
10359 (uint64_t **)&dds, &c) == 0);
10360 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
10361 (uint64_t **)&ddh, &c) == 0);
10362 zpool_dump_ddt(dds, ddh);
10363 }
10364
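/*
 * Sizes of the scratch buffers used to assemble the human-readable
 * status and action text in print_status_reason().
 */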
10365 #define ST_SIZE 4096
10366 #define AC_SIZE 2048
10367
10368 static void
10369 print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
10370 zpool_status_t reason, zpool_errata_t errata, nvlist_t *item)
10371 {
10372 char status[ST_SIZE];
10373 char action[AC_SIZE];
10374 memset(status, 0, ST_SIZE);
10375 memset(action, 0, AC_SIZE);
10376
10377 switch (reason) {
10378 case ZPOOL_STATUS_MISSING_DEV_R:
10379 snprintf(status, ST_SIZE, gettext("One or more devices could "
10380 "not be opened. Sufficient replicas exist for\n\tthe pool "
10381 "to continue functioning in a degraded state.\n"));
10382 snprintf(action, AC_SIZE, gettext("Attach the missing device "
10383 "and online it using 'zpool online'.\n"));
10384 break;
10385
10386 case ZPOOL_STATUS_MISSING_DEV_NR:
10387 snprintf(status, ST_SIZE, gettext("One or more devices could "
10388 "not be opened. There are insufficient\n\treplicas for the"
10389 " pool to continue functioning.\n"));
10390 snprintf(action, AC_SIZE, gettext("Attach the missing device "
10391 "and online it using 'zpool online'.\n"));
10392 break;
10393
10394 case ZPOOL_STATUS_CORRUPT_LABEL_R:
10395 snprintf(status, ST_SIZE, gettext("One or more devices could "
10396 "not be used because the label is missing or\n\tinvalid. "
10397 "Sufficient replicas exist for the pool to continue\n\t"
10398 "functioning in a degraded state.\n"));
10399 snprintf(action, AC_SIZE, gettext("Replace the device using "
10400 "'zpool replace'.\n"));
10401 break;
10402
10403 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
10404 snprintf(status, ST_SIZE, gettext("One or more devices could "
10405 "not be used because the label is missing \n\tor invalid. "
10406 "There are insufficient replicas for the pool to "
10407 "continue\n\tfunctioning.\n"));
10408 zpool_explain_recover(zpool_get_handle(zhp),
10409 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
10410 action, AC_SIZE);
10411 break;
10412
10413 case ZPOOL_STATUS_FAILING_DEV:
10414 snprintf(status, ST_SIZE, gettext("One or more devices has "
10415 "experienced an unrecoverable error. An\n\tattempt was "
10416 "made to correct the error. Applications are "
10417 "unaffected.\n"));
10418 snprintf(action, AC_SIZE, gettext("Determine if the "
10419 "device needs to be replaced, and clear the errors\n\tusing"
10420 " 'zpool clear' or replace the device with 'zpool "
10421 "replace'.\n"));
10422 break;
10423
10424 case ZPOOL_STATUS_OFFLINE_DEV:
10425 snprintf(status, ST_SIZE, gettext("One or more devices has "
10426 "been taken offline by the administrator.\n\tSufficient "
10427 "replicas exist for the pool to continue functioning in "
10428 "a\n\tdegraded state.\n"));
10429 snprintf(action, AC_SIZE, gettext("Online the device "
10430 "using 'zpool online' or replace the device with\n\t'zpool "
10431 "replace'.\n"));
10432 break;
10433
10434 case ZPOOL_STATUS_REMOVED_DEV:
10435 snprintf(status, ST_SIZE, gettext("One or more devices has "
10436 "been removed by the administrator.\n\tSufficient "
10437 "replicas exist for the pool to continue functioning in "
10438 "a\n\tdegraded state.\n"));
10439 snprintf(action, AC_SIZE, gettext("Online the device "
10440 "using zpool online' or replace the device with\n\t'zpool "
10441 "replace'.\n"));
10442 break;
10443
10444 case ZPOOL_STATUS_RESILVERING:
10445 case ZPOOL_STATUS_REBUILDING:
10446 snprintf(status, ST_SIZE, gettext("One or more devices is "
10447 "currently being resilvered. The pool will\n\tcontinue "
10448 "to function, possibly in a degraded state.\n"));
10449 snprintf(action, AC_SIZE, gettext("Wait for the resilver to "
10450 "complete.\n"));
10451 break;
10452
10453 case ZPOOL_STATUS_REBUILD_SCRUB:
10454 snprintf(status, ST_SIZE, gettext("One or more devices have "
10455 "been sequentially resilvered, scrubbing\n\tthe pool "
10456 "is recommended.\n"));
10457 snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
10458 "verify all data checksums.\n"));
10459 break;
10460
10461 case ZPOOL_STATUS_CORRUPT_DATA:
10462 snprintf(status, ST_SIZE, gettext("One or more devices has "
10463 "experienced an error resulting in data\n\tcorruption. "
10464 "Applications may be affected.\n"));
10465 snprintf(action, AC_SIZE, gettext("Restore the file in question"
10466 " if possible. Otherwise restore the\n\tentire pool from "
10467 "backup.\n"));
10468 break;
10469
10470 case ZPOOL_STATUS_CORRUPT_POOL:
10471 snprintf(status, ST_SIZE, gettext("The pool metadata is "
10472 "corrupted and the pool cannot be opened.\n"));
10473 zpool_explain_recover(zpool_get_handle(zhp),
10474 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
10475 action, AC_SIZE);
10476 break;
10477
10478 case ZPOOL_STATUS_VERSION_OLDER:
10479 snprintf(status, ST_SIZE, gettext("The pool is formatted using "
10480 "a legacy on-disk format. The pool can\n\tstill be used, "
10481 "but some features are unavailable.\n"));
10482 snprintf(action, AC_SIZE, gettext("Upgrade the pool using "
10483 "'zpool upgrade'. Once this is done, the\n\tpool will no "
10484 "longer be accessible on software that does not support\n\t"
10485 "feature flags.\n"));
10486 break;
10487
10488 case ZPOOL_STATUS_VERSION_NEWER:
10489 snprintf(status, ST_SIZE, gettext("The pool has been upgraded "
10490 "to a newer, incompatible on-disk version.\n\tThe pool "
10491 "cannot be accessed on this system.\n"));
10492 snprintf(action, AC_SIZE, gettext("Access the pool from a "
10493 "system running more recent software, or\n\trestore the "
10494 "pool from backup.\n"));
10495 break;
10496
10497 case ZPOOL_STATUS_FEAT_DISABLED:
10498 snprintf(status, ST_SIZE, gettext("Some supported and "
10499 "requested features are not enabled on the pool.\n\t"
10500 "The pool can still be used, but some features are "
10501 "unavailable.\n"));
10502 snprintf(action, AC_SIZE, gettext("Enable all features using "
10503 "'zpool upgrade'. Once this is done,\n\tthe pool may no "
10504 "longer be accessible by software that does not support\n\t"
10505 "the features. See zpool-features(7) for details.\n"));
10506 break;
10507
10508 case ZPOOL_STATUS_COMPATIBILITY_ERR:
10509 snprintf(status, ST_SIZE, gettext("This pool has a "
10510 "compatibility list specified, but it could not be\n\t"
10511 "read/parsed at this time. The pool can still be used, "
10512 "but this\n\tshould be investigated.\n"));
10513 snprintf(action, AC_SIZE, gettext("Check the value of the "
10514 "'compatibility' property against the\n\t"
10515 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
10516 ZPOOL_DATA_COMPAT_D ".\n"));
10517 break;
10518
10519 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
10520 snprintf(status, ST_SIZE, gettext("One or more features "
10521 "are enabled on the pool despite not being\n\t"
10522 "requested by the 'compatibility' property.\n"));
10523 snprintf(action, AC_SIZE, gettext("Consider setting "
10524 "'compatibility' to an appropriate value, or\n\t"
10525 "adding needed features to the relevant file in\n\t"
10526 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
10527 break;
10528
10529 case ZPOOL_STATUS_UNSUP_FEAT_READ:
10530 snprintf(status, ST_SIZE, gettext("The pool cannot be accessed "
10531 "on this system because it uses the\n\tfollowing feature(s)"
10532 " not supported on this system:\n"));
10533 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
10534 ST_SIZE);
10535 snprintf(action, AC_SIZE, gettext("Access the pool from a "
10536 "system that supports the required feature(s),\n\tor "
10537 "restore the pool from backup.\n"));
10538 break;
10539
10540 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
10541 snprintf(status, ST_SIZE, gettext("The pool can only be "
10542 "accessed in read-only mode on this system. It\n\tcannot be"
10543 " accessed in read-write mode because it uses the "
10544 "following\n\tfeature(s) not supported on this system:\n"));
10545 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
10546 ST_SIZE);
10547 snprintf(action, AC_SIZE, gettext("The pool cannot be accessed "
10548 "in read-write mode. Import the pool with\n"
10549 "\t\"-o readonly=on\", access the pool from a system that "
10550 "supports the\n\trequired feature(s), or restore the "
10551 "pool from backup.\n"));
10552 break;
10553
10554 case ZPOOL_STATUS_FAULTED_DEV_R:
10555 snprintf(status, ST_SIZE, gettext("One or more devices are "
10556 "faulted in response to persistent errors.\n\tSufficient "
10557 "replicas exist for the pool to continue functioning "
10558 "in a\n\tdegraded state.\n"));
10559 snprintf(action, AC_SIZE, gettext("Replace the faulted device, "
10560 "or use 'zpool clear' to mark the device\n\trepaired.\n"));
10561 break;
10562
10563 case ZPOOL_STATUS_FAULTED_DEV_NR:
10564 snprintf(status, ST_SIZE, gettext("One or more devices are "
10565 "faulted in response to persistent errors. There are "
10566 "insufficient replicas for the pool to\n\tcontinue "
10567 "functioning.\n"));
10568 snprintf(action, AC_SIZE, gettext("Destroy and re-create the "
10569 "pool from a backup source. Manually marking the device\n"
10570 "\trepaired using 'zpool clear' may allow some data "
10571 "to be recovered.\n"));
10572 break;
10573
10574 case ZPOOL_STATUS_IO_FAILURE_MMP:
10575 snprintf(status, ST_SIZE, gettext("The pool is suspended "
10576 "because multihost writes failed or were delayed;\n\t"
10577 "another system could import the pool undetected.\n"));
10578 snprintf(action, AC_SIZE, gettext("Make sure the pool's devices"
10579 " are connected, then reboot your system and\n\timport the "
10580 "pool or run 'zpool clear' to resume the pool.\n"));
10581 break;
10582
10583 case ZPOOL_STATUS_IO_FAILURE_WAIT:
10584 case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
10585 snprintf(status, ST_SIZE, gettext("One or more devices are "
10586 "faulted in response to IO failures.\n"));
10587 snprintf(action, AC_SIZE, gettext("Make sure the affected "
10588 "devices are connected, then run 'zpool clear'.\n"));
10589 break;
10590
10591 case ZPOOL_STATUS_BAD_LOG:
10592 snprintf(status, ST_SIZE, gettext("An intent log record "
10593 "could not be read.\n"
10594 "\tWaiting for administrator intervention to fix the "
10595 "faulted pool.\n"));
10596 snprintf(action, AC_SIZE, gettext("Either restore the affected "
10597 "device(s) and run 'zpool online',\n"
10598 "\tor ignore the intent log records by running "
10599 "'zpool clear'.\n"));
10600 break;
10601
10602 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
10603 snprintf(status, ST_SIZE, gettext("One or more devices are "
10604 "configured to use a non-native block size.\n"
10605 "\tExpect reduced performance.\n"));
10606 snprintf(action, AC_SIZE, gettext("Replace affected devices "
10607 "with devices that support the\n\tconfigured block size, "
10608 "or migrate data to a properly configured\n\tpool.\n"));
10609 break;
10610
10611 case ZPOOL_STATUS_HOSTID_MISMATCH:
10612 snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid"
10613 " and system hostid on imported pool.\n\tThis pool was "
10614 "previously imported into a system with a different "
10615 "hostid,\n\tand then was verbatim imported into this "
10616 "system.\n"));
10617 snprintf(action, AC_SIZE, gettext("Export this pool on all "
10618 "systems on which it is imported.\n"
10619 "\tThen import it to correct the mismatch.\n"));
10620 break;
10621
10622 case ZPOOL_STATUS_ERRATA:
10623 snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"),
10624 errata);
10625 switch (errata) {
10626 case ZPOOL_ERRATA_NONE:
10627 break;
10628
10629 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
10630 snprintf(action, AC_SIZE, gettext("To correct the issue"
10631 " run 'zpool scrub'.\n"));
10632 break;
10633
10634 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
10635 (void) strlcat(status, gettext("\tExisting encrypted "
10636 "datasets contain an on-disk incompatibility\n\t "
10637 "which needs to be corrected.\n"), ST_SIZE);
10638 snprintf(action, AC_SIZE, gettext("To correct the issue"
10639 " backup existing encrypted datasets to new\n\t"
10640 "encrypted datasets and destroy the old ones. "
10641 "'zfs mount -o ro' can\n\tbe used to temporarily "
10642 "mount existing encrypted datasets readonly.\n"));
10643 break;
10644
10645 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
10646 (void) strlcat(status, gettext("\tExisting encrypted "
10647 "snapshots and bookmarks contain an on-disk\n\t"
10648 "incompatibility. This may cause on-disk "
10649 "corruption if they are used\n\twith "
10650 "'zfs recv'.\n"), ST_SIZE);
10651 snprintf(action, AC_SIZE, gettext("To correct the "
10652 "issue, enable the bookmark_v2 feature. No "
10653 "additional\n\taction is needed if there are no "
10654 "encrypted snapshots or bookmarks.\n\tIf preserving "
10655 "the encrypted snapshots and bookmarks is required,"
10656 " use\n\ta non-raw send to backup and restore them."
10657 " Alternately, they may be\n\tremoved to resolve "
10658 "the incompatibility.\n"));
10659 break;
10660
10661 default:
10662 /*
10663 * All errata which allow the pool to be imported
10664 * must contain an action message.
10665 */
10666 assert(0);
10667 }
10668 break;
10669
10670 default:
10671 /*
10672 * The remaining errors can't actually be generated, yet.
10673 */
10674 assert(reason == ZPOOL_STATUS_OK);
10675 }
10676
10677 if (status[0] != 0) {
10678 if (cbp->cb_json)
10679 fnvlist_add_string(item, "status", status);
10680 else {
10681 printf_color(ANSI_BOLD, gettext("status: "));
10682 printf_color(ANSI_YELLOW, status);
10683 }
10684 }
10685
10686 if (action[0] != 0) {
10687 if (cbp->cb_json)
10688 fnvlist_add_string(item, "action", action);
10689 else {
10690 printf_color(ANSI_BOLD, gettext("action: "));
10691 printf_color(ANSI_YELLOW, action);
10692 }
10693 }
10694 }
10695
10696 static int
10697 status_callback_json(zpool_handle_t *zhp, void *data)
10698 {
10699 status_cbdata_t *cbp = data;
10700 nvlist_t *config, *nvroot;
10701 const char *msgid;
10702 char pool_guid[256] = "";
10703 char msgbuf[256];
10704 uint64_t guid;
10705 zpool_status_t reason;
10706 zpool_errata_t errata;
10707 uint_t c;
10708 vdev_stat_t *vs;
10709 nvlist_t *item, *d, *load_info, *vds;
10710 item = d = NULL;
10711
10712 /* If dedup stats were requested, also fetch dedupcached. */
10713 if (cbp->cb_dedup_stats > 1)
10714 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
10715 reason = zpool_get_status(zhp, &msgid, &errata);
10716 /*
10717 * If we were given 'zpool status -x', only report those pools with
10718 * problems.
10719 */
10720 if (cbp->cb_explain &&
10721 (reason == ZPOOL_STATUS_OK ||
10722 reason == ZPOOL_STATUS_VERSION_OLDER ||
10723 reason == ZPOOL_STATUS_FEAT_DISABLED ||
10724 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
10725 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
10726 return (0);
10727 }
10728
10729 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
10730 item = fnvlist_alloc();
10731 vds = fnvlist_alloc();
10732 fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int);
10733 config = zpool_get_config(zhp, NULL);
10734
10735 if (config != NULL) {
10736 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
10737 verify(nvlist_lookup_uint64_array(nvroot,
10738 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);
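/*
 * When --json-pool-key-guid is in effect, format the pool GUID
 * now; it becomes this pool's key in the "pools" object below.
 */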
10739 if (cbp->cb_json_pool_key_guid) {
10740 guid = fnvlist_lookup_uint64(config,
10741 ZPOOL_CONFIG_POOL_GUID);
10742 snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid);
10743 }
10744 cbp->cb_count++;
10745
10746 print_status_reason(zhp, cbp, reason, errata, item);
10747 if (msgid != NULL) {
10748 snprintf(msgbuf, 256,
10749 "https://openzfs.github.io/openzfs-docs/msg/%s",
10750 msgid);
10751 fnvlist_add_string(item, "msgid", msgid);
10752 fnvlist_add_string(item, "moreinfo", msgbuf);
10753 }
10754
10755 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
10756 &load_info) == 0) {
10757 fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO,
10758 load_info);
10759 }
10760
10761 scan_status_nvlist(zhp, cbp, nvroot, item);
10762 removal_status_nvlist(zhp, cbp, nvroot, item);
10763 checkpoint_status_nvlist(nvroot, cbp, item);
10764 raidz_expand_status_nvlist(zhp, cbp, nvroot, item);
10765 vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds);
10766 if (cbp->cb_flat_vdevs) {
10767 class_vdevs_nvlist(zhp, cbp, nvroot,
10768 VDEV_ALLOC_BIAS_DEDUP, vds);
10769 class_vdevs_nvlist(zhp, cbp, nvroot,
10770 VDEV_ALLOC_BIAS_SPECIAL, vds);
10771 class_vdevs_nvlist(zhp, cbp, nvroot,
10772 VDEV_ALLOC_CLASS_LOGS, vds);
10773 l2cache_nvlist(zhp, cbp, nvroot, vds);
10774 spares_nvlist(zhp, cbp, nvroot, vds);
10775
10776 fnvlist_add_nvlist(item, "vdevs", vds);
10777 fnvlist_free(vds);
10778 } else {
10779 fnvlist_add_nvlist(item, "vdevs", vds);
10780 fnvlist_free(vds);
10781
10782 class_vdevs_nvlist(zhp, cbp, nvroot,
10783 VDEV_ALLOC_BIAS_DEDUP, item);
10784 class_vdevs_nvlist(zhp, cbp, nvroot,
10785 VDEV_ALLOC_BIAS_SPECIAL, item);
10786 class_vdevs_nvlist(zhp, cbp, nvroot,
10787 VDEV_ALLOC_CLASS_LOGS, item);
10788 l2cache_nvlist(zhp, cbp, nvroot, item);
10789 spares_nvlist(zhp, cbp, nvroot, item);
10790 }
10791 dedup_stats_nvlist(zhp, cbp, item);
10792 errors_nvlist(zhp, cbp, item);
10793 }
10794 if (cbp->cb_json_pool_key_guid) {
10795 fnvlist_add_nvlist(d, pool_guid, item);
10796 } else {
10797 fnvlist_add_nvlist(d, zpool_get_name(zhp),
10798 item);
10799 }
10800 fnvlist_free(item);
10801 return (0);
10802 }
10803
10804 /*
10805 * Display a summary of pool status. Displays a summary such as:
10806 *
10807 * pool: tank
10808 * status: DEGRADED
10809 * reason: One or more devices ...
10810 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
10811 * config:
10812 * mirror DEGRADED
10813 * c1t0d0 OK
10814 * c2t0d0 UNAVAIL
10815 *
10816 * When given the '-v' option, we print out the complete config. If the '-e'
10817 * option is specified, then we print out error rate information as well.
10818 */
10819 static int
10820 status_callback(zpool_handle_t *zhp, void *data)
10821 {
10822 status_cbdata_t *cbp = data;
10823 nvlist_t *config, *nvroot;
10824 const char *msgid;
10825 zpool_status_t reason;
10826 zpool_errata_t errata;
10827 const char *health;
10828 uint_t c;
10829 vdev_stat_t *vs;
10830
10831 /* If dedup stats were requested, also fetch dedupcached. */
10832 if (cbp->cb_dedup_stats > 1)
10833 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
10834
10835 config = zpool_get_config(zhp, NULL);
10836 reason = zpool_get_status(zhp, &msgid, &errata);
10837
10838 cbp->cb_count++;
10839
10840 /*
10841 * If we were given 'zpool status -x', only report those pools with
10842 * problems.
10843 */
10844 if (cbp->cb_explain &&
10845 (reason == ZPOOL_STATUS_OK ||
10846 reason == ZPOOL_STATUS_VERSION_OLDER ||
10847 reason == ZPOOL_STATUS_FEAT_DISABLED ||
10848 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
10849 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
10850 if (!cbp->cb_allpools) {
10851 (void) printf(gettext("pool '%s' is healthy\n"),
10852 zpool_get_name(zhp));
10853 if (cbp->cb_first)
10854 cbp->cb_first = B_FALSE;
10855 }
10856 return (0);
10857 }
10858
10859 if (cbp->cb_first)
10860 cbp->cb_first = B_FALSE;
10861 else
10862 (void) printf("\n");
10863
10864 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
10865 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
10866 (uint64_t **)&vs, &c) == 0);
10867
10868 health = zpool_get_state_str(zhp);
10869
10870 printf(" ");
10871 printf_color(ANSI_BOLD, gettext("pool:"));
10872 printf(" %s\n", zpool_get_name(zhp));
10873 fputc(' ', stdout);
10874 printf_color(ANSI_BOLD, gettext("state: "));
10875
10876 printf_color(health_str_to_color(health), "%s", health);
10877
10878 fputc('\n', stdout);
10879 print_status_reason(zhp, cbp, reason, errata, NULL);
10880
10881 if (msgid != NULL) {
10882 printf(" ");
10883 printf_color(ANSI_BOLD, gettext("see:"));
10884 printf(gettext(
10885 " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
10886 msgid);
10887 }
10888
10889 if (config != NULL) {
10890 uint64_t nerr;
10891 nvlist_t **spares, **l2cache;
10892 uint_t nspares, nl2cache;
10893
10894 print_scan_status(zhp, nvroot);
10895
10896 pool_removal_stat_t *prs = NULL;
10897 (void) nvlist_lookup_uint64_array(nvroot,
10898 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
10899 print_removal_status(zhp, prs);
10900
10901 pool_checkpoint_stat_t *pcs = NULL;
10902 (void) nvlist_lookup_uint64_array(nvroot,
10903 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
10904 print_checkpoint_status(pcs);
10905
10906 pool_raidz_expand_stat_t *pres = NULL;
10907 (void) nvlist_lookup_uint64_array(nvroot,
10908 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
10909 print_raidz_expand_status(zhp, pres);
10910
10911 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
10912 cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
10913 if (cbp->cb_namewidth < 10)
10914 cbp->cb_namewidth = 10;
10915
10916 color_start(ANSI_BOLD);
10917 (void) printf(gettext("config:\n\n"));
10918 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
10919 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
10920 "CKSUM");
10921 color_end();
10922
10923 if (cbp->cb_print_slow_ios) {
10924 printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
10925 }
10926
10927 if (cbp->cb_print_power) {
10928 printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
10929 }
10930
10931 if (cbp->cb_print_dio_verify) {
10932 printf_color(ANSI_BOLD, " %5s", gettext("DIO"));
10933 }
10934
10935 if (cbp->vcdl != NULL)
10936 print_cmd_columns(cbp->vcdl, 0);
10937
10938 printf("\n");
10939
10940 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
10941 B_FALSE, NULL);
10942
10943 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
10944 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
10945 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
10946
10947 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
10948 &l2cache, &nl2cache) == 0)
10949 print_l2cache(zhp, cbp, l2cache, nl2cache);
10950
10951 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
10952 &spares, &nspares) == 0)
10953 print_spares(zhp, cbp, spares, nspares);
10954
10955 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
10956 &nerr) == 0) {
10957 (void) printf("\n");
10958 if (nerr == 0) {
10959 (void) printf(gettext(
10960 "errors: No known data errors\n"));
10961 } else if (!cbp->cb_verbose) {
10962 color_start(ANSI_RED);
10963 (void) printf(gettext("errors: %llu data "
10964 "errors, use '-v' for a list\n"),
10965 (u_longlong_t)nerr);
10966 color_end();
10967 } else {
10968 print_error_log(zhp);
10969 }
10970 }
10971
10972 if (cbp->cb_dedup_stats)
10973 print_dedup_stats(zhp, config, cbp->cb_literal);
10974 } else {
10975 (void) printf(gettext("config: The configuration cannot be "
10976 "determined.\n"));
10977 }
10978
10979 return (0);
10980 }
10981
10982 /*
10983 * zpool status [-c [script1,script2,...]] [-dDegiLpPstvx] [--power] ...
10984 * [-T d|u] [pool] [interval [count]]
10985 *
10986 * -c CMD For each vdev, run command CMD
10987 * -d Display Direct I/O write verify errors
10988 * -D Display dedup status (undocumented)
10989 * -e Display only unhealthy vdevs
10990 * -g Display guid for individual vdev name.
10991 * -i Display vdev initialization status.
10992 * -L Follow links when resolving vdev path name.
10993 * -p Display values in parsable (exact) format.
10994 * -P Display full path for vdev name.
10995 * -s Display slow IOs column.
10996 * -t Display vdev TRIM status.
10997 * -T Display a timestamp in date(1) or Unix format
10998 * -v Display complete error logs
10999 * -x Display only pools with potential problems
11000 * -j Display output in JSON format
11001 * --power Display vdev enclosure slot power status
11002 * --json-int Display numbers in integer format instead of string
11003 * --json-flat-vdevs Display vdevs in flat hierarchy
11004 * --json-pool-key-guid Use pool GUID as key for pool objects
11005 *
11006 * Describes the health status of all pools or some subset.
11007 */
11008 int
11009 zpool_do_status(int argc, char **argv)
11010 {
11011 int c;
11012 int ret;
11013 float interval = 0;
11014 unsigned long count = 0;
11015 status_cbdata_t cb = { 0 };
11016 nvlist_t *data;
11017 char *cmd = NULL;
11018
11019 struct option long_options[] = {
11020 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
11021 {"json", no_argument, NULL, 'j'},
11022 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
11023 {"json-flat-vdevs", no_argument, NULL,
11024 ZPOOL_OPTION_JSON_FLAT_VDEVS},
11025 {"json-pool-key-guid", no_argument, NULL,
11026 ZPOOL_OPTION_POOL_KEY_GUID},
11027 {0, 0, 0, 0}
11028 };
11029
11030 /* check options */
11031 while ((c = getopt_long(argc, argv, "c:jdDegiLpPstT:vx", long_options,
11032 NULL)) != -1) {
11033 switch (c) {
11034 case 'c':
11035 if (cmd != NULL) {
11036 fprintf(stderr,
11037 gettext("Can't set -c flag twice\n"));
11038 exit(1);
11039 }
11040
11041 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
11042 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
11043 fprintf(stderr, gettext(
11044 "Can't run -c, disabled by "
11045 "ZPOOL_SCRIPTS_ENABLED.\n"));
11046 exit(1);
11047 }
11048
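/*
 * getuid()/geteuid() return 0 for root, so refuse to run
 * user-supplied -c scripts with root privileges unless
 * ZPOOL_SCRIPTS_AS_ROOT is explicitly set in the environment.
 */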
11049 if ((getuid() <= 0 || geteuid() <= 0) &&
11050 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
11051 fprintf(stderr, gettext(
11052 "Can't run -c with root privileges "
11053 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
11054 exit(1);
11055 }
11056 cmd = optarg;
11057 break;
11058 case 'd':
11059 cb.cb_print_dio_verify = B_TRUE;
11060 break;
11061 case 'D':
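/* One -D prints dedup stats; a second -D also requests the cached DDT size (capped at 2). */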
11062 if (++cb.cb_dedup_stats > 2)
11063 cb.cb_dedup_stats = 2;
11064 break;
11065 case 'e':
11066 cb.cb_print_unhealthy = B_TRUE;
11067 break;
11068 case 'g':
11069 cb.cb_name_flags |= VDEV_NAME_GUID;
11070 break;
11071 case 'i':
11072 cb.cb_print_vdev_init = B_TRUE;
11073 break;
11074 case 'L':
11075 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
11076 break;
11077 case 'p':
11078 cb.cb_literal = B_TRUE;
11079 break;
11080 case 'P':
11081 cb.cb_name_flags |= VDEV_NAME_PATH;
11082 break;
11083 case 's':
11084 cb.cb_print_slow_ios = B_TRUE;
11085 break;
11086 case 't':
11087 cb.cb_print_vdev_trim = B_TRUE;
11088 break;
11089 case 'T':
11090 get_timestamp_arg(*optarg);
11091 break;
11092 case 'v':
11093 cb.cb_verbose = B_TRUE;
11094 break;
11095 case 'j':
11096 cb.cb_json = B_TRUE;
11097 break;
11098 case 'x':
11099 cb.cb_explain = B_TRUE;
11100 break;
11101 case ZPOOL_OPTION_POWER:
11102 cb.cb_print_power = B_TRUE;
11103 break;
11104 case ZPOOL_OPTION_JSON_FLAT_VDEVS:
11105 cb.cb_flat_vdevs = B_TRUE;
11106 break;
11107 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
11108 cb.cb_json_as_int = B_TRUE;
11109 cb.cb_literal = B_TRUE;
11110 break;
11111 case ZPOOL_OPTION_POOL_KEY_GUID:
11112 cb.cb_json_pool_key_guid = B_TRUE;
11113 break;
11114 case '?':
11115 if (optopt == 'c') {
11116 print_zpool_script_list("status");
11117 exit(0);
11118 } else {
11119 fprintf(stderr,
11120 gettext("invalid option '%c'\n"), optopt);
11121 }
11122 usage(B_FALSE);
11123 }
11124 }
11125
11126 argc -= optind;
11127 argv += optind;
11128
11129 get_interval_count(&argc, argv, &interval, &count);
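/* Peel off the optional trailing [interval [count]] arguments, as with 'zpool iostat'. */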
11130
11131 if (argc == 0)
11132 cb.cb_allpools = B_TRUE;
11133
11134 cb.cb_first = B_TRUE;
11135 cb.cb_print_status = B_TRUE;
11136
11137 if (cb.cb_flat_vdevs && !cb.cb_json) {
11138 fprintf(stderr, gettext("'--json-flat-vdevs' only works with"
11139 " '-j' option\n"));
11140 usage(B_FALSE);
11141 }
11142
11143 if (cb.cb_json_as_int && !cb.cb_json) {
11144 (void) fprintf(stderr, gettext("'--json-int' only works with"
11145 " '-j' option\n"));
11146 usage(B_FALSE);
11147 }
11148
11149 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
11150 (void) fprintf(stderr, gettext("'--json-pool-key-guid' only"
11151 " works with '-j' option\n"));
11152 usage(B_FALSE);
11153 }
11154
11155 for (;;) {
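/*
 * Each pass of the interval loop rebuilds the JSON schema with an
 * empty "pools" object for the status callbacks to fill in.
 */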
11156 if (cb.cb_json) {
11157 cb.cb_jsobj = zpool_json_schema(0, 1);
11158 data = fnvlist_alloc();
11159 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
11160 fnvlist_free(data);
11161 }
11162
11163 if (timestamp_fmt != NODATE) {
11164 if (cb.cb_json) {
11165 if (cb.cb_json_as_int) {
11166 fnvlist_add_uint64(cb.cb_jsobj, "time",
11167 time(NULL));
11168 } else {
11169 char ts[128];
11170 get_timestamp(timestamp_fmt, ts, 128);
11171 fnvlist_add_string(cb.cb_jsobj, "time",
11172 ts);
11173 }
11174 } else
11175 print_timestamp(timestamp_fmt);
11176 }
11177
11178 if (cmd != NULL)
11179 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
11180 NULL, NULL, 0, 0);
11181
11182 if (cb.cb_json) {
11183 ret = for_each_pool(argc, argv, B_TRUE, NULL,
11184 ZFS_TYPE_POOL, cb.cb_literal,
11185 status_callback_json, &cb);
11186 } else {
11187 ret = for_each_pool(argc, argv, B_TRUE, NULL,
11188 ZFS_TYPE_POOL, cb.cb_literal,
11189 status_callback, &cb);
11190 }
11191
11192 if (cb.vcdl != NULL)
11193 free_vdev_cmd_data_list(cb.vcdl);
11194
11195 if (cb.cb_json) {
11196 if (ret == 0)
11197 zcmd_print_json(cb.cb_jsobj);
11198 else
11199 nvlist_free(cb.cb_jsobj);
11200 } else {
11201 if (argc == 0 && cb.cb_count == 0) {
11202 (void) fprintf(stderr, "%s",
11203 gettext("no pools available\n"));
11204 } else if (cb.cb_explain && cb.cb_first &&
11205 cb.cb_allpools) {
11206 (void) printf("%s",
11207 gettext("all pools are healthy\n"));
11208 }
11209 }
11210
11211 if (ret != 0)
11212 return (ret);
11213
11214 if (interval == 0)
11215 break;
11216
11217 if (count != 0 && --count == 0)
11218 break;
11219
11220 (void) fflush(stdout);
11221 (void) fsleep(interval);
11222 }
11223
11224 return (0);
11225 }
11226
11227 typedef struct upgrade_cbdata {
11228 int cb_first;
11229 int cb_argc;
11230 uint64_t cb_version;
11231 char **cb_argv;
11232 } upgrade_cbdata_t;
11233
11234 static int
11235 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
11236 {
11237 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
11238 int *count = (int *)unsupp_fs;
11239
11240 if (zfs_version > ZPL_VERSION) {
11241 (void) printf(gettext("%s (v%d) is not supported by this "
11242 "implementation of ZFS.\n"),
11243 zfs_get_name(zhp), zfs_version);
11244 (*count)++;
11245 }
11246
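/*
 * Recurse into child filesystems; each child handle is closed by its
 * own invocation of this callback.
 */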
11247 zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
11248
11249 zfs_close(zhp);
11250
11251 return (0);
11252 }
11253
11254 static int
11255 upgrade_version(zpool_handle_t *zhp, uint64_t version)
11256 {
11257 int ret;
11258 nvlist_t *config;
11259 uint64_t oldversion;
11260 int unsupp_fs = 0;
11261
11262 config = zpool_get_config(zhp, NULL);
11263 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11264 &oldversion) == 0);
11265
11266 char compat[ZFS_MAXPROPLEN];
11267 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
11268 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
11269 compat[0] = '\0';
11270
11271 assert(SPA_VERSION_IS_SUPPORTED(oldversion));
11272 assert(oldversion < version);
11273
11274 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
11275 if (ret != 0)
11276 return (ret);
11277
11278 if (unsupp_fs) {
11279 (void) fprintf(stderr, gettext("Upgrade not performed due "
11280 "to %d unsupported filesystems (max v%d).\n"),
11281 unsupp_fs, (int)ZPL_VERSION);
11282 return (1);
11283 }
11284
11285 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
11286 (void) fprintf(stderr, gettext("Upgrade not performed because "
11287 "'compatibility' property set to '"
11288 ZPOOL_COMPAT_LEGACY "'.\n"));
11289 return (1);
11290 }
11291
11292 ret = zpool_upgrade(zhp, version);
11293 if (ret != 0)
11294 return (ret);
11295
11296 if (version >= SPA_VERSION_FEATURES) {
11297 (void) printf(gettext("Successfully upgraded "
11298 "'%s' from version %llu to feature flags.\n"),
11299 zpool_get_name(zhp), (u_longlong_t)oldversion);
11300 } else {
11301 (void) printf(gettext("Successfully upgraded "
11302 "'%s' from version %llu to version %llu.\n"),
11303 zpool_get_name(zhp), (u_longlong_t)oldversion,
11304 (u_longlong_t)version);
11305 }
11306
11307 return (0);
11308 }
11309
11310 static int
11311 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
11312 {
11313 int i, ret, count;
11314 boolean_t firstff = B_TRUE;
11315 nvlist_t *enabled = zpool_get_features(zhp);
11316
11317 char compat[ZFS_MAXPROPLEN];
11318 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
11319 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
11320 compat[0] = '\0';
11321
11322 boolean_t requested_features[SPA_FEATURES];
11323 if (zpool_do_load_compat(compat, requested_features) !=
11324 ZPOOL_COMPATIBILITY_OK)
11325 return (-1);
11326
11327 count = 0;
11328 for (i = 0; i < SPA_FEATURES; i++) {
11329 const char *fname = spa_feature_table[i].fi_uname;
11330 const char *fguid = spa_feature_table[i].fi_guid;
11331
11332 if (!spa_feature_table[i].fi_zfs_mod_supported)
11333 continue;
11334
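/*
 * Enable the feature only if it is not already enabled and the
 * pool's compatibility set requests it.
 */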
11335 if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
11336 char *propname;
11337 verify(-1 != asprintf(&propname, "feature@%s", fname));
11338 ret = zpool_set_prop(zhp, propname,
11339 ZFS_FEATURE_ENABLED);
11340 if (ret != 0) {
11341 free(propname);
11342 return (ret);
11343 }
11344 count++;
11345
11346 if (firstff) {
11347 (void) printf(gettext("Enabled the "
11348 "following features on '%s':\n"),
11349 zpool_get_name(zhp));
11350 firstff = B_FALSE;
11351 }
11352 (void) printf(gettext(" %s\n"), fname);
11353 free(propname);
11354 }
11355 }
11356
11357 if (countp != NULL)
11358 *countp = count;
11359 return (0);
11360 }
11361
11362 static int
11363 upgrade_cb(zpool_handle_t *zhp, void *arg)
11364 {
11365 upgrade_cbdata_t *cbp = arg;
11366 nvlist_t *config;
11367 uint64_t version;
11368 boolean_t modified_pool = B_FALSE;
11369 int ret;
11370
11371 config = zpool_get_config(zhp, NULL);
11372 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11373 &version) == 0);
11374
11375 assert(SPA_VERSION_IS_SUPPORTED(version));
11376
11377 if (version < cbp->cb_version) {
11378 cbp->cb_first = B_FALSE;
11379 ret = upgrade_version(zhp, cbp->cb_version);
11380 if (ret != 0)
11381 return (ret);
11382 modified_pool = B_TRUE;
11383
11384 /*
11385 * If they did "zpool upgrade -a", then we could
11386 * be doing ioctls to different pools. We need
11387 * to log this history once to each pool, and bypass
11388 * the normal history logging that happens in main().
11389 */
11390 (void) zpool_log_history(g_zfs, history_str);
11391 log_history = B_FALSE;
11392 }
11393
11394 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
11395 int count;
11396 ret = upgrade_enable_all(zhp, &count);
11397 if (ret != 0)
11398 return (ret);
11399
11400 if (count > 0) {
11401 cbp->cb_first = B_FALSE;
11402 modified_pool = B_TRUE;
11403 }
11404 }
11405
11406 if (modified_pool) {
11407 (void) printf("\n");
11408 (void) after_zpool_upgrade(zhp);
11409 }
11410
11411 return (0);
11412 }
11413
11414 static int
11415 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
11416 {
11417 upgrade_cbdata_t *cbp = arg;
11418 nvlist_t *config;
11419 uint64_t version;
11420
11421 config = zpool_get_config(zhp, NULL);
11422 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11423 &version) == 0);
11424
11425 assert(SPA_VERSION_IS_SUPPORTED(version));
11426
11427 if (version < SPA_VERSION_FEATURES) {
11428 if (cbp->cb_first) {
11429 (void) printf(gettext("The following pools are "
11430 "formatted with legacy version numbers and can\n"
11431 "be upgraded to use feature flags. After "
11432 "being upgraded, these pools\nwill no "
11433 "longer be accessible by software that does not "
11434 "support feature\nflags.\n\n"
11435 "Note that setting a pool's 'compatibility' "
11436 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
11437 "inhibit upgrades.\n\n"));
11438 (void) printf(gettext("VER POOL\n"));
11439 (void) printf(gettext("--- ------------\n"));
11440 cbp->cb_first = B_FALSE;
11441 }
11442
11443 (void) printf("%2llu %s\n", (u_longlong_t)version,
11444 zpool_get_name(zhp));
11445 }
11446
11447 return (0);
11448 }
11449
11450 static int
11451 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
11452 {
11453 upgrade_cbdata_t *cbp = arg;
11454 nvlist_t *config;
11455 uint64_t version;
11456
11457 config = zpool_get_config(zhp, NULL);
11458 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11459 &version) == 0);
11460
11461 if (version >= SPA_VERSION_FEATURES) {
11462 int i;
11463 boolean_t poolfirst = B_TRUE;
11464 nvlist_t *enabled = zpool_get_features(zhp);
11465
11466 for (i = 0; i < SPA_FEATURES; i++) {
11467 const char *fguid = spa_feature_table[i].fi_guid;
11468 const char *fname = spa_feature_table[i].fi_uname;
11469
11470 if (!spa_feature_table[i].fi_zfs_mod_supported)
11471 continue;
11472
11473 if (!nvlist_exists(enabled, fguid)) {
11474 if (cbp->cb_first) {
11475 (void) printf(gettext("\nSome "
11476 "supported features are not "
11477 "enabled on the following pools. "
11478 "Once a\nfeature is enabled the "
11479 "pool may become incompatible with "
11480 "software\nthat does not support "
11481 "the feature. See "
11482 "zpool-features(7) for "
11483 "details.\n\n"
11484 "Note that the pool "
11485 "'compatibility' feature can be "
11486 "used to inhibit\nfeature "
11487 "upgrades.\n\n"));
11488 (void) printf(gettext("POOL "
11489 "FEATURE\n"));
11490 (void) printf(gettext("------"
11491 "---------\n"));
11492 cbp->cb_first = B_FALSE;
11493 }
11494
11495 if (poolfirst) {
11496 (void) printf(gettext("%s\n"),
11497 zpool_get_name(zhp));
11498 poolfirst = B_FALSE;
11499 }
11500
11501 (void) printf(gettext(" %s\n"), fname);
11502 }
11503 /*
11504 * If they did "zpool upgrade -a", then we could
11505 * be doing ioctls to different pools. We need
11506 * to log this history once to each pool, and bypass
11507 * the normal history logging that happens in main().
11508 */
11509 (void) zpool_log_history(g_zfs, history_str);
11510 log_history = B_FALSE;
11511 }
11512 }
11513
11514 return (0);
11515 }
11516
11517 static int
11518 upgrade_one(zpool_handle_t *zhp, void *data)
11519 {
11520 boolean_t modified_pool = B_FALSE;
11521 upgrade_cbdata_t *cbp = data;
11522 uint64_t cur_version;
11523 int ret;
11524
11525 if (strcmp("log", zpool_get_name(zhp)) == 0) {
11526 (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
11527 "Pool 'log' must be renamed using export and import"
11528 " to upgrade.\n"));
11529 return (1);
11530 }
11531
11532 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
11533 if (cur_version > cbp->cb_version) {
11534 (void) printf(gettext("Pool '%s' is already formatted "
11535 "using more current version '%llu'.\n\n"),
11536 zpool_get_name(zhp), (u_longlong_t)cur_version);
11537 return (0);
11538 }
11539
11540 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
11541 (void) printf(gettext("Pool '%s' is already formatted "
11542 "using version %llu.\n\n"), zpool_get_name(zhp),
11543 (u_longlong_t)cbp->cb_version);
11544 return (0);
11545 }
11546
11547 if (cur_version != cbp->cb_version) {
11548 modified_pool = B_TRUE;
11549 ret = upgrade_version(zhp, cbp->cb_version);
11550 if (ret != 0)
11551 return (ret);
11552 }
11553
11554 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
11555 int count = 0;
11556 ret = upgrade_enable_all(zhp, &count);
11557 if (ret != 0)
11558 return (ret);
11559
11560 if (count != 0) {
11561 modified_pool = B_TRUE;
11562 } else if (cur_version == SPA_VERSION) {
11563 (void) printf(gettext("Pool '%s' already has all "
11564 "supported and requested features enabled.\n"),
11565 zpool_get_name(zhp));
11566 }
11567 }
11568
11569 if (modified_pool) {
11570 (void) printf("\n");
11571 (void) after_zpool_upgrade(zhp);
11572 }
11573
11574 return (0);
11575 }
11576
11577 /*
11578 * zpool upgrade
11579 * zpool upgrade -v
11580 * zpool upgrade [-V version] <-a | pool ...>
11581 *
11582 * With no arguments, display downrev'd ZFS pools available for upgrade.
11583 * Individual pools can be upgraded by specifying the pool, and '-a' will
11584 * upgrade all pools.
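 *
 * Examples (illustrative):
 *   # zpool upgrade        list pools that can be upgraded
 *   # zpool upgrade -a     upgrade all pools
 *   # zpool upgrade tank   upgrade only pool 'tank'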
11585 */
11586 int
11587 zpool_do_upgrade(int argc, char **argv)
11588 {
11589 int c;
11590 upgrade_cbdata_t cb = { 0 };
11591 int ret = 0;
11592 boolean_t showversions = B_FALSE;
11593 boolean_t upgradeall = B_FALSE;
11594 char *end;
11595
11597 /* check options */
11598 while ((c = getopt(argc, argv, ":avV:")) != -1) {
11599 switch (c) {
11600 case 'a':
11601 upgradeall = B_TRUE;
11602 break;
11603 case 'v':
11604 showversions = B_TRUE;
11605 break;
11606 case 'V':
11607 cb.cb_version = strtoll(optarg, &end, 10);
11608 if (*end != '\0' ||
11609 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
11610 (void) fprintf(stderr,
11611 gettext("invalid version '%s'\n"), optarg);
11612 usage(B_FALSE);
11613 }
11614 break;
11615 case ':':
11616 (void) fprintf(stderr, gettext("missing argument for "
11617 "'%c' option\n"), optopt);
11618 usage(B_FALSE);
11619 break;
11620 case '?':
11621 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
11622 optopt);
11623 usage(B_FALSE);
11624 }
11625 }
11626
11627 cb.cb_argc = argc;
11628 cb.cb_argv = argv;
11629 argc -= optind;
11630 argv += optind;
11631
11632 if (cb.cb_version == 0) {
11633 cb.cb_version = SPA_VERSION;
11634 } else if (!upgradeall && argc == 0) {
11635 (void) fprintf(stderr, gettext("-V option is "
11636 "incompatible with other arguments\n"));
11637 usage(B_FALSE);
11638 }
11639
11640 if (showversions) {
11641 if (upgradeall || argc != 0) {
11642 (void) fprintf(stderr, gettext("-v option is "
11643 "incompatible with other arguments\n"));
11644 usage(B_FALSE);
11645 }
11646 } else if (upgradeall) {
11647 if (argc != 0) {
11648 (void) fprintf(stderr, gettext("-a option should not "
11649 "be used along with a pool name\n"));
11650 usage(B_FALSE);
11651 }
11652 }
11653
11654 (void) printf("%s", gettext("This system supports ZFS pool feature "
11655 "flags.\n\n"));
11656 if (showversions) {
11657 int i;
11658
11659 (void) printf(gettext("The following features are "
11660 "supported:\n\n"));
11661 (void) printf(gettext("FEAT DESCRIPTION\n"));
11662 (void) printf("----------------------------------------------"
11663 "---------------\n");
11664 for (i = 0; i < SPA_FEATURES; i++) {
11665 zfeature_info_t *fi = &spa_feature_table[i];
11666 if (!fi->fi_zfs_mod_supported)
11667 continue;
11668 const char *ro =
11669 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
11670 " (read-only compatible)" : "";
11671
11672 (void) printf("%-37s%s\n", fi->fi_uname, ro);
11673 (void) printf(" %s\n", fi->fi_desc);
11674 }
11675 (void) printf("\n");
11676
11677 (void) printf(gettext("The following legacy versions are also "
11678 "supported:\n\n"));
11679 (void) printf(gettext("VER DESCRIPTION\n"));
11680 (void) printf("--- -----------------------------------------"
11681 "---------------\n");
11682 (void) printf(gettext(" 1 Initial ZFS version\n"));
11683 (void) printf(gettext(" 2 Ditto blocks "
11684 "(replicated metadata)\n"));
11685 (void) printf(gettext(" 3 Hot spares and double parity "
11686 "RAID-Z\n"));
11687 (void) printf(gettext(" 4 zpool history\n"));
11688 (void) printf(gettext(" 5 Compression using the gzip "
11689 "algorithm\n"));
11690 (void) printf(gettext(" 6 bootfs pool property\n"));
11691 (void) printf(gettext(" 7 Separate intent log devices\n"));
11692 (void) printf(gettext(" 8 Delegated administration\n"));
11693 (void) printf(gettext(" 9 refquota and refreservation "
11694 "properties\n"));
11695 (void) printf(gettext(" 10 Cache devices\n"));
11696 (void) printf(gettext(" 11 Improved scrub performance\n"));
11697 (void) printf(gettext(" 12 Snapshot properties\n"));
11698 (void) printf(gettext(" 13 snapused property\n"));
11699 (void) printf(gettext(" 14 passthrough-x aclinherit\n"));
11700 (void) printf(gettext(" 15 user/group space accounting\n"));
11701 (void) printf(gettext(" 16 stmf property support\n"));
11702 (void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
11703 (void) printf(gettext(" 18 Snapshot user holds\n"));
11704 (void) printf(gettext(" 19 Log device removal\n"));
11705 (void) printf(gettext(" 20 Compression using zle "
11706 "(zero-length encoding)\n"));
11707 (void) printf(gettext(" 21 Deduplication\n"));
11708 (void) printf(gettext(" 22 Received properties\n"));
11709 (void) printf(gettext(" 23 Slim ZIL\n"));
11710 (void) printf(gettext(" 24 System attributes\n"));
11711 (void) printf(gettext(" 25 Improved scrub stats\n"));
11712 (void) printf(gettext(" 26 Improved snapshot deletion "
11713 "performance\n"));
11714 (void) printf(gettext(" 27 Improved snapshot creation "
11715 "performance\n"));
11716 (void) printf(gettext(" 28 Multiple vdev replacements\n"));
11717 (void) printf(gettext("\nFor more information on a particular "
11718 "version, including supported releases,\n"));
11719 (void) printf(gettext("see the ZFS Administration Guide.\n\n"));
11720 } else if (argc == 0 && upgradeall) {
11721 cb.cb_first = B_TRUE;
11722 ret = zpool_iter(g_zfs, upgrade_cb, &cb);
11723 if (ret == 0 && cb.cb_first) {
11724 if (cb.cb_version == SPA_VERSION) {
11725 (void) printf(gettext("All pools are already "
11726 "formatted using feature flags.\n\n"));
11727 (void) printf(gettext("Every feature flags "
11728 "pool already has all supported and "
11729 "requested features enabled.\n"));
11730 } else {
11731 (void) printf(gettext("All pools are already "
11732 "formatted with version %llu or higher.\n"),
11733 (u_longlong_t)cb.cb_version);
11734 }
11735 }
11736 } else if (argc == 0) {
11737 cb.cb_first = B_TRUE;
11738 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
11739 assert(ret == 0);
11740
11741 if (cb.cb_first) {
11742 (void) printf(gettext("All pools are formatted "
11743 "using feature flags.\n\n"));
11744 } else {
11745 (void) printf(gettext("\nUse 'zpool upgrade -v' "
11746 "for a list of available legacy versions.\n"));
11747 }
11748
11749 cb.cb_first = B_TRUE;
11750 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
11751 assert(ret == 0);
11752
11753 if (cb.cb_first) {
11754 (void) printf(gettext("Every feature flags pool has "
11755 "all supported and requested features enabled.\n"));
11756 } else {
11757 (void) printf(gettext("\n"));
11758 }
11759 } else {
11760 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
11761 B_FALSE, upgrade_one, &cb);
11762 }
11763
11764 return (ret);
11765 }
11766
11767 typedef struct hist_cbdata {
11768 boolean_t first;
11769 boolean_t longfmt;
11770 boolean_t internal;
11771 } hist_cbdata_t;
11772
11773 static void
11774 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
11775 {
11776 nvlist_t **records;
11777 uint_t numrecords;
11778 int i;
11779
11780 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
11781 &records, &numrecords) == 0);
11782 for (i = 0; i < numrecords; i++) {
11783 nvlist_t *rec = records[i];
11784 char tbuf[64] = "";
11785
11786 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
11787 time_t tsec;
11788 struct tm t;
11789
11790 tsec = fnvlist_lookup_uint64(records[i],
11791 ZPOOL_HIST_TIME);
11792 (void) localtime_r(&tsec, &t);
11793 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
11794 }
11795
11796 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
11797 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
11798 ZPOOL_HIST_ELAPSED_NS);
11799 (void) snprintf(tbuf + strlen(tbuf),
11800 sizeof (tbuf) - strlen(tbuf),
11801 " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
11802 }
11803
11804 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
11805 (void) printf("%s %s", tbuf,
11806 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
11807 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
11808 int ievent =
11809 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
11810 if (!cb->internal)
11811 continue;
11812 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
11813 (void) printf("%s unrecognized record:\n",
11814 tbuf);
11815 dump_nvlist(rec, 4);
11816 continue;
11817 }
11818 (void) printf("%s [internal %s txg:%lld] %s", tbuf,
11819 zfs_history_event_names[ievent],
11820 (longlong_t)fnvlist_lookup_uint64(
11821 rec, ZPOOL_HIST_TXG),
11822 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
11823 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
11824 if (!cb->internal)
11825 continue;
11826 (void) printf("%s [txg:%lld] %s", tbuf,
11827 (longlong_t)fnvlist_lookup_uint64(
11828 rec, ZPOOL_HIST_TXG),
11829 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
11830 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
11831 (void) printf(" %s (%llu)",
11832 fnvlist_lookup_string(rec,
11833 ZPOOL_HIST_DSNAME),
11834 (u_longlong_t)fnvlist_lookup_uint64(rec,
11835 ZPOOL_HIST_DSID));
11836 }
11837 (void) printf(" %s", fnvlist_lookup_string(rec,
11838 ZPOOL_HIST_INT_STR));
11839 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
11840 if (!cb->internal)
11841 continue;
11842 (void) printf("%s ioctl %s\n", tbuf,
11843 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
11844 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
11845 (void) printf(" input:\n");
11846 dump_nvlist(fnvlist_lookup_nvlist(rec,
11847 ZPOOL_HIST_INPUT_NVL), 8);
11848 }
11849 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
11850 (void) printf(" output:\n");
11851 dump_nvlist(fnvlist_lookup_nvlist(rec,
11852 ZPOOL_HIST_OUTPUT_NVL), 8);
11853 }
11854 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
11855 (void) printf(" output nvlist omitted; "
11856 "original size: %lldKB\n",
11857 (longlong_t)fnvlist_lookup_int64(rec,
11858 ZPOOL_HIST_OUTPUT_SIZE) / 1024);
11859 }
11860 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
11861 (void) printf(" errno: %lld\n",
11862 (longlong_t)fnvlist_lookup_int64(rec,
11863 ZPOOL_HIST_ERRNO));
11864 }
11865 } else {
11866 if (!cb->internal)
11867 continue;
11868 (void) printf("%s unrecognized record:\n", tbuf);
11869 dump_nvlist(rec, 4);
11870 }
11871
11872 if (!cb->longfmt) {
11873 (void) printf("\n");
11874 continue;
11875 }
11876 (void) printf(" [");
11877 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
11878 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
11879 struct passwd *pwd = getpwuid(who);
11880 (void) printf("user %d ", (int)who);
11881 if (pwd != NULL)
11882 (void) printf("(%s) ", pwd->pw_name);
11883 }
11884 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
11885 (void) printf("on %s",
11886 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
11887 }
11888 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
11889 (void) printf(":%s",
11890 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
11891 }
11892
11893 (void) printf("]");
11894 (void) printf("\n");
11895 }
11896 }
11897
11898 /*
11899 * Print out the command history for a specific pool.
11900 */
11901 static int
11902 get_history_one(zpool_handle_t *zhp, void *data)
11903 {
11904 nvlist_t *nvhis;
11905 int ret;
11906 hist_cbdata_t *cb = (hist_cbdata_t *)data;
11907 uint64_t off = 0;
11908 boolean_t eof = B_FALSE;
11909
11910 cb->first = B_FALSE;
11911
11912 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
11913
11914 while (!eof) {
11915 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
11916 return (ret);
11917
11918 print_history_records(nvhis, cb);
11919 nvlist_free(nvhis);
11920 }
11921 (void) printf("\n");
11922
11923 return (ret);
11924 }
11925
11926 /*
11927 * zpool history <pool>
11928 *
11929 * Displays the history of commands that modified pools.
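 *
 * Example (illustrative): 'zpool history -il tank' prints long-format
 * history for pool 'tank', including internal events.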
11930 */
11931 int
11932 zpool_do_history(int argc, char **argv)
11933 {
11934 hist_cbdata_t cbdata = { 0 };
11935 int ret;
11936 int c;
11937
11938 cbdata.first = B_TRUE;
11939 /* check options */
11940 while ((c = getopt(argc, argv, "li")) != -1) {
11941 switch (c) {
11942 case 'l':
11943 cbdata.longfmt = B_TRUE;
11944 break;
11945 case 'i':
11946 cbdata.internal = B_TRUE;
11947 break;
11948 case '?':
11949 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
11950 optopt);
11951 usage(B_FALSE);
11952 }
11953 }
11954 argc -= optind;
11955 argv += optind;
11956
11957 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
11958 B_FALSE, get_history_one, &cbdata);
11959
11960 if (argc == 0 && cbdata.first == B_TRUE) {
11961 (void) fprintf(stderr, gettext("no pools available\n"));
11962 return (0);
11963 }
11964
11965 return (ret);
11966 }
11967
11968 typedef struct ev_opts {
11969 int verbose;
11970 int scripted;
11971 int follow;
11972 int clear;
11973 char poolname[ZFS_MAX_DATASET_NAME_LEN];
11974 } ev_opts_t;
11975
11976 static void
11977 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
11978 {
11979 char ctime_str[26], str[32];
11980 const char *ptr;
11981 int64_t *tv;
11982 uint_t n;
11983
11984 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
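/*
 * Compose a timestamp of the form "Jun 30 1993 21:49:08.123456789" by
 * splicing pieces of the ctime_r(3) string with the event's nanoseconds.
 */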
11985 memset(str, ' ', 32);
11986 (void) ctime_r((const time_t *)&tv[0], ctime_str);
11987 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
11988 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
11989 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
11990 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
11991 if (opts->scripted)
11992 (void) printf(gettext("%s\t"), str);
11993 else
11994 (void) printf(gettext("%s "), str);
11995
11996 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
11997 (void) printf(gettext("%s\n"), ptr);
11998 }
11999
12000 static void
12001 zpool_do_events_nvprint(nvlist_t *nvl, int depth)
12002 {
12003 nvpair_t *nvp;
12004 static char flagstr[256];
12005
12006 for (nvp = nvlist_next_nvpair(nvl, NULL);
12007 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
12008
12009 data_type_t type = nvpair_type(nvp);
12010 const char *name = nvpair_name(nvp);
12011
12012 boolean_t b;
12013 uint8_t i8;
12014 uint16_t i16;
12015 uint32_t i32;
12016 uint64_t i64;
12017 const char *str;
12018 nvlist_t *cnv;
12019
12020 printf(gettext("%*s%s = "), depth, "", name);
12021
12022 switch (type) {
12023 case DATA_TYPE_BOOLEAN:
12024 printf(gettext("%s"), "1");
12025 break;
12026
12027 case DATA_TYPE_BOOLEAN_VALUE:
12028 (void) nvpair_value_boolean_value(nvp, &b);
12029 printf(gettext("%s"), b ? "1" : "0");
12030 break;
12031
12032 case DATA_TYPE_BYTE:
12033 (void) nvpair_value_byte(nvp, &i8);
12034 printf(gettext("0x%x"), i8);
12035 break;
12036
12037 case DATA_TYPE_INT8:
12038 (void) nvpair_value_int8(nvp, (void *)&i8);
12039 printf(gettext("0x%x"), i8);
12040 break;
12041
12042 case DATA_TYPE_UINT8:
12043 (void) nvpair_value_uint8(nvp, &i8);
12044 printf(gettext("0x%x"), i8);
12045 break;
12046
12047 case DATA_TYPE_INT16:
12048 (void) nvpair_value_int16(nvp, (void *)&i16);
12049 printf(gettext("0x%x"), i16);
12050 break;
12051
12052 case DATA_TYPE_UINT16:
12053 (void) nvpair_value_uint16(nvp, &i16);
12054 printf(gettext("0x%x"), i16);
12055 break;
12056
12057 case DATA_TYPE_INT32:
12058 (void) nvpair_value_int32(nvp, (void *)&i32);
12059 printf(gettext("0x%x"), i32);
12060 break;
12061
12062 case DATA_TYPE_UINT32:
12063 (void) nvpair_value_uint32(nvp, &i32);
12064 if (strcmp(name,
12065 FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 ||
12066 strcmp(name,
12067 FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) {
12068 zfs_valstr_zio_stage(i32, flagstr,
12069 sizeof (flagstr));
12070 printf(gettext("0x%x [%s]"), i32, flagstr);
12071 } else if (strcmp(name,
12072 FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
12073 zfs_valstr_zio_priority(i32, flagstr,
12074 sizeof (flagstr));
12075 printf(gettext("0x%x [%s]"), i32, flagstr);
12076 } else {
12077 printf(gettext("0x%x"), i32);
12078 }
12079 break;
12080
12081 case DATA_TYPE_INT64:
12082 (void) nvpair_value_int64(nvp, (void *)&i64);
12083 printf(gettext("0x%llx"), (u_longlong_t)i64);
12084 break;
12085
12086 case DATA_TYPE_UINT64:
12087 (void) nvpair_value_uint64(nvp, &i64);
12088 /*
12089 * translate vdev state values to readable
12090 * strings to aid zpool events consumers
12091 */
12092 if (strcmp(name,
12093 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
12094 strcmp(name,
12095 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
12096 printf(gettext("\"%s\" (0x%llx)"),
12097 zpool_state_to_name(i64, VDEV_AUX_NONE),
12098 (u_longlong_t)i64);
12099 } else if (strcmp(name,
12100 FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) {
12101 zfs_valstr_zio_flag(i64, flagstr,
12102 sizeof (flagstr));
12103 printf(gettext("0x%llx [%s]"),
12104 (u_longlong_t)i64, flagstr);
12105 } else {
12106 printf(gettext("0x%llx"), (u_longlong_t)i64);
12107 }
12108 break;
12109
12110 case DATA_TYPE_HRTIME:
12111 (void) nvpair_value_hrtime(nvp, (void *)&i64);
12112 printf(gettext("0x%llx"), (u_longlong_t)i64);
12113 break;
12114
12115 case DATA_TYPE_STRING:
12116 (void) nvpair_value_string(nvp, &str);
12117 printf(gettext("\"%s\""), str ? str : "<NULL>");
12118 break;
12119
12120 case DATA_TYPE_NVLIST:
12121 printf(gettext("(embedded nvlist)\n"));
12122 (void) nvpair_value_nvlist(nvp, &cnv);
12123 zpool_do_events_nvprint(cnv, depth + 8);
12124 printf(gettext("%*s(end %s)"), depth, "", name);
12125 break;
12126
12127 case DATA_TYPE_NVLIST_ARRAY: {
12128 nvlist_t **val;
12129 uint_t i, nelem;
12130
12131 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
12132 printf(gettext("(%d embedded nvlists)\n"), nelem);
12133 for (i = 0; i < nelem; i++) {
12134 printf(gettext("%*s%s[%d] = %s\n"),
12135 depth, "", name, i, "(embedded nvlist)");
12136 zpool_do_events_nvprint(val[i], depth + 8);
12137 printf(gettext("%*s(end %s[%i])\n"),
12138 depth, "", name, i);
12139 }
12140 printf(gettext("%*s(end %s)\n"), depth, "", name);
12141 }
12142 break;
12143
12144 case DATA_TYPE_INT8_ARRAY: {
12145 int8_t *val;
12146 uint_t i, nelem;
12147
12148 (void) nvpair_value_int8_array(nvp, &val, &nelem);
12149 for (i = 0; i < nelem; i++)
12150 printf(gettext("0x%x "), val[i]);
12151
12152 break;
12153 }
12154
12155 case DATA_TYPE_UINT8_ARRAY: {
12156 uint8_t *val;
12157 uint_t i, nelem;
12158
12159 (void) nvpair_value_uint8_array(nvp, &val, &nelem);
12160 for (i = 0; i < nelem; i++)
12161 printf(gettext("0x%x "), val[i]);
12162
12163 break;
12164 }
12165
12166 case DATA_TYPE_INT16_ARRAY: {
12167 int16_t *val;
12168 uint_t i, nelem;
12169
12170 (void) nvpair_value_int16_array(nvp, &val, &nelem);
12171 for (i = 0; i < nelem; i++)
12172 printf(gettext("0x%x "), val[i]);
12173
12174 break;
12175 }
12176
12177 case DATA_TYPE_UINT16_ARRAY: {
12178 uint16_t *val;
12179 uint_t i, nelem;
12180
12181 (void) nvpair_value_uint16_array(nvp, &val, &nelem);
12182 for (i = 0; i < nelem; i++)
12183 printf(gettext("0x%x "), val[i]);
12184
12185 break;
12186 }
12187
12188 case DATA_TYPE_INT32_ARRAY: {
12189 int32_t *val;
12190 uint_t i, nelem;
12191
12192 (void) nvpair_value_int32_array(nvp, &val, &nelem);
12193 for (i = 0; i < nelem; i++)
12194 printf(gettext("0x%x "), val[i]);
12195
12196 break;
12197 }
12198
12199 case DATA_TYPE_UINT32_ARRAY: {
12200 uint32_t *val;
12201 uint_t i, nelem;
12202
12203 (void) nvpair_value_uint32_array(nvp, &val, &nelem);
12204 for (i = 0; i < nelem; i++)
12205 printf(gettext("0x%x "), val[i]);
12206
12207 break;
12208 }
12209
12210 case DATA_TYPE_INT64_ARRAY: {
12211 int64_t *val;
12212 uint_t i, nelem;
12213
12214 (void) nvpair_value_int64_array(nvp, &val, &nelem);
12215 for (i = 0; i < nelem; i++)
12216 printf(gettext("0x%llx "),
12217 (u_longlong_t)val[i]);
12218
12219 break;
12220 }
12221
12222 case DATA_TYPE_UINT64_ARRAY: {
12223 uint64_t *val;
12224 uint_t i, nelem;
12225
12226 (void) nvpair_value_uint64_array(nvp, &val, &nelem);
12227 for (i = 0; i < nelem; i++)
12228 printf(gettext("0x%llx "),
12229 (u_longlong_t)val[i]);
12230
12231 break;
12232 }
12233
12234 case DATA_TYPE_STRING_ARRAY: {
12235 const char **str;
12236 uint_t i, nelem;
12237
12238 (void) nvpair_value_string_array(nvp, &str, &nelem);
12239 for (i = 0; i < nelem; i++)
12240 printf(gettext("\"%s\" "),
12241 str[i] ? str[i] : "<NULL>");
12242
12243 break;
12244 }
12245
12246 case DATA_TYPE_BOOLEAN_ARRAY:
12247 case DATA_TYPE_BYTE_ARRAY:
12248 case DATA_TYPE_DOUBLE:
12249 case DATA_TYPE_DONTCARE:
12250 case DATA_TYPE_UNKNOWN:
12251 printf(gettext("<unknown>"));
12252 break;
12253 }
12254
12255 printf(gettext("\n"));
12256 }
12257 }
12258
12259 static int
12260 zpool_do_events_next(ev_opts_t *opts)
12261 {
12262 nvlist_t *nvl;
12263 int zevent_fd, ret, dropped;
12264 const char *pool;
12265
12266 zevent_fd = open(ZFS_DEV, O_RDWR);
12267 VERIFY(zevent_fd >= 0);
12268
12269 if (!opts->scripted)
12270 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
12271
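/*
 * In follow mode (-f), block waiting for new events; otherwise drain
 * the queued events and return.
 */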
12272 while (1) {
12273 ret = zpool_events_next(g_zfs, &nvl, &dropped,
12274 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
12275 if (ret || nvl == NULL)
12276 break;
12277
12278 if (dropped > 0)
12279 (void) printf(gettext("dropped %d events\n"), dropped);
12280
12281 if (strlen(opts->poolname) > 0 &&
12282 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
12283 strcmp(opts->poolname, pool) != 0)
12284 continue;
12285
12286 zpool_do_events_short(nvl, opts);
12287
12288 if (opts->verbose) {
12289 zpool_do_events_nvprint(nvl, 8);
12290 printf(gettext("\n"));
12291 }
12292 (void) fflush(stdout);
12293
12294 nvlist_free(nvl);
12295 }
12296
12297 VERIFY(0 == close(zevent_fd));
12298
12299 return (ret);
12300 }
12301
12302 static int
12303 zpool_do_events_clear(void)
12304 {
12305 int count, ret;
12306
12307 ret = zpool_events_clear(g_zfs, &count);
12308 if (!ret)
12309 (void) printf(gettext("cleared %d events\n"), count);
12310
12311 return (ret);
12312 }
12313
12314 /*
12315 * zpool events [-vHf [pool] | -c]
12316 *
12317 * Displays events logged by ZFS.
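 *
 * Examples (illustrative): 'zpool events -v tank' prints verbose events
 * for pool 'tank'; 'zpool events -c' clears the event log.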
12318 */
12319 int
12320 zpool_do_events(int argc, char **argv)
12321 {
12322 ev_opts_t opts = { 0 };
12323 int ret;
12324 int c;
12325
12326 /* check options */
12327 while ((c = getopt(argc, argv, "vHfc")) != -1) {
12328 switch (c) {
12329 case 'v':
12330 opts.verbose = 1;
12331 break;
12332 case 'H':
12333 opts.scripted = 1;
12334 break;
12335 case 'f':
12336 opts.follow = 1;
12337 break;
12338 case 'c':
12339 opts.clear = 1;
12340 break;
12341 case '?':
12342 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12343 optopt);
12344 usage(B_FALSE);
12345 }
12346 }
12347 argc -= optind;
12348 argv += optind;
12349
12350 if (argc > 1) {
12351 (void) fprintf(stderr, gettext("too many arguments\n"));
12352 usage(B_FALSE);
12353 } else if (argc == 1) {
12354 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
12355 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
12356 (void) fprintf(stderr,
12357 gettext("invalid pool name '%s'\n"), opts.poolname);
12358 usage(B_FALSE);
12359 }
12360 }
12361
12362 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
12363 opts.clear) {
12364 (void) fprintf(stderr,
12365 gettext("invalid options combined with -c\n"));
12366 usage(B_FALSE);
12367 }
12368
12369 if (opts.clear)
12370 ret = zpool_do_events_clear();
12371 else
12372 ret = zpool_do_events_next(&opts);
12373
12374 return (ret);
12375 }
12376
12377 static int
12378 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
12379 {
12380 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12381 char value[ZFS_MAXPROPLEN];
12382 zprop_source_t srctype;
12383 nvlist_t *props, *item, *d;
12384 props = item = d = NULL;
12385
12386 if (cbp->cb_json) {
12387 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs");
12388 if (d == NULL) {
12389 fprintf(stderr, "vdevs obj not found.\n");
12390 exit(1);
12391 }
12392 props = fnvlist_alloc();
12393 }
12394
12395 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
12396 pl = pl->pl_next) {
12397 char *prop_name;
12398 /*
12399 * If the first property is pool name, it is a special
12400 * placeholder that we can skip. This will also skip
12401 * over the name property when 'all' is specified.
12402 */
12403 if (pl->pl_prop == ZPOOL_PROP_NAME &&
12404 pl == cbp->cb_proplist)
12405 continue;
12406
12407 if (pl->pl_prop == ZPROP_INVAL) {
12408 prop_name = pl->pl_user_prop;
12409 } else {
12410 prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
12411 }
12412 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
12413 prop_name, value, sizeof (value), &srctype,
12414 cbp->cb_literal) == 0) {
12415 zprop_collect_property(vdevname, cbp, prop_name,
12416 value, srctype, NULL, NULL, props);
12417 }
12418 }
12419
12420 if (cbp->cb_json) {
12421 if (!nvlist_empty(props)) {
12422 item = fnvlist_alloc();
12423 fill_vdev_info(item, zhp, vdevname, B_TRUE,
12424 cbp->cb_json_as_int);
12425 fnvlist_add_nvlist(item, "properties", props);
12426 fnvlist_add_nvlist(d, vdevname, item);
12427 fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d);
12428 fnvlist_free(item);
12429 }
12430 fnvlist_free(props);
12431 }
12432
12433 return (0);
12434 }
12435
12436 static int
12437 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
12438 {
12439 zpool_handle_t *zhp = zhp_data;
12440 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12441 char *vdevname;
12442 const char *type;
12443 int ret;
12444
12445 /*
12446 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
12447 * pool name for display purposes, which is not desired. Fallback to
12448 * zpool_vdev_name() when not dealing with the root vdev.
12449 */
12450 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
12451 if (zhp != NULL && strcmp(type, "root") == 0)
12452 vdevname = strdup("root-0");
12453 else
12454 vdevname = zpool_vdev_name(g_zfs, zhp, nv,
12455 cbp->cb_vdevs.cb_name_flags);
12456
12457 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
12458
12459 ret = get_callback_vdev(zhp, vdevname, data);
12460
12461 free(vdevname);
12462
12463 return (ret);
12464 }
12465
12466 static int
12467 get_callback(zpool_handle_t *zhp, void *data)
12468 {
12469 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12470 char value[ZFS_MAXPROPLEN];
12471 zprop_source_t srctype;
12472 zprop_list_t *pl;
12473 int vid;
12474 int err = 0;
12475 nvlist_t *props, *item, *d;
12476 props = item = d = NULL;
12477
12478 if (cbp->cb_type == ZFS_TYPE_VDEV) {
12479 if (cbp->cb_json) {
12480 nvlist_t *pool = fnvlist_alloc();
12481 fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int);
12482 fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool);
12483 fnvlist_free(pool);
12484 }
12485
12486 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
12487 for_each_vdev(zhp, get_callback_vdev_cb, data);
12488 } else {
12489 /* Adjust column widths for vdev properties */
12490 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
12491 vid++) {
12492 vdev_expand_proplist(zhp,
12493 cbp->cb_vdevs.cb_names[vid],
12494 &cbp->cb_proplist);
12495 }
12496 /* Display the properties */
12497 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
12498 vid++) {
12499 get_callback_vdev(zhp,
12500 cbp->cb_vdevs.cb_names[vid], data);
12501 }
12502 }
12503 } else {
12504 assert(cbp->cb_type == ZFS_TYPE_POOL);
12505 if (cbp->cb_json) {
12506 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
12507 if (d == NULL) {
12508 fprintf(stderr, "pools obj not found.\n");
12509 exit(1);
12510 }
12511 props = fnvlist_alloc();
12512 }
12513 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
12514 /*
12515 * Skip the special fake placeholder. This will also
12516 * skip over the name property when 'all' is specified.
12517 */
12518 if (pl->pl_prop == ZPOOL_PROP_NAME &&
12519 pl == cbp->cb_proplist)
12520 continue;
12521
12522 if (pl->pl_prop == ZPROP_INVAL &&
12523 zfs_prop_user(pl->pl_user_prop)) {
12524 srctype = ZPROP_SRC_LOCAL;
12525
12526 if (zpool_get_userprop(zhp, pl->pl_user_prop,
12527 value, sizeof (value), &srctype) != 0)
12528 continue;
12529
12530 err = zprop_collect_property(
12531 zpool_get_name(zhp), cbp, pl->pl_user_prop,
12532 value, srctype, NULL, NULL, props);
12533 } else if (pl->pl_prop == ZPROP_INVAL &&
12534 (zpool_prop_feature(pl->pl_user_prop) ||
12535 zpool_prop_unsupported(pl->pl_user_prop))) {
12536 srctype = ZPROP_SRC_LOCAL;
12537
12538 if (zpool_prop_get_feature(zhp,
12539 pl->pl_user_prop, value,
12540 sizeof (value)) == 0) {
12541 err = zprop_collect_property(
12542 zpool_get_name(zhp), cbp,
12543 pl->pl_user_prop, value, srctype,
12544 NULL, NULL, props);
12545 }
12546 } else {
12547 if (zpool_get_prop(zhp, pl->pl_prop, value,
12548 sizeof (value), &srctype,
12549 cbp->cb_literal) != 0)
12550 continue;
12551
12552 err = zprop_collect_property(
12553 zpool_get_name(zhp), cbp,
12554 zpool_prop_to_name(pl->pl_prop),
12555 value, srctype, NULL, NULL, props);
12556 }
12557 if (err != 0)
12558 return (err);
12559 }
12560
12561 if (cbp->cb_json) {
12562 if (!nvlist_empty(props)) {
12563 item = fnvlist_alloc();
12564 fill_pool_info(item, zhp, B_TRUE,
12565 cbp->cb_json_as_int);
12566 fnvlist_add_nvlist(item, "properties", props);
12567 if (cbp->cb_json_pool_key_guid) {
12568 char buf[256];
12569 uint64_t guid = fnvlist_lookup_uint64(
12570 zpool_get_config(zhp, NULL),
12571 ZPOOL_CONFIG_POOL_GUID);
12572 snprintf(buf, 256, "%llu",
12573 (u_longlong_t)guid);
12574 fnvlist_add_nvlist(d, buf, item);
12575 } else {
12576 const char *name = zpool_get_name(zhp);
12577 fnvlist_add_nvlist(d, name, item);
12578 }
12579 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
12580 fnvlist_free(item);
12581 }
12582 fnvlist_free(props);
12583 }
12584 }
12585
12586 return (0);
12587 }
12588
12589 /*
12590 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
12591 *
12592 * -H Scripted mode. Don't display headers, and separate properties
12593 * by a single tab.
12594 * -o List of columns to display. Defaults to
12595 * "name,property,value,source".
12596 * -p Display values in parsable (exact) format.
12597 * -j Display output in JSON format.
12598 * --json-int Display numbers as integers instead of strings.
12599 * --json-pool-key-guid Set pool GUID as key for pool objects.
12600 *
12601 * Get properties of pools in the system. Output space statistics
12602 * for each one as well as other attributes.
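 *
 * Examples (illustrative): 'zpool get -p size,capacity tank' prints the
 * two properties in parsable form; 'zpool get all tank root' prints all
 * vdev properties of the root vdev.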
12603 */
12604 int
12605 zpool_do_get(int argc, char **argv)
12606 {
12607 zprop_get_cbdata_t cb = { 0 };
12608 zprop_list_t fake_name = { 0 };
12609 int ret;
12610 int c, i;
12611 char *propstr = NULL;
12612 char *vdev = NULL;
12613 nvlist_t *data = NULL;
12614
12615 cb.cb_first = B_TRUE;
12616
12617 /*
12618 * Set up default columns and sources.
12619 */
12620 cb.cb_sources = ZPROP_SRC_ALL;
12621 cb.cb_columns[0] = GET_COL_NAME;
12622 cb.cb_columns[1] = GET_COL_PROPERTY;
12623 cb.cb_columns[2] = GET_COL_VALUE;
12624 cb.cb_columns[3] = GET_COL_SOURCE;
12625 cb.cb_type = ZFS_TYPE_POOL;
12626 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
12627 current_prop_type = cb.cb_type;
12628
12629 struct option long_options[] = {
12630 {"json", no_argument, NULL, 'j'},
12631 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
12632 {"json-pool-key-guid", no_argument, NULL,
12633 ZPOOL_OPTION_POOL_KEY_GUID},
12634 {0, 0, 0, 0}
12635 };
12636
12637 /* check options */
12638 while ((c = getopt_long(argc, argv, ":jHpo:", long_options,
12639 NULL)) != -1) {
12640 switch (c) {
12641 case 'p':
12642 cb.cb_literal = B_TRUE;
12643 break;
12644 case 'H':
12645 cb.cb_scripted = B_TRUE;
12646 break;
12647 case 'j':
12648 cb.cb_json = B_TRUE;
12649 cb.cb_jsobj = zpool_json_schema(0, 1);
12650 data = fnvlist_alloc();
12651 break;
12652 case ZPOOL_OPTION_POOL_KEY_GUID:
12653 cb.cb_json_pool_key_guid = B_TRUE;
12654 break;
12655 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
12656 cb.cb_json_as_int = B_TRUE;
12657 cb.cb_literal = B_TRUE;
12658 break;
12659 case 'o':
12660 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
12661 i = 0;
12662
12663 for (char *tok; (tok = strsep(&optarg, ",")); ) {
12664 static const char *const col_opts[] =
12665 { "name", "property", "value", "source",
12666 "all" };
12667 static const zfs_get_column_t col_cols[] =
12668 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
12669 GET_COL_SOURCE };
12670
12671 if (i == ZFS_GET_NCOLS - 1) {
12672 (void) fprintf(stderr, gettext("too "
12673 "many fields given to -o "
12674 "option\n"));
12675 usage(B_FALSE);
12676 }
12677
12678 for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
12679 if (strcmp(tok, col_opts[c]) == 0)
12680 goto found;
12681
12682 (void) fprintf(stderr,
12683 gettext("invalid column name '%s'\n"), tok);
12684 usage(B_FALSE);
12685
12686 found:
12687 if (c >= 4) {
12688 if (i > 0) {
12689 (void) fprintf(stderr,
12690 gettext("\"all\" conflicts "
12691 "with specific fields "
12692 "given to -o option\n"));
12693 usage(B_FALSE);
12694 }
12695
12696 memcpy(cb.cb_columns, col_cols,
12697 sizeof (col_cols));
12698 i = ZFS_GET_NCOLS - 1;
12699 } else
12700 cb.cb_columns[i++] = col_cols[c];
12701 }
12702 break;
12703 case '?':
12704 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12705 optopt);
12706 usage(B_FALSE);
12707 }
12708 }
12709
12710 argc -= optind;
12711 argv += optind;
12712
12713 if (!cb.cb_json && cb.cb_json_as_int) {
12714 (void) fprintf(stderr, gettext("'--json-int' only works with"
12715 " '-j' option\n"));
12716 usage(B_FALSE);
12717 }
12718
12719 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
12720 (void) fprintf(stderr, gettext("'json-pool-key-guid' only"
12721 " works with '-j' option\n"));
12722 usage(B_FALSE);
12723 }
12724
12725 if (argc < 1) {
12726 (void) fprintf(stderr, gettext("missing property "
12727 "argument\n"));
12728 usage(B_FALSE);
12729 }
12730
12731 /* Properties list is needed later by zprop_get_list() */
12732 propstr = argv[0];
12733
12734 argc--;
12735 argv++;
12736
12737 if (argc == 0) {
12738 /* No args, so just print the defaults. */
12739 } else if (are_all_pools(argc, argv)) {
12740 /* All the args are pool names */
12741 } else if (are_all_pools(1, argv)) {
12742 /* The first arg is a pool name */
12743 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
12744 (argc == 2 && strcmp(argv[1], "root") == 0) ||
12745 are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
12746 &cb.cb_vdevs)) {
12747
12748 if (strcmp(argv[1], "root") == 0)
12749 vdev = strdup("root-0");
12750 else
12751 vdev = strdup(argv[1]);
12752
12753 /* ... and the rest are vdev names */
12754 cb.cb_vdevs.cb_names = &vdev;
12755 cb.cb_vdevs.cb_names_count = argc - 1;
12756 cb.cb_type = ZFS_TYPE_VDEV;
12757 argc = 1; /* One pool to process */
12758 } else {
12759 if (cb.cb_json) {
12760 nvlist_free(cb.cb_jsobj);
12761 nvlist_free(data);
12762 }
12763 fprintf(stderr, gettext("Expected a list of vdevs in"
12764 " \"%s\", but got:\n"), argv[0]);
12765 error_list_unresolved_vdevs(argc - 1, argv + 1,
12766 argv[0], &cb.cb_vdevs);
12767 fprintf(stderr, "\n");
12768 usage(B_FALSE);
12769 return (1);
12770 }
12771 } else {
12772 if (cb.cb_json) {
12773 nvlist_free(cb.cb_jsobj);
12774 nvlist_free(data);
12775 }
12776 /*
12777 * The first arg isn't the name of a valid pool.
12778 */
12779 fprintf(stderr, gettext("Cannot get properties of %s: "
12780 "no such pool available.\n"), argv[0]);
12781 return (1);
12782 }
12783
12784 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
12785 cb.cb_type) != 0) {
12786 /* Use correct list of valid properties (pool or vdev) */
12787 current_prop_type = cb.cb_type;
12788 usage(B_FALSE);
12789 }
12790
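/*
 * Prepend a fake 'name' entry to the property list so the NAME column
 * is sized and printed like any other column.
 */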
12791 if (cb.cb_proplist != NULL) {
12792 fake_name.pl_prop = ZPOOL_PROP_NAME;
12793 fake_name.pl_width = strlen(gettext("NAME"));
12794 fake_name.pl_next = cb.cb_proplist;
12795 cb.cb_proplist = &fake_name;
12796 }
12797
12798 if (cb.cb_json) {
12799 if (cb.cb_type == ZFS_TYPE_VDEV)
12800 fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data);
12801 else
12802 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
12803 fnvlist_free(data);
12804 }
12805
12806 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
12807 cb.cb_literal, get_callback, &cb);
12808
12809 if (ret == 0 && cb.cb_json)
12810 zcmd_print_json(cb.cb_jsobj);
12811 else if (ret != 0 && cb.cb_json)
12812 nvlist_free(cb.cb_jsobj);
12813
12814 if (cb.cb_proplist == &fake_name)
12815 zprop_free_list(fake_name.pl_next);
12816 else
12817 zprop_free_list(cb.cb_proplist);
12818
12819 if (vdev != NULL)
12820 free(vdev);
12821
12822 return (ret);
12823 }
12824
12825 typedef struct set_cbdata {
12826 char *cb_propname;
12827 char *cb_value;
12828 zfs_type_t cb_type;
12829 vdev_cbdata_t cb_vdevs;
12830 boolean_t cb_any_successful;
12831 } set_cbdata_t;
12832
12833 static int
12834 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
12835 {
12836 int error;
12837
12838 /* Check if we have out-of-bounds features */
12839 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
12840 boolean_t features[SPA_FEATURES];
12841 if (zpool_do_load_compat(cb->cb_value, features) !=
12842 ZPOOL_COMPATIBILITY_OK)
12843 return (-1);
12844
12845 nvlist_t *enabled = zpool_get_features(zhp);
12846 spa_feature_t i;
12847 for (i = 0; i < SPA_FEATURES; i++) {
12848 const char *fguid = spa_feature_table[i].fi_guid;
12849 if (nvlist_exists(enabled, fguid) && !features[i])
12850 break;
12851 }
12852 if (i < SPA_FEATURES)
12853 (void) fprintf(stderr, gettext("Warning: one or "
12854 "more features already enabled on pool '%s'\n"
12855 "are not present in this compatibility set.\n"),
12856 zpool_get_name(zhp));
12857 }
12858
12859 /* if we're setting a feature, check it's in compatibility set */
12860 if (zpool_prop_feature(cb->cb_propname) &&
12861 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
12862 char *fname = strchr(cb->cb_propname, '@') + 1;
12863 spa_feature_t f;
12864
12865 if (zfeature_lookup_name(fname, &f) == 0) {
12866 char compat[ZFS_MAXPROPLEN];
12867 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
12868 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
12869 compat[0] = '\0';
12870
12871 boolean_t features[SPA_FEATURES];
12872 if (zpool_do_load_compat(compat, features) !=
12873 ZPOOL_COMPATIBILITY_OK) {
12874 (void) fprintf(stderr, gettext("Error: "
12875 "cannot enable feature '%s' on pool '%s'\n"
12876 "because the pool's 'compatibility' "
12877 "property cannot be parsed.\n"),
12878 fname, zpool_get_name(zhp));
12879 return (-1);
12880 }
12881
12882 if (!features[f]) {
12883 (void) fprintf(stderr, gettext("Error: "
12884 "cannot enable feature '%s' on pool '%s'\n"
12885 "as it is not specified in this pool's "
12886 "current compatibility set.\n"
12887 "Consider setting 'compatibility' to a "
12888 "less restrictive set, or to 'off'.\n"),
12889 fname, zpool_get_name(zhp));
12890 return (-1);
12891 }
12892 }
12893 }
12894
12895 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
12896
12897 return (error);
12898 }
12899
12900 static int
12901 set_callback(zpool_handle_t *zhp, void *data)
12902 {
12903 int error;
12904 set_cbdata_t *cb = (set_cbdata_t *)data;
12905
12906 if (cb->cb_type == ZFS_TYPE_VDEV) {
12907 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
12908 cb->cb_propname, cb->cb_value);
12909 } else {
12910 assert(cb->cb_type == ZFS_TYPE_POOL);
12911 error = set_pool_callback(zhp, cb);
12912 }
12913
12914 cb->cb_any_successful = !error;
12915 return (error);
12916 }
12917
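/*
 * zpool set property=value <pool> [vdev]
 *
 * Sets the given property on the named pool or, when a vdev is also
 * supplied, on that vdev (illustratively, 'zpool set autotrim=on tank').
 */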
12918 int
12919 zpool_do_set(int argc, char **argv)
12920 {
12921 set_cbdata_t cb = { 0 };
12922 int error;
12923 char *vdev = NULL;
12924
12925 current_prop_type = ZFS_TYPE_POOL;
12926 if (argc > 1 && argv[1][0] == '-') {
12927 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12928 argv[1][1]);
12929 usage(B_FALSE);
12930 }
12931
12932 if (argc < 2) {
12933 (void) fprintf(stderr, gettext("missing property=value "
12934 "argument\n"));
12935 usage(B_FALSE);
12936 }
12937
12938 if (argc < 3) {
12939 (void) fprintf(stderr, gettext("missing pool name\n"));
12940 usage(B_FALSE);
12941 }
12942
12943 if (argc > 4) {
12944 (void) fprintf(stderr, gettext("too many pool names\n"));
12945 usage(B_FALSE);
12946 }
12947
12948 cb.cb_propname = argv[1];
12949 cb.cb_type = ZFS_TYPE_POOL;
12950 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
12951 cb.cb_value = strchr(cb.cb_propname, '=');
12952 if (cb.cb_value == NULL) {
12953 (void) fprintf(stderr, gettext("missing value in "
12954 "property=value argument\n"));
12955 usage(B_FALSE);
12956 }
12957
12958 *(cb.cb_value) = '\0';
12959 cb.cb_value++;
12960 argc -= 2;
12961 argv += 2;
12962
12963 /* argv[0] is pool name */
12964 if (!is_pool(argv[0])) {
12965 (void) fprintf(stderr,
12966 gettext("cannot open '%s': is not a pool\n"), argv[0]);
12967 return (EINVAL);
12968 }
12969
12970 /* argv[1], when supplied, is vdev name */
12971 if (argc == 2) {
12972
12973 if (strcmp(argv[1], "root") == 0)
12974 vdev = strdup("root-0");
12975 else
12976 vdev = strdup(argv[1]);
12977
12978 if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
12979 (void) fprintf(stderr, gettext(
12980 "cannot find '%s' in '%s': device not in pool\n"),
12981 vdev, argv[0]);
12982 free(vdev);
12983 return (EINVAL);
12984 }
12985 cb.cb_vdevs.cb_names = &vdev;
12986 cb.cb_vdevs.cb_names_count = 1;
12987 cb.cb_type = ZFS_TYPE_VDEV;
12988 }
12989
12990 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
12991 B_FALSE, set_callback, &cb);
12992
12993 if (vdev != NULL)
12994 free(vdev);
12995
12996 return (error);
12997 }
12998
12999 /* Add up the total number of bytes left to initialize/trim across all vdevs */
13000 static uint64_t
13001 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
13002 {
13003 uint64_t bytes_remaining;
13004 nvlist_t **child;
13005 uint_t c, children;
13006 vdev_stat_t *vs;
13007
13008 assert(activity == ZPOOL_WAIT_INITIALIZE ||
13009 activity == ZPOOL_WAIT_TRIM);
13010
13011 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
13012 (uint64_t **)&vs, &c) == 0);
13013
13014 if (activity == ZPOOL_WAIT_INITIALIZE &&
13015 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
13016 bytes_remaining = vs->vs_initialize_bytes_est -
13017 vs->vs_initialize_bytes_done;
13018 else if (activity == ZPOOL_WAIT_TRIM &&
13019 vs->vs_trim_state == VDEV_TRIM_ACTIVE)
13020 bytes_remaining = vs->vs_trim_bytes_est -
13021 vs->vs_trim_bytes_done;
13022 else
13023 bytes_remaining = 0;
13024
13025 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13026 &child, &children) != 0)
13027 children = 0;
13028
13029 for (c = 0; c < children; c++)
13030 bytes_remaining += vdev_activity_remaining(child[c], activity);
13031
13032 return (bytes_remaining);
13033 }
13034
13035 /* Add up the total number of bytes left to rebuild across top-level vdevs */
13036 static uint64_t
13037 vdev_activity_top_remaining(nvlist_t *nv)
13038 {
13039 uint64_t bytes_remaining = 0;
13040 nvlist_t **child;
13041 uint_t children;
13042 int error;
13043
13044 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13045 &child, &children) != 0)
13046 children = 0;
13047
13048 for (uint_t c = 0; c < children; c++) {
13049 vdev_rebuild_stat_t *vrs;
13050 uint_t i;
13051
13052 error = nvlist_lookup_uint64_array(child[c],
13053 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
13054 if (error == 0) {
13055 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
13056 bytes_remaining += (vrs->vrs_bytes_est -
13057 vrs->vrs_bytes_rebuilt);
13058 }
13059 }
13060 }
13061
13062 return (bytes_remaining);
13063 }
13064
13065 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
13066 static boolean_t
13067 vdev_any_spare_replacing(nvlist_t *nv)
13068 {
13069 nvlist_t **child;
13070 uint_t c, children;
13071 const char *vdev_type;
13072
13073 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
13074
13075 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
13076 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
13077 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
13078 return (B_TRUE);
13079 }
13080
13081 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13082 &child, &children) != 0)
13083 children = 0;
13084
13085 for (c = 0; c < children; c++) {
13086 if (vdev_any_spare_replacing(child[c]))
13087 return (B_TRUE);
13088 }
13089
13090 return (B_FALSE);
13091 }
13092
13093 typedef struct wait_data {
13094 char *wd_poolname;
13095 boolean_t wd_scripted;
13096 boolean_t wd_exact;
13097 boolean_t wd_headers_once;
13098 boolean_t wd_should_exit;
13099 /* Which activities to wait for */
13100 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
13101 float wd_interval;
13102 pthread_cond_t wd_cv;
13103 pthread_mutex_t wd_mutex;
13104 } wait_data_t;
13105
13106 /*
13107 * Print to stdout a single line, containing one column for each activity that
13108 * we are waiting for, specifying how many bytes of work are left for that
13109 * activity.
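 *
 * Example output (illustrative):
 *
 *     FREE   RESILVER
 *        0      1.20G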
13110 */
13111 static void
13112 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
13113 {
13114 nvlist_t *config, *nvroot;
13115 uint_t c;
13116 int i;
13117 pool_checkpoint_stat_t *pcs = NULL;
13118 pool_scan_stat_t *pss = NULL;
13119 pool_removal_stat_t *prs = NULL;
13120 pool_raidz_expand_stat_t *pres = NULL;
13121 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
13122 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
13123 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
13124
13125 /* Calculate the width of each column */
13126 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13127 /*
13128 * Make sure we have enough space in the col for pretty-printed
13129 * numbers and for the column header, and then leave a couple
13130 * spaces between cols for readability.
13131 */
13132 col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
13133 }
13134
13135 if (timestamp_fmt != NODATE)
13136 print_timestamp(timestamp_fmt);
13137
13138 /* Print header if appropriate */
13139 int term_height = terminal_height();
13140 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
13141 row % (term_height-1) == 0);
13142 if (!wd->wd_scripted && (row == 0 || reprint_header)) {
13143 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13144 if (wd->wd_enabled[i])
13145 (void) printf("%*s", col_widths[i], headers[i]);
13146 }
13147 (void) fputc('\n', stdout);
13148 }
13149
13150 /* Bytes of work remaining in each activity */
13151 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
13152
13153 bytes_rem[ZPOOL_WAIT_FREE] =
13154 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
13155
13156 config = zpool_get_config(zhp, NULL);
13157 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
13158
13159 (void) nvlist_lookup_uint64_array(nvroot,
13160 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
13161 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
13162 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
13163
13164 (void) nvlist_lookup_uint64_array(nvroot,
13165 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
13166 if (prs != NULL && prs->prs_state == DSS_SCANNING)
13167 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
13168 prs->prs_copied;
13169
13170 (void) nvlist_lookup_uint64_array(nvroot,
13171 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
13172 if (pss != NULL && pss->pss_state == DSS_SCANNING &&
13173 pss->pss_pass_scrub_pause == 0) {
13174 int64_t rem = pss->pss_to_examine - pss->pss_issued;
13175 if (pss->pss_func == POOL_SCAN_SCRUB)
13176 bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
13177 else
13178 bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
13179 } else if (check_rebuilding(nvroot, NULL)) {
13180 bytes_rem[ZPOOL_WAIT_RESILVER] =
13181 vdev_activity_top_remaining(nvroot);
13182 }
13183
13184 (void) nvlist_lookup_uint64_array(nvroot,
13185 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
13186 if (pres != NULL && pres->pres_state == DSS_SCANNING) {
13187 int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
13188 bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
13189 }
13190
13191 bytes_rem[ZPOOL_WAIT_INITIALIZE] =
13192 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
13193 bytes_rem[ZPOOL_WAIT_TRIM] =
13194 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
13195
13196 /*
13197 * A replace finishes after resilvering finishes, so the amount of work
13198 * left for a replace is the same as for resilvering.
13199 *
13200 * It isn't quite correct to say that if we have any 'spare' or
13201 * 'replacing' vdevs and a resilver is happening, then a replace is in
13202 * progress, like we do here. When a hot spare is used, the faulted vdev
13203 * is not removed after the hot spare is resilvered, so parent 'spare'
13204 * vdev is not removed either. So we could have a 'spare' vdev, but be
13205 * resilvering for a different reason. However, we use it as a heuristic
13206 * because we don't have access to the DTLs, which could tell us whether
13207 * or not we have really finished resilvering a hot spare.
13208 */
13209 if (vdev_any_spare_replacing(nvroot))
13210 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];

	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
		char buf[64];
		if (!wd->wd_enabled[i])
			continue;

		if (wd->wd_exact) {
			(void) snprintf(buf, sizeof (buf), "%" PRIi64,
			    bytes_rem[i]);
		} else {
			zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
		}

		if (wd->wd_scripted)
			(void) printf(i == 0 ? "%s" : "\t%s", buf);
		else
			(void) printf(" %*s", col_widths[i] - 1, buf);
	}
	(void) printf("\n");
	(void) fflush(stdout);
}

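/*
 * Repeatedly print a row of progress for each enabled activity, sleeping
 * wd_interval seconds between rows, until the main thread signals wd_cv.
 */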
static void *
wait_status_thread(void *arg)
{
	wait_data_t *wd = (wait_data_t *)arg;
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
		return (void *)(1);

	for (int row = 0; ; row++) {
		boolean_t missing;
		struct timespec timeout;
		int ret = 0;
		(void) clock_gettime(CLOCK_REALTIME, &timeout);

		if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
		    zpool_props_refresh(zhp) != 0) {
			zpool_close(zhp);
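			/* A vanished pool just means nothing left to wait for */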
			return (void *)(uintptr_t)(missing ? 0 : 1);
		}

		print_wait_status_row(wd, zhp, row);

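		/*
		 * Compute an absolute deadline: whole seconds of the interval
		 * plus its fractional part in nanoseconds, keeping tv_nsec
		 * normalized to [0, NANOSEC).
		 */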
		timeout.tv_sec += floor(wd->wd_interval);
		long nanos = timeout.tv_nsec +
		    (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
		if (nanos >= NANOSEC) {
			timeout.tv_sec++;
			timeout.tv_nsec = nanos - NANOSEC;
		} else {
			timeout.tv_nsec = nanos;
		}
		pthread_mutex_lock(&wd->wd_mutex);
		if (!wd->wd_should_exit)
			ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
			    &timeout);
		pthread_mutex_unlock(&wd->wd_mutex);
		if (ret == 0) {
			break; /* signaled by main thread */
		} else if (ret != ETIMEDOUT) {
			(void) fprintf(stderr, gettext("pthread_cond_timedwait "
			    "failed: %s\n"), strerror(ret));
			zpool_close(zhp);
			return (void *)(uintptr_t)(1);
		}
	}

	zpool_close(zhp);
	return (void *)(0);
}

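/*
 * zpool wait [-Hnp] [-T d|u] [-t <activity>[,...]] <pool> [interval]
 *
 *	-H	Scripted mode.  Don't display headers.
 *	-n	Print the column headers only once, not before every update.
 *	-p	Display values in parsable (exact) format.
 *	-T	Display a timestamp in date(1) or Unix format.
 *	-t	Only wait for the specified activity types.
 *
 * Wait until all activities of the given types are complete for the pool,
 * printing progress if an interval is given.
 */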
int
zpool_do_wait(int argc, char **argv)
{
	boolean_t verbose = B_FALSE;
	int c, i;
	unsigned long count;
	pthread_t status_thr;
	int error = 0;
	zpool_handle_t *zhp;

	wait_data_t wd;
	wd.wd_scripted = B_FALSE;
	wd.wd_exact = B_FALSE;
	wd.wd_headers_once = B_FALSE;
	wd.wd_should_exit = B_FALSE;

	pthread_mutex_init(&wd.wd_mutex, NULL);
	pthread_cond_init(&wd.wd_cv, NULL);

	/* By default, wait for all types of activity. */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
		wd.wd_enabled[i] = B_TRUE;

	while ((c = getopt(argc, argv, "HnpT:t:")) != -1) {
		switch (c) {
		case 'H':
			wd.wd_scripted = B_TRUE;
			break;
		case 'n':
			wd.wd_headers_once = B_TRUE;
			break;
		case 'p':
			wd.wd_exact = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 't':
			/* Reset activities array */
			memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));

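			/* strsep() consumes one comma-separated token at a time */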
			for (char *tok; (tok = strsep(&optarg, ",")); ) {
				static const char *const col_opts[] = {
				    "discard", "free", "initialize", "replace",
				    "remove", "resilver", "scrub", "trim",
				    "raidz_expand" };

				for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
					if (strcmp(tok, col_opts[i]) == 0) {
						wd.wd_enabled[i] = B_TRUE;
						goto found;
					}

				(void) fprintf(stderr,
				    gettext("invalid activity '%s'\n"), tok);
				usage(B_FALSE);
found:;
			}
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	get_interval_count(&argc, argv, &wd.wd_interval, &count);
	if (count != 0) {
		/* This subcmd only accepts an interval, not a count */
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	if (wd.wd_interval != 0)
		verbose = B_TRUE;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	wd.wd_poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
		return (1);

	if (verbose) {
		/*
		 * We use a separate thread for printing status updates because
		 * the main thread will call lzc_wait(), which blocks as long
		 * as an activity is in progress, which can be a long time.
		 */
		if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
		    != 0) {
			(void) fprintf(stderr, gettext("failed to create status "
			    "thread: %s\n"), strerror(errno));
			zpool_close(zhp);
			return (1);
		}
	}

	/*
	 * Loop over all activities that we are supposed to wait for until none
	 * of them are in progress. Note that this means we can end up waiting
	 * for more activities to complete than just those that were in progress
	 * when we began waiting; if an activity we are interested in begins
	 * while we are waiting for another activity, we will wait for both to
	 * complete before exiting.
	 */
	for (;;) {
		boolean_t missing = B_FALSE;
		boolean_t any_waited = B_FALSE;

		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			boolean_t waited;

			if (!wd.wd_enabled[i])
				continue;

			error = zpool_wait_status(zhp, i, &missing, &waited);
			if (error != 0 || missing)
				break;

			any_waited = (any_waited || waited);
		}

		if (error != 0 || missing || !any_waited)
			break;
	}

	zpool_close(zhp);

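	/* Wake the status thread so it can exit, then collect its status */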
	if (verbose) {
		uintptr_t status;
		pthread_mutex_lock(&wd.wd_mutex);
		wd.wd_should_exit = B_TRUE;
		pthread_cond_signal(&wd.wd_cv);
		pthread_mutex_unlock(&wd.wd_mutex);
		(void) pthread_join(status_thr, (void *)&status);
		if (status != 0)
			error = status;
	}

	pthread_mutex_destroy(&wd.wd_mutex);
	pthread_cond_destroy(&wd.wd_cv);
	return (error);
}

/*
 * zpool ddtprune -d|-p <amount> <pool>
 *
 *	-d <days>	Prune entries <days> old and older
 *	-p <percent>	Prune <percent> amount of entries
 *
 * Prune single reference entries from DDT to satisfy the amount specified.
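 *
 * For example, 'zpool ddtprune -p 10 tank' prunes 10% of the single
 * reference entries from pool 'tank'.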
 */
int
zpool_do_ddt_prune(int argc, char **argv)
{
	zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE;
	uint64_t amount = 0;
	zpool_handle_t *zhp;
	char *endptr;
	int c;

	while ((c = getopt(argc, argv, "d:p:")) != -1) {
		switch (c) {
		case 'd':
			if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) {
				(void) fprintf(stderr, gettext("-d cannot be "
				    "combined with -p option\n"));
				usage(B_FALSE);
			}
			errno = 0;
			amount = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0' || amount == 0) {
				(void) fprintf(stderr,
				    gettext("invalid days value\n"));
				usage(B_FALSE);
			}
			amount *= 86400;	/* convert days to seconds */
			unit = ZPOOL_DDT_PRUNE_AGE;
			break;
		case 'p':
			if (unit == ZPOOL_DDT_PRUNE_AGE) {
				(void) fprintf(stderr, gettext("-p cannot be "
				    "combined with -d option\n"));
				usage(B_FALSE);
			}
			errno = 0;
			amount = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0' ||
			    amount == 0 || amount > 100) {
				(void) fprintf(stderr,
				    gettext("invalid percentage value\n"));
				usage(B_FALSE);
			}
			unit = ZPOOL_DDT_PRUNE_PERCENTAGE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}
	argc -= optind;
	argv += optind;

	if (unit == ZPOOL_DDT_PRUNE_NONE) {
		(void) fprintf(stderr,
		    gettext("missing amount option (-d|-p <value>)\n"));
		usage(B_FALSE);
	} else if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	} else if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}
	zhp = zpool_open(g_zfs, argv[0]);
	if (zhp == NULL)
		return (-1);

	int error = zpool_ddt_prune(zhp, unit, amount);

	zpool_close(zhp);

	return (error);
}

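/*
 * Look up 'command' in command_table; on success store its index in *idx
 * and return 0, otherwise return 1.
 */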
static int
find_command_idx(const char *command, int *idx)
{
	for (int i = 0; i < NCOMMAND; ++i) {
		if (command_table[i].name == NULL)
			continue;

		if (strcmp(command, command_table[i].name) == 0) {
			*idx = i;
			return (0);
		}
	}
	return (1);
}

/*
 * Display version message
 */
static int
zpool_do_version(int argc, char **argv)
{
	int c;
	nvlist_t *jsobj = NULL, *zfs_ver = NULL;
	boolean_t json = B_FALSE;

	struct option long_options[] = {
		{"json", no_argument, NULL, 'j'},
		{0, 0, 0, 0}
	};

	while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
		switch (c) {
		case 'j':
			json = B_TRUE;
			jsobj = zpool_json_schema(0, 1);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	if (argc != 0) {
		(void) fprintf(stderr, "too many arguments\n");
		usage(B_FALSE);
	}

	if (json) {
		zfs_ver = zfs_version_nvlist();
		if (zfs_ver) {
			fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);
			zcmd_print_json(jsobj);
			fnvlist_free(zfs_ver);
			return (0);
		} else
			return (-1);
	} else
		return (zfs_version_print() != 0);
}

/* Display documentation */
static int
zpool_do_help(int argc, char **argv)
{
	char page[MAXNAMELEN];
	if (argc < 3 || strcmp(argv[2], "zpool") == 0)
		strcpy(page, "zpool");
	else if (strcmp(argv[2], "concepts") == 0 ||
	    strcmp(argv[2], "props") == 0)
		snprintf(page, sizeof (page), "zpool%s", argv[2]);
	else
		snprintf(page, sizeof (page), "zpool-%s", argv[2]);

	execlp("man", "man", page, NULL);

	fprintf(stderr, "couldn't run man program: %s", strerror(errno));
	return (-1);
}

/*
 * Do zpool_load_compat() and print error message on failure
 */
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
	char report[1024];

	zpool_compat_status_t ret;

	ret = zpool_load_compat(compat, list, report, 1024);
	switch (ret) {

	case ZPOOL_COMPATIBILITY_OK:
		break;

	case ZPOOL_COMPATIBILITY_NOFILES:
	case ZPOOL_COMPATIBILITY_BADFILE:
	case ZPOOL_COMPATIBILITY_BADTOKEN:
		(void) fprintf(stderr, "Error: %s\n", report);
		break;

	case ZPOOL_COMPATIBILITY_WARNTOKEN:
		(void) fprintf(stderr, "Warning: %s\n", report);
		ret = ZPOOL_COMPATIBILITY_OK;
		break;
	}
	return (ret);
}

int
main(int argc, char **argv)
{
	int ret = 0;
	int i = 0;
	char *cmdname;
	char **newargv;

	(void) setlocale(LC_ALL, "");
	(void) setlocale(LC_NUMERIC, "C");
	(void) textdomain(TEXT_DOMAIN);
	srand(time(NULL));

	opterr = 0;

	/*
	 * Make sure the user has specified some command.
	 */
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing command\n"));
		usage(B_FALSE);
	}

	cmdname = argv[1];

	/*
	 * Special case '-?'
	 */
	if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
		usage(B_TRUE);

	/*
	 * Special case '-V|--version'
	 */
	if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
		return (zfs_version_print() != 0);

	/*
	 * Special case 'help'
	 */
	if (strcmp(cmdname, "help") == 0)
		return (zpool_do_help(argc, argv));

	if ((g_zfs = libzfs_init()) == NULL) {
		(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
		return (1);
	}

	libzfs_print_on_error(g_zfs, B_TRUE);

	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));

	/*
	 * Many commands modify input strings for string parsing reasons.
	 * We create a copy to protect the original argv.
	 */
	newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
	for (i = 0; i < argc; i++)
		newargv[i] = strdup(argv[i]);
	newargv[argc] = NULL;

	/*
	 * Run the appropriate command.
	 */
	if (find_command_idx(cmdname, &i) == 0) {
		current_command = &command_table[i];
		ret = command_table[i].func(argc - 1, newargv + 1);
	} else if (strchr(cmdname, '=')) {
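		/* A 'property=value' argument is an implicit 'zpool set' */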
		verify(find_command_idx("set", &i) == 0);
		current_command = &command_table[i];
		ret = command_table[i].func(argc, newargv);
	} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
		/*
		 * 'freeze' is a vile debugging abomination, so we treat
		 * it as such.
		 */
		zfs_cmd_t zc = {"\0"};

		(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
		ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
		if (ret != 0) {
			(void) fprintf(stderr,
			    gettext("failed to freeze pool: %d\n"), errno);
			ret = 1;
		}

		log_history = 0;
	} else {
		(void) fprintf(stderr, gettext("unrecognized "
		    "command '%s'\n"), cmdname);
		usage(B_FALSE);
		ret = 1;
	}

	for (i = 0; i < argc; i++)
		free(newargv[i]);
	free(newargv);

	if (ret == 0 && log_history)
		(void) zpool_log_history(g_zfs, history_str);

	libzfs_fini(g_zfs);

	/*
	 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
	 * for the purposes of running ::findleaks.
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	return (ret);
}