/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2021, 2023, Klara Inc.
 * Copyright [2021] Hewlett Packard Enterprise Development LP
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>

#include <libzfs.h>
#include <libzutil.h>

#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "zfs_valstr.h"

#include "statcommon.h"

libzfs_handle_t *g_zfs;

static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */

static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);
static int zpool_do_prefetch(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static int zpool_do_ddt_prune(int, char **);

static int zpool_do_help(int argc, char **argv);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

enum zpool_options {
        ZPOOL_OPTION_POWER = 1024,
        ZPOOL_OPTION_ALLOW_INUSE,
        ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
        ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
        ZPOOL_OPTION_POOL_KEY_GUID,
        ZPOOL_OPTION_JSON_NUMS_AS_INT,
        ZPOOL_OPTION_JSON_FLAT_VDEVS
};

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
        return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
        return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
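
/*
 * Illustrative note (assumption, not upstream text): libumem consults these
 * hooks at startup when zpool is linked against it, so a DEBUG build behaves
 * as though it were launched with:
 *
 *	UMEM_DEBUG=default,verbose UMEM_LOGGING=fail,contents zpool <command>
 */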

typedef enum {
        HELP_ADD,
        HELP_ATTACH,
        HELP_CLEAR,
        HELP_CREATE,
        HELP_CHECKPOINT,
        HELP_DDT_PRUNE,
        HELP_DESTROY,
        HELP_DETACH,
        HELP_EXPORT,
        HELP_HISTORY,
        HELP_IMPORT,
        HELP_IOSTAT,
        HELP_LABELCLEAR,
        HELP_LIST,
        HELP_OFFLINE,
        HELP_ONLINE,
        HELP_PREFETCH,
        HELP_REPLACE,
        HELP_REMOVE,
        HELP_INITIALIZE,
        HELP_SCRUB,
        HELP_RESILVER,
        HELP_TRIM,
        HELP_STATUS,
        HELP_UPGRADE,
        HELP_EVENTS,
        HELP_GET,
        HELP_SET,
        HELP_SPLIT,
        HELP_SYNC,
        HELP_REGUID,
        HELP_REOPEN,
        HELP_VERSION,
        HELP_WAIT
} zpool_help_t;

/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
        IOS_DEFAULT = 0,
        IOS_LATENCY = 1,
        IOS_QUEUES = 2,
        IOS_L_HISTO = 3,
        IOS_RQ_HISTO = 4,
        IOS_COUNT, /* always last element */
};

/* iostat_type entries as bitmasks */
#define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
#define IOS_LATENCY_M (1ULL << IOS_LATENCY)
#define IOS_QUEUES_M (1ULL << IOS_QUEUES)
#define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
#define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)

/*
 * Lookup table for iostat flags to nvlist names. Basically a list
 * of all the nvlists a flag requires. Also specifies the order in
 * which data gets printed in zpool iostat.
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
        [IOS_L_HISTO] = {
            ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
            NULL},
        [IOS_LATENCY] = {
            ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
            NULL},
        [IOS_QUEUES] = {
            ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
            ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
            ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
            ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
            ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
            ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
            ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
            NULL},
        [IOS_RQ_HISTO] = {
            ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
            ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
            ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
            ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
            ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
            ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
            ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
            ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
            ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
            ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
            ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
            ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
            ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
            ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
            NULL},
};
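
/*
 * Illustrative sketch (not upstream code): each row above is a
 * NULL-terminated list, so a consumer can walk the nvlist names a given
 * iostat type requires, in print order:
 *
 *	for (int i = 0; vsx_type_to_nvlist[IOS_QUEUES][i] != NULL; i++)
 *		(void) printf("%s\n", vsx_type_to_nvlist[IOS_QUEUES][i]);
 */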

static const char *pool_scan_func_str[] = {
        "NONE",
        "SCRUB",
        "RESILVER",
        "ERRORSCRUB"
};

static const char *pool_scan_state_str[] = {
        "NONE",
        "SCANNING",
        "FINISHED",
        "CANCELED",
        "ERRORSCRUBBING"
};

static const char *vdev_rebuild_state_str[] = {
        "NONE",
        "ACTIVE",
        "CANCELED",
        "COMPLETE"
};

static const char *checkpoint_state_str[] = {
        "NONE",
        "EXISTS",
        "DISCARDING"
};

static const char *vdev_state_str[] = {
        "UNKNOWN",
        "CLOSED",
        "OFFLINE",
        "REMOVED",
        "CANT_OPEN",
        "FAULTED",
        "DEGRADED",
        "ONLINE"
};

static const char *vdev_aux_str[] = {
        "NONE",
        "OPEN_FAILED",
        "CORRUPT_DATA",
        "NO_REPLICAS",
        "BAD_GUID_SUM",
        "TOO_SMALL",
        "BAD_LABEL",
        "VERSION_NEWER",
        "VERSION_OLDER",
        "UNSUP_FEAT",
        "SPARED",
        "ERR_EXCEEDED",
        "IO_FAILURE",
        "BAD_LOG",
        "EXTERNAL",
        "SPLIT_POOL",
        "BAD_ASHIFT",
        "EXTERNAL_PERSIST",
        "ACTIVE",
        "CHILDREN_OFFLINE",
        "ASHIFT_TOO_BIG"
};

static const char *vdev_init_state_str[] = {
        "NONE",
        "ACTIVE",
        "CANCELED",
        "SUSPENDED",
        "COMPLETE"
};

static const char *vdev_trim_state_str[] = {
        "NONE",
        "ACTIVE",
        "CANCELED",
        "SUSPENDED",
        "COMPLETE"
};

#define ZFS_NICE_TIMESTAMP 100

/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at a time, so we can
 * just use highbit64().
 */
#define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
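
/*
 * Worked example (illustrative): IOS_HISTO_IDX(IOS_RQ_HISTO_M) masks the
 * flags down to bit 4, and highbit64(16) - 1 == 4 == IOS_RQ_HISTO.
 */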

typedef struct zpool_command {
        const char *name;
        int (*func)(int, char **);
        zpool_help_t usage;
} zpool_command_t;

/*
 * Master command table. Each ZFS command has a name, associated function, and
 * usage message. The usage messages need to be internationalized, so we have
 * to have a function to return the usage message based on a command index.
 *
 * These commands are organized according to how they are displayed in the
 * usage message. An empty command (one with a NULL name) indicates an empty
 * line in the generic usage message.
 */
static zpool_command_t command_table[] = {
        { "version", zpool_do_version, HELP_VERSION },
        { NULL },
        { "create", zpool_do_create, HELP_CREATE },
        { "destroy", zpool_do_destroy, HELP_DESTROY },
        { NULL },
        { "add", zpool_do_add, HELP_ADD },
        { "remove", zpool_do_remove, HELP_REMOVE },
        { NULL },
        { "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
        { NULL },
        { "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
        { "prefetch", zpool_do_prefetch, HELP_PREFETCH },
        { NULL },
        { "list", zpool_do_list, HELP_LIST },
        { "iostat", zpool_do_iostat, HELP_IOSTAT },
        { "status", zpool_do_status, HELP_STATUS },
        { NULL },
        { "online", zpool_do_online, HELP_ONLINE },
        { "offline", zpool_do_offline, HELP_OFFLINE },
        { "clear", zpool_do_clear, HELP_CLEAR },
        { "reopen", zpool_do_reopen, HELP_REOPEN },
        { NULL },
        { "attach", zpool_do_attach, HELP_ATTACH },
        { "detach", zpool_do_detach, HELP_DETACH },
        { "replace", zpool_do_replace, HELP_REPLACE },
        { "split", zpool_do_split, HELP_SPLIT },
        { NULL },
        { "initialize", zpool_do_initialize, HELP_INITIALIZE },
        { "resilver", zpool_do_resilver, HELP_RESILVER },
        { "scrub", zpool_do_scrub, HELP_SCRUB },
        { "trim", zpool_do_trim, HELP_TRIM },
        { NULL },
        { "import", zpool_do_import, HELP_IMPORT },
        { "export", zpool_do_export, HELP_EXPORT },
        { "upgrade", zpool_do_upgrade, HELP_UPGRADE },
        { "reguid", zpool_do_reguid, HELP_REGUID },
        { NULL },
        { "history", zpool_do_history, HELP_HISTORY },
        { "events", zpool_do_events, HELP_EVENTS },
        { NULL },
        { "get", zpool_do_get, HELP_GET },
        { "set", zpool_do_set, HELP_SET },
        { "sync", zpool_do_sync, HELP_SYNC },
        { NULL },
        { "wait", zpool_do_wait, HELP_WAIT },
        { NULL },
        { "ddtprune", zpool_do_ddt_prune, HELP_DDT_PRUNE },
};

#define NCOMMAND (ARRAY_SIZE(command_table))
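
/*
 * Minimal dispatch sketch (illustrative; the real lookup lives in main(),
 * outside this excerpt). Scan the table for a matching name, skipping the
 * NULL spacer rows, then call the handler ("cmdname" stands in for argv[1]):
 *
 *	for (i = 0; i < NCOMMAND; i++) {
 *		if (command_table[i].name != NULL &&
 *		    strcmp(cmdname, command_table[i].name) == 0)
 *			return (command_table[i].func(argc - 1, argv + 1));
 *	}
 */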

#define VDEV_ALLOC_CLASS_LOGS "logs"

#define MAX_CMD_LEN 256

static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;

static const char *
get_usage(zpool_help_t idx)
{
        switch (idx) {
        case HELP_ADD:
                return (gettext("\tadd [-afgLnP] [-o property=value] "
                    "<pool> <vdev> ...\n"));
        case HELP_ATTACH:
                return (gettext("\tattach [-fsw] [-o property=value] "
                    "<pool> <device> <new-device>\n"));
        case HELP_CLEAR:
                return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
        case HELP_CREATE:
                return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
                    "\t [-O file-system-property=value] ... \n"
                    "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
        case HELP_CHECKPOINT:
                return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
        case HELP_DESTROY:
                return (gettext("\tdestroy [-f] <pool>\n"));
        case HELP_DETACH:
                return (gettext("\tdetach <pool> <device>\n"));
        case HELP_EXPORT:
                return (gettext("\texport [-af] <pool> ...\n"));
        case HELP_HISTORY:
                return (gettext("\thistory [-il] [<pool>] ...\n"));
        case HELP_IMPORT:
                return (gettext("\timport [-d dir] [-D]\n"
                    "\timport [-o mntopts] [-o property=value] ... \n"
                    "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
                    "[-R root] [-F [-n]] -a\n"
                    "\timport [-o mntopts] [-o property=value] ... \n"
                    "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
                    "[-R root] [-F [-n]]\n"
                    "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
        case HELP_IOSTAT:
                return (gettext("\tiostat [[[-c [script1,script2,...]"
                    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
                    "\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
                    " [[-n] interval [count]]\n"));
        case HELP_LABELCLEAR:
                return (gettext("\tlabelclear [-f] <vdev>\n"));
        case HELP_LIST:
                return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "
                    "[--json-int, --json-pool-key-guid]] ...\n"
                    "\t [-T d|u] [pool] [interval [count]]\n"));
        case HELP_PREFETCH:
                return (gettext("\tprefetch -t <type> [<type opts>] <pool>\n"
                    "\t -t ddt <pool>\n"));
        case HELP_OFFLINE:
                return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
                    "<device> ...\n"));
        case HELP_ONLINE:
                return (gettext("\tonline [--power][-e] <pool> <device> "
                    "...\n"));
        case HELP_REPLACE:
                return (gettext("\treplace [-fsw] [-o property=value] "
                    "<pool> <device> [new-device]\n"));
        case HELP_REMOVE:
                return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
        case HELP_REOPEN:
                return (gettext("\treopen [-n] <pool>\n"));
        case HELP_INITIALIZE:
                return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
                    "[<device> ...]\n"));
        case HELP_SCRUB:
                return (gettext("\tscrub [-s | -p] [-w] [-e] <pool> ...\n"));
        case HELP_RESILVER:
                return (gettext("\tresilver <pool> ...\n"));
        case HELP_TRIM:
                return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
                    "[<device> ...]\n"));
        case HELP_STATUS:
                return (gettext("\tstatus [--power] [-j [--json-int, "
                    "--json-flat-vdevs, ...\n"
                    "\t --json-pool-key-guid]] [-c [script1,script2,...]] "
                    "[-dDegiLpPstvx] ...\n"
                    "\t [-T d|u] [pool] [interval [count]]\n"));
        case HELP_UPGRADE:
                return (gettext("\tupgrade\n"
                    "\tupgrade -v\n"
                    "\tupgrade [-V version] <-a | pool ...>\n"));
        case HELP_EVENTS:
                return (gettext("\tevents [-vHf [pool] | -c]\n"));
        case HELP_GET:
                return (gettext("\tget [-Hp] [-j [--json-int, "
                    "--json-pool-key-guid]] ...\n"
                    "\t [-o \"all\" | field[,...]] "
                    "<\"all\" | property[,...]> <pool> ...\n"));
        case HELP_SET:
                return (gettext("\tset <property=value> <pool>\n"
                    "\tset <vdev_property=value> <pool> <vdev>\n"));
        case HELP_SPLIT:
                return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
                    "\t [-o property=value] <pool> <newpool> "
                    "[<device> ...]\n"));
        case HELP_REGUID:
                return (gettext("\treguid [-g guid] <pool>\n"));
        case HELP_SYNC:
                return (gettext("\tsync [pool] ...\n"));
        case HELP_VERSION:
                return (gettext("\tversion [-j]\n"));
        case HELP_WAIT:
                return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
                    "<pool> [interval]\n"));
        case HELP_DDT_PRUNE:
                return (gettext("\tddtprune -d|-p <amount> <pool>\n"));
        default:
                __builtin_unreachable();
        }
}

static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
        uint_t children = 0;
        nvlist_t **child;
        uint_t i;

        (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
            &child, &children);

        if (children == 0) {
                char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
                    VDEV_NAME_PATH);

                if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
                    strcmp(path, VDEV_TYPE_HOLE) != 0)
                        fnvlist_add_boolean(res, path);

                free(path);
                return;
        }

        for (i = 0; i < children; i++) {
                zpool_collect_leaves(zhp, child[i], res);
        }
}

/*
 * Callback routine that will print out a pool property value.
 */
static int
print_pool_prop_cb(int prop, void *cb)
{
        FILE *fp = cb;

        (void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));

        if (zpool_prop_readonly(prop))
                (void) fprintf(fp, " NO ");
        else
                (void) fprintf(fp, " YES ");

        if (zpool_prop_values(prop) == NULL)
                (void) fprintf(fp, "-\n");
        else
                (void) fprintf(fp, "%s\n", zpool_prop_values(prop));

        return (ZPROP_CONT);
}

/*
 * Callback routine that will print out a vdev property value.
 */
static int
print_vdev_prop_cb(int prop, void *cb)
{
        FILE *fp = cb;

        (void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));

        if (vdev_prop_readonly(prop))
                (void) fprintf(fp, " NO ");
        else
                (void) fprintf(fp, " YES ");

        if (vdev_prop_values(prop) == NULL)
                (void) fprintf(fp, "-\n");
        else
                (void) fprintf(fp, "%s\n", vdev_prop_values(prop));

        return (ZPROP_CONT);
}

/*
 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
 * '/dev/disk/by-vdev/L5'.
 */
static const char *
vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
{
        nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
        if (vdev_nv == NULL) {
                return (NULL);
        }
        return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
}

static int
zpool_power_on(zpool_handle_t *zhp, char *vdev)
{
        return (zpool_power(zhp, vdev, B_TRUE));
}

static int
zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
{
        int rc;

        rc = zpool_power_on(zhp, vdev);
        if (rc != 0)
                return (rc);

        zpool_disk_wait(vdev_name_to_path(zhp, vdev));

        return (0);
}

static int
zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
{
        nvlist_t *nv;
        const char *path = NULL;
        int rc;

        /* Power up all the devices first */
        FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
                path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
                if (path != NULL) {
                        rc = zpool_power_on(zhp, (char *)path);
                        if (rc != 0) {
                                return (rc);
                        }
                }
        }

        /*
         * Wait for their devices to show up. Since we powered them on
         * at roughly the same time, they should all come online around
         * the same time.
         */
        FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
                path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
                zpool_disk_wait(path);
        }

        return (0);
}

static int
zpool_power_off(zpool_handle_t *zhp, char *vdev)
{
        return (zpool_power(zhp, vdev, B_FALSE));
}

/*
 * Display usage message. If we're inside a command, display only the usage for
 * that command. Otherwise, iterate over the entire command table and display
 * a complete usage message.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
        FILE *fp = requested ? stdout : stderr;

        if (current_command == NULL) {
                int i;

                (void) fprintf(fp, gettext("usage: zpool command args ...\n"));
                (void) fprintf(fp,
                    gettext("where 'command' is one of the following:\n\n"));

                for (i = 0; i < NCOMMAND; i++) {
                        if (command_table[i].name == NULL)
                                (void) fprintf(fp, "\n");
                        else
                                (void) fprintf(fp, "%s",
                                    get_usage(command_table[i].usage));
                }

                (void) fprintf(fp,
                    gettext("\nFor further help on a command or topic, "
                    "run: %s\n"), "zpool help [<topic>]");
        } else {
                (void) fprintf(fp, gettext("usage:\n"));
                (void) fprintf(fp, "%s", get_usage(current_command->usage));
        }

        if (current_command != NULL &&
            current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
            ((strcmp(current_command->name, "set") == 0) ||
            (strcmp(current_command->name, "get") == 0) ||
            (strcmp(current_command->name, "list") == 0))) {

                (void) fprintf(fp, "%s",
                    gettext("\nthe following properties are supported:\n"));

                (void) fprintf(fp, "\n\t%-19s %s %s\n\n",
                    "PROPERTY", "EDIT", "VALUES");

                /* Iterate over all properties */
                if (current_prop_type == ZFS_TYPE_POOL) {
                        (void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
                            B_TRUE, current_prop_type);

                        (void) fprintf(fp, "\t%-19s ", "feature@...");
                        (void) fprintf(fp, "YES "
                            "disabled | enabled | active\n");

                        (void) fprintf(fp, gettext("\nThe feature@ properties "
                            "must be appended with a feature name.\n"
                            "See zpool-features(7).\n"));
                } else if (current_prop_type == ZFS_TYPE_VDEV) {
                        (void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
                            B_TRUE, current_prop_type);
                }
        }

        /*
         * See comments at end of main().
         */
        if (getenv("ZFS_ABORT") != NULL) {
                (void) printf("dumping core by request\n");
                abort();
        }

        exit(requested ? 0 : 2);
}

/*
 * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
 * if none specified.
 *
 * -c Cancel. Ends active initializing.
 * -s Suspend. Initializing can then be restarted with no flags.
 * -u Uninitialize. Clears initialization state.
 * -w Wait. Blocks until initializing has completed.
 */
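
/*
 * Illustrative invocations (not upstream text):
 *
 *	zpool initialize tank		start on every leaf vdev in the pool
 *	zpool initialize -w tank sda	initialize sda and block until done
 *	zpool initialize -s tank	suspend; restart later with no flags
 */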
int
zpool_do_initialize(int argc, char **argv)
{
        int c;
        char *poolname;
        zpool_handle_t *zhp;
        nvlist_t *vdevs;
        int err = 0;
        boolean_t wait = B_FALSE;

        struct option long_options[] = {
                {"cancel", no_argument, NULL, 'c'},
                {"suspend", no_argument, NULL, 's'},
                {"uninit", no_argument, NULL, 'u'},
                {"wait", no_argument, NULL, 'w'},
                {0, 0, 0, 0}
        };

        pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
        while ((c = getopt_long(argc, argv, "csuw", long_options,
            NULL)) != -1) {
                switch (c) {
                case 'c':
                        if (cmd_type != POOL_INITIALIZE_START &&
                            cmd_type != POOL_INITIALIZE_CANCEL) {
                                (void) fprintf(stderr, gettext("-c cannot be "
                                    "combined with other options\n"));
                                usage(B_FALSE);
                        }
                        cmd_type = POOL_INITIALIZE_CANCEL;
                        break;
                case 's':
                        if (cmd_type != POOL_INITIALIZE_START &&
                            cmd_type != POOL_INITIALIZE_SUSPEND) {
                                (void) fprintf(stderr, gettext("-s cannot be "
                                    "combined with other options\n"));
                                usage(B_FALSE);
                        }
                        cmd_type = POOL_INITIALIZE_SUSPEND;
                        break;
                case 'u':
                        if (cmd_type != POOL_INITIALIZE_START &&
                            cmd_type != POOL_INITIALIZE_UNINIT) {
                                (void) fprintf(stderr, gettext("-u cannot be "
                                    "combined with other options\n"));
                                usage(B_FALSE);
                        }
                        cmd_type = POOL_INITIALIZE_UNINIT;
                        break;
                case 'w':
                        wait = B_TRUE;
                        break;
                case '?':
                        if (optopt != 0) {
                                (void) fprintf(stderr,
                                    gettext("invalid option '%c'\n"), optopt);
                        } else {
                                (void) fprintf(stderr,
                                    gettext("invalid option '%s'\n"),
                                    argv[optind - 1]);
                        }
                        usage(B_FALSE);
                }
        }

        argc -= optind;
        argv += optind;

        if (argc < 1) {
                (void) fprintf(stderr, gettext("missing pool name argument\n"));
                usage(B_FALSE);
                return (-1);
        }

        if (wait && (cmd_type != POOL_INITIALIZE_START)) {
                (void) fprintf(stderr, gettext("-w cannot be used with -c, -s, "
                    "or -u\n"));
                usage(B_FALSE);
        }

        poolname = argv[0];
        zhp = zpool_open(g_zfs, poolname);
        if (zhp == NULL)
                return (-1);

        vdevs = fnvlist_alloc();
        if (argc == 1) {
                /* no individual leaf vdevs specified, so add them all */
                nvlist_t *config = zpool_get_config(zhp, NULL);
                nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
                    ZPOOL_CONFIG_VDEV_TREE);
                zpool_collect_leaves(zhp, nvroot, vdevs);
        } else {
                for (int i = 1; i < argc; i++) {
                        fnvlist_add_boolean(vdevs, argv[i]);
                }
        }

        if (wait)
                err = zpool_initialize_wait(zhp, cmd_type, vdevs);
        else
                err = zpool_initialize(zhp, cmd_type, vdevs);

        fnvlist_free(vdevs);
        zpool_close(zhp);

        return (err);
}

/*
 * print a pool vdev config for dry runs
 */
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
    const char *match, int name_flags)
{
        nvlist_t **child;
        uint_t c, children;
        char *vname;
        boolean_t printed = B_FALSE;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0) {
                if (name != NULL)
                        (void) printf("\t%*s%s\n", indent, "", name);
                return;
        }

        for (c = 0; c < children; c++) {
                uint64_t is_log = B_FALSE, is_hole = B_FALSE;
                const char *class = "";

                (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
                    &is_hole);

                if (is_hole == B_TRUE) {
                        continue;
                }

                (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
                    &is_log);
                if (is_log)
                        class = VDEV_ALLOC_BIAS_LOG;
                (void) nvlist_lookup_string(child[c],
                    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
                if (strcmp(match, class) != 0)
                        continue;

                if (!printed && name != NULL) {
                        (void) printf("\t%*s%s\n", indent, "", name);
                        printed = B_TRUE;
                }
                vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
                print_vdev_tree(zhp, vname, child[c], indent + 2, "",
                    name_flags);
                free(vname);
        }
}

/*
 * Print the list of l2cache devices for dry runs.
 */
static void
print_cache_list(nvlist_t *nv, int indent)
{
        nvlist_t **child;
        uint_t c, children;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
            &child, &children) == 0 && children > 0) {
                (void) printf("\t%*s%s\n", indent, "", "cache");
        } else {
                return;
        }
        for (c = 0; c < children; c++) {
                char *vname;

                vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
                (void) printf("\t%*s%s\n", indent + 2, "", vname);
                free(vname);
        }
}

/*
 * Print the list of spares for dry runs.
 */
static void
print_spare_list(nvlist_t *nv, int indent)
{
        nvlist_t **child;
        uint_t c, children;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
            &child, &children) == 0 && children > 0) {
                (void) printf("\t%*s%s\n", indent, "", "spares");
        } else {
                return;
        }
        for (c = 0; c < children; c++) {
                char *vname;

                vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
                (void) printf("\t%*s%s\n", indent + 2, "", vname);
                free(vname);
        }
}

typedef struct spare_cbdata {
        uint64_t cb_guid;
        zpool_handle_t *cb_zhp;
} spare_cbdata_t;

static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
        uint64_t guid;
        nvlist_t **child;
        uint_t c, children;

        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
            search == guid)
                return (B_TRUE);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (find_vdev(child[c], search))
                                return (B_TRUE);
        }

        return (B_FALSE);
}

static int
find_spare(zpool_handle_t *zhp, void *data)
{
        spare_cbdata_t *cbp = data;
        nvlist_t *config, *nvroot;

        config = zpool_get_config(zhp, NULL);
        verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);

        if (find_vdev(nvroot, cbp->cb_guid)) {
                cbp->cb_zhp = zhp;
                return (1);
        }

        zpool_close(zhp);
        return (0);
}

static void
nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
    boolean_t literal, boolean_t as_int, int format)
{
        char buf[256];
        if (literal) {
                if (!as_int)
                        snprintf(buf, 256, "%llu", (u_longlong_t)value);
        } else {
                switch (format) {
                case ZFS_NICENUM_1024:
                        zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
                        break;
                case ZFS_NICENUM_BYTES:
                        zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
                        break;
                case ZFS_NICENUM_TIME:
                        zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
                        break;
                case ZFS_NICE_TIMESTAMP:
                        format_timestamp(value, buf, 256);
                        break;
                default:
                        fprintf(stderr, "Invalid number format\n");
                        exit(1);
                }
        }
        if (as_int)
                fnvlist_add_uint64(item, key, value);
        else
                fnvlist_add_string(item, key, buf);
}

/*
 * Generates an nvlist with an output version for every command, based on its
 * parameters. The purpose is to version the JSON output, since the schema
 * format might be updated for each command in the future.
 *
 * Schema:
 *
 * "output_version": {
 *     "command": string,
 *     "vers_major": integer,
 *     "vers_minor": integer,
 * }
 */
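
/*
 * Example (illustrative): zpool_json_schema(0, 1) called while running
 * "zpool status" yields an nvlist that serializes to JSON as:
 *
 *	"output_version": {
 *	    "command": "zpool status",
 *	    "vers_major": 0,
 *	    "vers_minor": 1
 *	}
 */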
static nvlist_t *
zpool_json_schema(int maj_v, int min_v)
{
        char cmd[MAX_CMD_LEN];
        nvlist_t *sch = fnvlist_alloc();
        nvlist_t *ov = fnvlist_alloc();

        snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
        fnvlist_add_string(ov, "command", cmd);
        fnvlist_add_uint32(ov, "vers_major", maj_v);
        fnvlist_add_uint32(ov, "vers_minor", min_v);
        fnvlist_add_nvlist(sch, "output_version", ov);
        fnvlist_free(ov);
        return (sch);
}

static void
fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
    boolean_t as_int)
{
        nvlist_t *config = zpool_get_config(zhp, NULL);
        uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
        uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);

        fnvlist_add_string(list, "name", zpool_get_name(zhp));
        if (addtype)
                fnvlist_add_string(list, "type", "POOL");
        fnvlist_add_string(list, "state", zpool_get_state_str(zhp));
        if (as_int) {
                if (guid)
                        fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);
                if (txg)
                        fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);
                fnvlist_add_uint64(list, "spa_version", SPA_VERSION);
                fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);
        } else {
                char value[ZFS_MAXPROPLEN];
                if (guid) {
                        snprintf(value, ZFS_MAXPROPLEN, "%llu",
                            (u_longlong_t)guid);
                        fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
                }
                if (txg) {
                        snprintf(value, ZFS_MAXPROPLEN, "%llu",
                            (u_longlong_t)txg);
                        fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
                }
                fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);
                fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);
        }
}

static void
used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
{
        spare_cbdata_t spare_cb;
        verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
            &spare_cb.cb_guid) == 0);
        if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
                if (strcmp(zpool_get_name(spare_cb.cb_zhp),
                    zpool_get_name(zhp)) != 0) {
                        fnvlist_add_string(list, "used_by",
                            zpool_get_name(spare_cb.cb_zhp));
                }
                zpool_close(spare_cb.cb_zhp);
        }
}

static void
fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
    boolean_t addtype, boolean_t as_int)
{
        boolean_t l2c = B_FALSE;
        const char *path, *phys, *devid, *bias = NULL;
        uint64_t hole = 0, log = 0, spare = 0;
        vdev_stat_t *vs;
        uint_t c;
        nvlist_t *nvdev;
        nvlist_t *nvdev_parent = NULL;
        char *_name;

        if (strcmp(name, zpool_get_name(zhp)) != 0)
                _name = name;
        else
                _name = (char *)"root-0";

        nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);

        fnvlist_add_string(list, "name", name);
        if (addtype)
                fnvlist_add_string(list, "type", "VDEV");
        if (nvdev) {
                const char *type = fnvlist_lookup_string(nvdev,
                    ZPOOL_CONFIG_TYPE);
                if (type)
                        fnvlist_add_string(list, "vdev_type", type);
                uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);
                if (guid) {
                        if (as_int) {
                                fnvlist_add_uint64(list, "guid", guid);
                        } else {
                                char buf[ZFS_MAXPROPLEN];
                                snprintf(buf, ZFS_MAXPROPLEN, "%llu",
                                    (u_longlong_t)guid);
                                fnvlist_add_string(list, "guid", buf);
                        }
                }
                if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
                        fnvlist_add_string(list, "path", path);
                if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
                    &phys) == 0)
                        fnvlist_add_string(list, "phys_path", phys);
                if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
                    &devid) == 0)
                        fnvlist_add_string(list, "devid", devid);
                (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
                (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
                    &spare);
                (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
                if (hole)
                        fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
                else if (l2c)
                        fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
                else if (spare)
                        fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
                else if (log)
                        fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
                else {
                        (void) nvlist_lookup_string(nvdev,
                            ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
                        if (bias != NULL)
                                fnvlist_add_string(list, "class", bias);
                        else {
                                nvdev_parent = NULL;
                                nvdev_parent = zpool_find_parent_vdev(zhp,
                                    _name, NULL, NULL, NULL);

                                /*
                                 * With a mirrored special device, the parent
                                 * "mirror" vdev will have
                                 * ZPOOL_CONFIG_ALLOCATION_BIAS set to "special"
                                 * not the leaf vdevs. If we're a leaf vdev
                                 * in that case we need to look at our parent
                                 * to see if they're "special" to know if we
                                 * are "special" too.
                                 */
                                if (nvdev_parent) {
                                        (void) nvlist_lookup_string(
                                            nvdev_parent,
                                            ZPOOL_CONFIG_ALLOCATION_BIAS,
                                            &bias);
                                }
                                if (bias != NULL)
                                        fnvlist_add_string(list, "class", bias);
                                else
                                        fnvlist_add_string(list, "class",
                                            "normal");
                        }
                }
                if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
                    (uint64_t **)&vs, &c) == 0) {
                        fnvlist_add_string(list, "state",
                            vdev_state_str[vs->vs_state]);
                }
        }
}

static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
        nvpair_t *nvp;
        for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
            nvp = nvlist_next_nvpair(proplist, nvp)) {
                if (zpool_prop_feature(nvpair_name(nvp)))
                        return (B_TRUE);
        }
        return (B_FALSE);
}

/*
 * Add a property pair (name, string-value) into a property nvlist.
 */
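
/*
 * Usage sketch (illustrative): parsing "-o ashift=12" for a pool command
 * boils down to:
 *
 *	nvlist_t *props = NULL;
 *	if (add_prop_list("ashift", "12", &props, B_TRUE) != 0)
 *		usage(B_FALSE);
 *
 * A nonzero return indicates an invalid property name, a conflicting
 * combination, or a duplicate entry.
 */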
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
        zpool_prop_t prop = ZPOOL_PROP_INVAL;
        nvlist_t *proplist;
        const char *normnm;
        const char *strval;

        if (*props == NULL &&
            nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
                (void) fprintf(stderr,
                    gettext("internal error: out of memory\n"));
                return (1);
        }

        proplist = *props;

        if (poolprop) {
                const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
                const char *cname =
                    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

                if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
                    (!zpool_prop_feature(propname) &&
                    !zpool_prop_vdev(propname))) {
                        (void) fprintf(stderr, gettext("property '%s' is "
                            "not a valid pool or vdev property\n"), propname);
                        return (2);
                }

                /*
                 * feature@ properties and version should not be specified
                 * at the same time.
                 */
                if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
                    nvlist_exists(proplist, vname)) ||
                    (prop == ZPOOL_PROP_VERSION &&
                    prop_list_contains_feature(proplist))) {
                        (void) fprintf(stderr, gettext("'feature@' and "
                            "'version' properties cannot be specified "
                            "together\n"));
                        return (2);
                }

                /*
                 * if version is specified, only "legacy" compatibility
                 * may be requested
                 */
                if ((prop == ZPOOL_PROP_COMPATIBILITY &&
                    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
                    nvlist_exists(proplist, vname)) ||
                    (prop == ZPOOL_PROP_VERSION &&
                    nvlist_exists(proplist, cname) &&
                    strcmp(fnvlist_lookup_string(proplist, cname),
                    ZPOOL_COMPAT_LEGACY) != 0)) {
                        (void) fprintf(stderr, gettext("when 'version' is "
                            "specified, the 'compatibility' feature may only "
                            "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
                        return (2);
                }

                if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
                        normnm = propname;
                else
                        normnm = zpool_prop_to_name(prop);
        } else {
                zfs_prop_t fsprop = zfs_name_to_prop(propname);

                if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
                    B_FALSE)) {
                        normnm = zfs_prop_to_name(fsprop);
                } else if (zfs_prop_user(propname) ||
                    zfs_prop_userquota(propname)) {
                        normnm = propname;
                } else {
                        (void) fprintf(stderr, gettext("property '%s' is "
                            "not a valid filesystem property\n"), propname);
                        return (2);
                }
        }

        if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
            prop != ZPOOL_PROP_CACHEFILE) {
                (void) fprintf(stderr, gettext("property '%s' "
                    "specified multiple times\n"), propname);
                return (2);
        }

        if (nvlist_add_string(proplist, normnm, propval) != 0) {
                (void) fprintf(stderr, gettext("internal "
                    "error: out of memory\n"));
                return (1);
        }

        return (0);
}

/*
 * Set a default property pair (name, string-value) in a property nvlist
 */
static int
add_prop_list_default(const char *propname, const char *propval,
    nvlist_t **props)
{
        const char *pval;

        if (nvlist_lookup_string(*props, propname, &pval) == 0)
                return (0);

        return (add_prop_list(propname, propval, props, B_TRUE));
}

/*
 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
 *
 * -a Disable the ashift validation checks
 * -f Force addition of devices, even if they appear in use
 * -g Display guid for individual vdev name.
 * -L Follow links when resolving vdev path name.
 * -n Do not add the devices, but display the resulting layout if
 *    they were to be added.
 * -o Set property=value.
 * -P Display full path for vdev name.
 *
 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
 * libzfs.
 */
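
/*
 * Illustrative invocations (not upstream text):
 *
 *	zpool add tank mirror sdc sdd	add a second mirror vdev
 *	zpool add -n tank cache sde	dry run: print the resulting layout
 */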
int
zpool_do_add(int argc, char **argv)
{
        boolean_t check_replication = B_TRUE;
        boolean_t check_inuse = B_TRUE;
        boolean_t dryrun = B_FALSE;
        boolean_t check_ashift = B_TRUE;
        boolean_t force = B_FALSE;
        int name_flags = 0;
        int c;
        nvlist_t *nvroot;
        char *poolname;
        int ret;
        zpool_handle_t *zhp;
        nvlist_t *config;
        nvlist_t *props = NULL;
        char *propval;

        struct option long_options[] = {
                {"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
                {"allow-replication-mismatch", no_argument, NULL,
                    ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
                {"allow-ashift-mismatch", no_argument, NULL,
                    ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
                {0, 0, 0, 0}
        };

        /* check options */
        while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
            != -1) {
                switch (c) {
                case 'f':
                        force = B_TRUE;
                        break;
                case 'g':
                        name_flags |= VDEV_NAME_GUID;
                        break;
                case 'L':
                        name_flags |= VDEV_NAME_FOLLOW_LINKS;
                        break;
                case 'n':
                        dryrun = B_TRUE;
                        break;
                case 'o':
                        if ((propval = strchr(optarg, '=')) == NULL) {
                                (void) fprintf(stderr, gettext("missing "
                                    "'=' for -o option\n"));
                                usage(B_FALSE);
                        }
                        *propval = '\0';
                        propval++;

                        if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
                            (add_prop_list(optarg, propval, &props, B_TRUE)))
                                usage(B_FALSE);
                        break;
                case 'P':
                        name_flags |= VDEV_NAME_PATH;
                        break;
                case ZPOOL_OPTION_ALLOW_INUSE:
                        check_inuse = B_FALSE;
                        break;
                case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
                        check_replication = B_FALSE;
                        break;
                case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
                        check_ashift = B_FALSE;
                        break;
                case '?':
                        (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                            optopt);
                        usage(B_FALSE);
                }
        }

        argc -= optind;
        argv += optind;

        /* get pool name and check number of arguments */
        if (argc < 1) {
                (void) fprintf(stderr, gettext("missing pool name argument\n"));
                usage(B_FALSE);
        }
        if (argc < 2) {
                (void) fprintf(stderr, gettext("missing vdev specification\n"));
                usage(B_FALSE);
        }

        if (force) {
                if (!check_inuse || !check_replication || !check_ashift) {
                        (void) fprintf(stderr, gettext("'-f' option is not "
                            "allowed with '--allow-replication-mismatch', "
                            "'--allow-ashift-mismatch', or "
                            "'--allow-in-use'\n"));
                        usage(B_FALSE);
                }
                check_inuse = B_FALSE;
                check_replication = B_FALSE;
                check_ashift = B_FALSE;
        }

        poolname = argv[0];

        argc--;
        argv++;

        if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
                return (1);

        if ((config = zpool_get_config(zhp, NULL)) == NULL) {
                (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
                    poolname);
                zpool_close(zhp);
                return (1);
        }

        /* unless manually specified use "ashift" pool property (if set) */
        if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
                int intval;
                zprop_source_t src;
                char strval[ZPOOL_MAXPROPLEN];

                intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
                if (src != ZPROP_SRC_DEFAULT) {
                        (void) sprintf(strval, "%" PRId32, intval);
                        verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
                            &props, B_TRUE) == 0);
                }
        }

        /* pass off to make_root_vdev for processing */
        nvroot = make_root_vdev(zhp, props, !check_inuse,
            check_replication, B_FALSE, dryrun, argc, argv);
        if (nvroot == NULL) {
                zpool_close(zhp);
                return (1);
        }

        if (dryrun) {
                nvlist_t *poolnvroot;
                nvlist_t **l2child, **sparechild;
                uint_t l2children, sparechildren, c;
                char *vname;
                boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &poolnvroot) == 0);

                (void) printf(gettext("would update '%s' to the following "
                    "configuration:\n\n"), zpool_get_name(zhp));

                /* print original main pool and new tree */
                print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
                    name_flags | VDEV_NAME_TYPE_ID);
                print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

                /* print other classes: 'dedup', 'special', and 'log' */
                if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
                        print_vdev_tree(zhp, "dedup", poolnvroot, 0,
                            VDEV_ALLOC_BIAS_DEDUP, name_flags);
                        print_vdev_tree(zhp, NULL, nvroot, 0,
                            VDEV_ALLOC_BIAS_DEDUP, name_flags);
                } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
                        print_vdev_tree(zhp, "dedup", nvroot, 0,
                            VDEV_ALLOC_BIAS_DEDUP, name_flags);
                }

                if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
                        print_vdev_tree(zhp, "special", poolnvroot, 0,
                            VDEV_ALLOC_BIAS_SPECIAL, name_flags);
                        print_vdev_tree(zhp, NULL, nvroot, 0,
                            VDEV_ALLOC_BIAS_SPECIAL, name_flags);
                } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
                        print_vdev_tree(zhp, "special", nvroot, 0,
                            VDEV_ALLOC_BIAS_SPECIAL, name_flags);
                }

                if (num_logs(poolnvroot) > 0) {
                        print_vdev_tree(zhp, "logs", poolnvroot, 0,
                            VDEV_ALLOC_BIAS_LOG, name_flags);
                        print_vdev_tree(zhp, NULL, nvroot, 0,
                            VDEV_ALLOC_BIAS_LOG, name_flags);
                } else if (num_logs(nvroot) > 0) {
                        print_vdev_tree(zhp, "logs", nvroot, 0,
                            VDEV_ALLOC_BIAS_LOG, name_flags);
                }

                /* Do the same for the caches */
                if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2child, &l2children) == 0 && l2children) {
                        hadcache = B_TRUE;
                        (void) printf(gettext("\tcache\n"));
                        for (c = 0; c < l2children; c++) {
                                vname = zpool_vdev_name(g_zfs, NULL,
                                    l2child[c], name_flags);
                                (void) printf("\t %s\n", vname);
                                free(vname);
                        }
                }
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2child, &l2children) == 0 && l2children) {
                        if (!hadcache)
                                (void) printf(gettext("\tcache\n"));
                        for (c = 0; c < l2children; c++) {
                                vname = zpool_vdev_name(g_zfs, NULL,
                                    l2child[c], name_flags);
                                (void) printf("\t %s\n", vname);
                                free(vname);
                        }
                }
                /* And finally the spares */
                if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
                    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
                        hadspare = B_TRUE;
                        (void) printf(gettext("\tspares\n"));
                        for (c = 0; c < sparechildren; c++) {
                                vname = zpool_vdev_name(g_zfs, NULL,
                                    sparechild[c], name_flags);
                                (void) printf("\t %s\n", vname);
                                free(vname);
                        }
                }
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
                        if (!hadspare)
                                (void) printf(gettext("\tspares\n"));
                        for (c = 0; c < sparechildren; c++) {
                                vname = zpool_vdev_name(g_zfs, NULL,
                                    sparechild[c], name_flags);
                                (void) printf("\t %s\n", vname);
                                free(vname);
                        }
                }

                ret = 0;
        } else {
                ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
        }

        nvlist_free(props);
        nvlist_free(nvroot);
        zpool_close(zhp);

        return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
 */
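
/*
 * Illustrative invocations (not upstream text):
 *
 *	zpool remove tank sdb		begin evacuating and removing sdb
 *	zpool remove -np tank sdb	print the mapping memory estimate only
 *	zpool remove -s tank		cancel an in-progress removal
 */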
int
zpool_do_remove(int argc, char **argv)
{
        char *poolname;
        int i, ret = 0;
        zpool_handle_t *zhp = NULL;
        boolean_t stop = B_FALSE;
        int c;
        boolean_t noop = B_FALSE;
        boolean_t parsable = B_FALSE;
        boolean_t wait = B_FALSE;

        /* check options */
        while ((c = getopt(argc, argv, "npsw")) != -1) {
                switch (c) {
                case 'n':
                        noop = B_TRUE;
                        break;
                case 'p':
                        parsable = B_TRUE;
                        break;
                case 's':
                        stop = B_TRUE;
                        break;
                case 'w':
                        wait = B_TRUE;
                        break;
                case '?':
                        (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                            optopt);
                        usage(B_FALSE);
                }
        }

        argc -= optind;
        argv += optind;

        /* get pool name and check number of arguments */
        if (argc < 1) {
                (void) fprintf(stderr, gettext("missing pool name argument\n"));
                usage(B_FALSE);
        }

        poolname = argv[0];

        if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
                return (1);

        if (stop && noop) {
                zpool_close(zhp);
                (void) fprintf(stderr, gettext("stop request ignored\n"));
                return (0);
        }

        if (stop) {
                if (argc > 1) {
                        (void) fprintf(stderr, gettext("too many arguments\n"));
                        usage(B_FALSE);
                }
                if (zpool_vdev_remove_cancel(zhp) != 0)
                        ret = 1;
                if (wait) {
                        (void) fprintf(stderr, gettext("invalid option "
                            "combination: -w cannot be used with -s\n"));
                        usage(B_FALSE);
                }
        } else {
                if (argc < 2) {
                        (void) fprintf(stderr, gettext("missing device\n"));
                        usage(B_FALSE);
                }

                for (i = 1; i < argc; i++) {
                        if (noop) {
                                uint64_t size;

                                if (zpool_vdev_indirect_size(zhp, argv[i],
                                    &size) != 0) {
                                        ret = 1;
                                        break;
                                }
                                if (parsable) {
                                        (void) printf("%s %llu\n",
                                            argv[i], (unsigned long long)size);
                                } else {
                                        char valstr[32];
                                        zfs_nicenum(size, valstr,
                                            sizeof (valstr));
                                        (void) printf("Memory that will be "
                                            "used after removing %s: %s\n",
                                            argv[i], valstr);
                                }
                        } else {
                                if (zpool_vdev_remove(zhp, argv[i]) != 0)
                                        ret = 1;
                        }
                }

                if (ret == 0 && wait)
                        ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
        }
        zpool_close(zhp);

        return (ret);
}

/*
 * Return 1 if a vdev is active (being used in a pool)
 * Return 0 if a vdev is inactive (offlined or faulted, or not in an active
 * pool)
 *
 * This is useful for checking if a disk in an active pool is offlined or
 * faulted.
 */
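
/*
 * Note (assumption based on Linux open(2) semantics, not upstream text):
 * opening a block device with O_EXCL fails with EBUSY while the kernel
 * holds an exclusive claim on it, e.g. for an online pool member, which
 * is what the check below relies on.
 */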
static int
vdev_is_active(char *vdev_path)
{
        int fd;
        fd = open(vdev_path, O_EXCL);
        if (fd < 0) {
                return (1); /* can't open O_EXCL - disk is active */
        }

        close(fd);
        return (0); /* disk is inactive in the pool */
}

/*
 * zpool labelclear [-f] <vdev>
 *
 * -f Force clearing the label for vdevs which are members of
 *    exported or foreign pools.
 *
 * Verifies that the vdev is not active and zeros out the label information
 * on the device.
 */
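
/*
 * Illustrative invocations (not upstream text):
 *
 *	zpool labelclear /dev/sdb1	refuses if the device appears in use
 *	zpool labelclear -f /dev/sdb1	override for exported/foreign pools
 */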
int
zpool_do_labelclear(int argc, char **argv)
{
        char vdev[MAXPATHLEN];
        char *name = NULL;
        int c, fd = -1, ret = 0;
        nvlist_t *config;
        pool_state_t state;
        boolean_t inuse = B_FALSE;
        boolean_t force = B_FALSE;

        /* check options */
        while ((c = getopt(argc, argv, "f")) != -1) {
                switch (c) {
                case 'f':
                        force = B_TRUE;
                        break;
                default:
                        (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                            optopt);
                        usage(B_FALSE);
                }
        }

        argc -= optind;
        argv += optind;

        /* get vdev name */
        if (argc < 1) {
                (void) fprintf(stderr, gettext("missing vdev name\n"));
                usage(B_FALSE);
        }
        if (argc > 1) {
                (void) fprintf(stderr, gettext("too many arguments\n"));
                usage(B_FALSE);
        }

        (void) strlcpy(vdev, argv[0], sizeof (vdev));

        /*
         * If we cannot open an absolute path, we quit.
         * Otherwise if the provided vdev name doesn't point to a file,
         * try prepending expected disk paths and partition numbers.
         */
        if ((fd = open(vdev, O_RDWR)) < 0) {
                int error;
                if (vdev[0] == '/') {
                        (void) fprintf(stderr, gettext("failed to open "
                            "%s: %s\n"), vdev, strerror(errno));
                        return (1);
                }

                error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
                if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
                        if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
                                error = ENOENT;
                }

                if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
                        if (errno == ENOENT) {
                                (void) fprintf(stderr, gettext(
                                    "failed to find device %s, try "
                                    "specifying absolute path instead\n"),
                                    argv[0]);
                                return (1);
                        }

                        (void) fprintf(stderr, gettext("failed to open %s:"
                            " %s\n"), vdev, strerror(errno));
                        return (1);
                }
        }

        /*
         * Flush all dirty pages for the block device. This should not be
         * fatal when the device does not support BLKFLSBUF as would be the
         * case for a file vdev.
         */
        if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
                (void) fprintf(stderr, gettext("failed to invalidate "
                    "cache for %s: %s\n"), vdev, strerror(errno));

        if (zpool_read_label(fd, &config, NULL) != 0) {
                (void) fprintf(stderr,
                    gettext("failed to read label from %s\n"), vdev);
                ret = 1;
                goto errout;
        }
        nvlist_free(config);

        ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
        if (ret != 0) {
                (void) fprintf(stderr,
                    gettext("failed to check state for %s\n"), vdev);
                ret = 1;
                goto errout;
        }

        if (!inuse)
                goto wipe_label;

        switch (state) {
        default:
        case POOL_STATE_ACTIVE:
        case POOL_STATE_SPARE:
        case POOL_STATE_L2CACHE:
                /*
                 * We allow the user to call 'zpool labelclear -f'
                 * on an offlined disk in an active pool. We can check if
                 * the disk is online by calling vdev_is_active().
                 */
1895 if (force && !vdev_is_active(vdev))
1896 break;
1897
1898 (void) fprintf(stderr, gettext(
1899 "%s is a member (%s) of pool \"%s\""),
1900 vdev, zpool_pool_state_to_name(state), name);
1901
1902 if (force) {
1903 (void) fprintf(stderr, gettext(
1904 ". Offline the disk first to clear its label."));
1905 }
1906 printf("\n");
1907 ret = 1;
1908 goto errout;
1909
1910 case POOL_STATE_EXPORTED:
1911 if (force)
1912 break;
1913 (void) fprintf(stderr, gettext(
1914 "use '-f' to override the following error:\n"
1915 "%s is a member of exported pool \"%s\"\n"),
1916 vdev, name);
1917 ret = 1;
1918 goto errout;
1919
1920 case POOL_STATE_POTENTIALLY_ACTIVE:
1921 if (force)
1922 break;
1923 (void) fprintf(stderr, gettext(
1924 "use '-f' to override the following error:\n"
1925 "%s is a member of potentially active pool \"%s\"\n"),
1926 vdev, name);
1927 ret = 1;
1928 goto errout;
1929
1930 case POOL_STATE_DESTROYED:
1931 /* inuse should never be set for a destroyed pool */
1932 assert(0);
1933 break;
1934 }
1935
1936 wipe_label:
1937 ret = zpool_clear_label(fd);
1938 if (ret != 0) {
1939 (void) fprintf(stderr,
1940 gettext("failed to clear label for %s\n"), vdev);
1941 }
1942
1943 errout:
1944 free(name);
1945 (void) close(fd);
1946
1947 return (ret);
1948 }
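
/*
 * Illustrative sketch (not part of the original source, excluded from the
 * build): the short-name resolution sequence zpool_do_labelclear() uses
 * above. example_open_vdev() is a hypothetical name; zfs_resolve_shortname(),
 * zfs_dev_is_whole_disk(), and zfs_append_partition() are the libzutil
 * helpers called above, with signatures inferred from that usage.
 */
#if 0
static int
example_open_vdev(const char *arg)
{
	char path[MAXPATHLEN];

	/* Absolute paths are opened as-is; failure is final. */
	if (arg[0] == '/')
		return (open(arg, O_RDWR));

	/* Expand a short name like "sda" to its full device path. */
	if (zfs_resolve_shortname(arg, path, sizeof (path)) != 0)
		return (-1);

	/* Whole disks are addressed through their first partition. */
	if (zfs_dev_is_whole_disk(path) &&
	    zfs_append_partition(path, sizeof (path)) == -1)
		return (-1);

	return (open(path, O_RDWR));
}
#endif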
1949
1950 /*
1951 * zpool create [-fnd] [-o property=value] ...
1952 * [-O file-system-property=value] ...
1953 * [-R root] [-m mountpoint] <pool> <dev> ...
1954 *
1955 * -f Force creation, even if devices appear in use
1956 * -n Do not create the pool, but display the resulting layout if it
1957 * were to be created.
1958 * -R Create a pool under an alternate root
1959 * -m Set default mountpoint for the root dataset. By default it's
1960 * '/<pool>'
1961 * -o Set property=value.
1962 * -o Set feature@feature=enabled|disabled.
1963 * -d Don't automatically enable all supported pool features
1964 * (individual features can be enabled with -o).
1965 * -O Set fsproperty=value in the pool's root file system
1966 *
1967 * Creates the named pool according to the given vdev specification. The
1968 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1969 * Once we get the nvlist back from make_root_vdev(), we either print out the
1970 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
1971 */
1972 int
1973 zpool_do_create(int argc, char **argv)
1974 {
1975 boolean_t force = B_FALSE;
1976 boolean_t dryrun = B_FALSE;
1977 boolean_t enable_pool_features = B_TRUE;
1978
1979 int c;
1980 nvlist_t *nvroot = NULL;
1981 char *poolname;
1982 char *tname = NULL;
1983 int ret = 1;
1984 char *altroot = NULL;
1985 char *compat = NULL;
1986 char *mountpoint = NULL;
1987 nvlist_t *fsprops = NULL;
1988 nvlist_t *props = NULL;
1989 char *propval;
1990
1991 /* check options */
1992 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1993 switch (c) {
1994 case 'f':
1995 force = B_TRUE;
1996 break;
1997 case 'n':
1998 dryrun = B_TRUE;
1999 break;
2000 case 'd':
2001 enable_pool_features = B_FALSE;
2002 break;
2003 case 'R':
2004 altroot = optarg;
2005 if (add_prop_list(zpool_prop_to_name(
2006 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
2007 goto errout;
2008 if (add_prop_list_default(zpool_prop_to_name(
2009 ZPOOL_PROP_CACHEFILE), "none", &props))
2010 goto errout;
2011 break;
2012 case 'm':
2013 /* Equivalent to -O mountpoint=optarg */
2014 mountpoint = optarg;
2015 break;
2016 case 'o':
2017 if ((propval = strchr(optarg, '=')) == NULL) {
2018 (void) fprintf(stderr, gettext("missing "
2019 "'=' for -o option\n"));
2020 goto errout;
2021 }
2022 *propval = '\0';
2023 propval++;
2024
2025 if (add_prop_list(optarg, propval, &props, B_TRUE))
2026 goto errout;
2027
2028 /*
2029 * If the user is creating a pool that doesn't support
2030 * feature flags, don't enable any features.
2031 */
2032 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
2033 char *end;
2034 u_longlong_t ver;
2035
2036 ver = strtoull(propval, &end, 0);
2037 if (*end == '\0' &&
2038 ver < SPA_VERSION_FEATURES) {
2039 enable_pool_features = B_FALSE;
2040 }
2041 }
2042 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
2043 altroot = propval;
2044 if (zpool_name_to_prop(optarg) ==
2045 ZPOOL_PROP_COMPATIBILITY)
2046 compat = propval;
2047 break;
2048 case 'O':
2049 if ((propval = strchr(optarg, '=')) == NULL) {
2050 (void) fprintf(stderr, gettext("missing "
2051 "'=' for -O option\n"));
2052 goto errout;
2053 }
2054 *propval = '\0';
2055 propval++;
2056
2057 /*
2058 * Mountpoints are checked and then added later.
2059 * Uniquely among properties, they can be specified
2060 * more than once, to avoid conflict with -m.
2061 */
2062 if (0 == strcmp(optarg,
2063 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
2064 mountpoint = propval;
2065 } else if (add_prop_list(optarg, propval, &fsprops,
2066 B_FALSE)) {
2067 goto errout;
2068 }
2069 break;
2070 case 't':
2071 /*
2072 * Sanity check temporary pool name.
2073 */
2074 if (strchr(optarg, '/') != NULL) {
2075 (void) fprintf(stderr, gettext("cannot create "
2076 "'%s': invalid character '/' in temporary "
2077 "name\n"), optarg);
2078 (void) fprintf(stderr, gettext("use 'zfs "
2079 "create' to create a dataset\n"));
2080 goto errout;
2081 }
2082
2083 if (add_prop_list(zpool_prop_to_name(
2084 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
2085 goto errout;
2086 if (add_prop_list_default(zpool_prop_to_name(
2087 ZPOOL_PROP_CACHEFILE), "none", &props))
2088 goto errout;
2089 tname = optarg;
2090 break;
2091 case ':':
2092 (void) fprintf(stderr, gettext("missing argument for "
2093 "'%c' option\n"), optopt);
2094 goto badusage;
2095 case '?':
2096 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2097 optopt);
2098 goto badusage;
2099 }
2100 }
2101
2102 argc -= optind;
2103 argv += optind;
2104
2105 /* get pool name and check number of arguments */
2106 if (argc < 1) {
2107 (void) fprintf(stderr, gettext("missing pool name argument\n"));
2108 goto badusage;
2109 }
2110 if (argc < 2) {
2111 (void) fprintf(stderr, gettext("missing vdev specification\n"));
2112 goto badusage;
2113 }
2114
2115 poolname = argv[0];
2116
2117 /*
2118 * As a special case, check for use of '/' in the name, and direct the
2119 * user to use 'zfs create' instead.
2120 */
2121 if (strchr(poolname, '/') != NULL) {
2122 (void) fprintf(stderr, gettext("cannot create '%s': invalid "
2123 "character '/' in pool name\n"), poolname);
2124 (void) fprintf(stderr, gettext("use 'zfs create' to "
2125 "create a dataset\n"));
2126 goto errout;
2127 }
2128
2129 /* pass off to make_root_vdev for bulk processing */
2130 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
2131 argc - 1, argv + 1);
2132 if (nvroot == NULL)
2133 goto errout;
2134
2135 /* make_root_vdev() allows 0 toplevel children if there are spares */
2136 if (!zfs_allocatable_devs(nvroot)) {
2137 (void) fprintf(stderr, gettext("invalid vdev "
2138 "specification: at least one toplevel vdev must be "
2139 "specified\n"));
2140 goto errout;
2141 }
2142
2143 if (altroot != NULL && altroot[0] != '/') {
2144 (void) fprintf(stderr, gettext("invalid alternate root '%s': "
2145 "must be an absolute path\n"), altroot);
2146 goto errout;
2147 }
2148
2149 /*
2150 * Check the validity of the mountpoint and direct the user to use the
2151 * '-m' mountpoint option if it looks like it's in use.
2152 */
2153 if (mountpoint == NULL ||
2154 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
2155 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
2156 char buf[MAXPATHLEN];
2157 DIR *dirp;
2158
2159 if (mountpoint && mountpoint[0] != '/') {
2160 (void) fprintf(stderr, gettext("invalid mountpoint "
2161 "'%s': must be an absolute path, 'legacy', or "
2162 "'none'\n"), mountpoint);
2163 goto errout;
2164 }
2165
2166 if (mountpoint == NULL) {
2167 if (altroot != NULL)
2168 (void) snprintf(buf, sizeof (buf), "%s/%s",
2169 altroot, poolname);
2170 else
2171 (void) snprintf(buf, sizeof (buf), "/%s",
2172 poolname);
2173 } else {
2174 if (altroot != NULL)
2175 (void) snprintf(buf, sizeof (buf), "%s%s",
2176 altroot, mountpoint);
2177 else
2178 (void) snprintf(buf, sizeof (buf), "%s",
2179 mountpoint);
2180 }
2181
2182 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
2183 (void) fprintf(stderr, gettext("mountpoint '%s' : "
2184 "%s\n"), buf, strerror(errno));
2185 (void) fprintf(stderr, gettext("use '-m' "
2186 "option to provide a different default\n"));
2187 goto errout;
2188 } else if (dirp) {
2189 int count = 0;
2190
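			/* "." and ".." always exist; a third entry means non-empty */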
2191 while (count < 3 && readdir(dirp) != NULL)
2192 count++;
2193 (void) closedir(dirp);
2194
2195 if (count > 2) {
2196 (void) fprintf(stderr, gettext("mountpoint "
2197 "'%s' exists and is not empty\n"), buf);
2198 (void) fprintf(stderr, gettext("use '-m' "
2199 "option to provide a "
2200 "different default\n"));
2201 goto errout;
2202 }
2203 }
2204 }
2205
2206 /*
2207 * Now that the mountpoint's validity has been checked, ensure that
2208 * the property is set appropriately prior to creating the pool.
2209 */
2210 if (mountpoint != NULL) {
2211 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
2212 mountpoint, &fsprops, B_FALSE);
2213 if (ret != 0)
2214 goto errout;
2215 }
2216
2217 ret = 1;
2218 if (dryrun) {
2219 /*
2220 * For a dry-run invocation, print a basic message, then walk
2221 * the vdev list and print it as an appropriately indented
2222 * hierarchy.
2223 */
2224 (void) printf(gettext("would create '%s' with the "
2225 "following layout:\n\n"), poolname);
2226
2227 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
2228 print_vdev_tree(NULL, "dedup", nvroot, 0,
2229 VDEV_ALLOC_BIAS_DEDUP, 0);
2230 print_vdev_tree(NULL, "special", nvroot, 0,
2231 VDEV_ALLOC_BIAS_SPECIAL, 0);
2232 print_vdev_tree(NULL, "logs", nvroot, 0,
2233 VDEV_ALLOC_BIAS_LOG, 0);
2234 print_cache_list(nvroot, 0);
2235 print_spare_list(nvroot, 0);
2236
2237 ret = 0;
2238 } else {
2239 /*
2240 * Load in feature set.
2241 * Note: if the compatibility property was not given, compat is
2242 * NULL, which means 'all features'.
2243 */
2244 boolean_t requested_features[SPA_FEATURES];
2245 if (zpool_do_load_compat(compat, requested_features) !=
2246 ZPOOL_COMPATIBILITY_OK)
2247 goto errout;
2248
2249 /*
2250 * props contains list of features to enable.
2251 * For each feature:
2252 * - remove it if feature@name=disabled
2253 * - leave it there if feature@name=enabled
2254 * - add it if:
2255 * - enable_pool_features (i.e. no '-d' or '-o version')
2256 * - it's supported by the kernel module
2257 * - it's in the requested feature set
2258 * - warn if it's enabled but not in compat
2259 */
2260 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2261 char propname[MAXPATHLEN];
2262 const char *propval;
2263 zfeature_info_t *feat = &spa_feature_table[i];
2264
2265 (void) snprintf(propname, sizeof (propname),
2266 "feature@%s", feat->fi_uname);
2267
2268 if (!nvlist_lookup_string(props, propname, &propval)) {
2269 if (strcmp(propval,
2270 ZFS_FEATURE_DISABLED) == 0) {
2271 (void) nvlist_remove_all(props,
2272 propname);
2273 } else if (strcmp(propval,
2274 ZFS_FEATURE_ENABLED) == 0 &&
2275 !requested_features[i]) {
2276 (void) fprintf(stderr, gettext(
2277 "Warning: feature \"%s\" enabled "
2278 "but is not in specified "
2279 "'compatibility' feature set.\n"),
2280 feat->fi_uname);
2281 }
2282 } else if (
2283 enable_pool_features &&
2284 feat->fi_zfs_mod_supported &&
2285 requested_features[i]) {
2286 ret = add_prop_list(propname,
2287 ZFS_FEATURE_ENABLED, &props, B_TRUE);
2288 if (ret != 0)
2289 goto errout;
2290 }
2291 }
2292
2293 ret = 1;
2294 if (zpool_create(g_zfs, poolname,
2295 nvroot, props, fsprops) == 0) {
2296 zfs_handle_t *pool = zfs_open(g_zfs,
2297 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
2298 if (pool != NULL) {
2299 if (zfs_mount(pool, NULL, 0) == 0) {
2300 ret = zfs_share(pool, NULL);
2301 zfs_commit_shares(NULL);
2302 }
2303 zfs_close(pool);
2304 }
2305 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
2306 (void) fprintf(stderr, gettext("pool name may have "
2307 "been omitted\n"));
2308 }
2309 }
2310
2311 errout:
2312 nvlist_free(nvroot);
2313 nvlist_free(fsprops);
2314 nvlist_free(props);
2315 return (ret);
2316 badusage:
2317 nvlist_free(fsprops);
2318 nvlist_free(props);
2319 usage(B_FALSE);
2320 return (2);
2321 }
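
/*
 * Illustrative sketch (hypothetical, excluded from the build): the minimal
 * libzfs call sequence zpool_do_create() performs for a plain, non-dry-run
 * invocation with no properties. make_root_vdev() and zpool_create() are
 * the same helpers used above; the pool name is an example value.
 */
#if 0
static int
example_create(int argc, char **argv)	/* argv: vdev specification */
{
	const char *poolname = "tank";	/* example name */
	nvlist_t *nvroot;

	/* Build the vdev tree nvlist from the vdev specification. */
	nvroot = make_root_vdev(NULL, NULL, B_FALSE, B_TRUE, B_FALSE,
	    B_FALSE, argc, argv);
	if (nvroot == NULL)
		return (1);

	/* Create the pool; no pool or filesystem properties. */
	int ret = (zpool_create(g_zfs, poolname, nvroot, NULL, NULL) != 0);
	nvlist_free(nvroot);
	return (ret);
}
#endif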
2322
2323 /*
2324 * zpool destroy <pool>
2325 *
2326 * -f Forcefully unmount any datasets
2327 *
2328 * Destroy the given pool. Automatically unmounts any datasets in the pool.
2329 */
2330 int
2331 zpool_do_destroy(int argc, char **argv)
2332 {
2333 boolean_t force = B_FALSE;
2334 int c;
2335 char *pool;
2336 zpool_handle_t *zhp;
2337 int ret;
2338
2339 /* check options */
2340 while ((c = getopt(argc, argv, "f")) != -1) {
2341 switch (c) {
2342 case 'f':
2343 force = B_TRUE;
2344 break;
2345 case '?':
2346 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2347 optopt);
2348 usage(B_FALSE);
2349 }
2350 }
2351
2352 argc -= optind;
2353 argv += optind;
2354
2355 /* check arguments */
2356 if (argc < 1) {
2357 (void) fprintf(stderr, gettext("missing pool argument\n"));
2358 usage(B_FALSE);
2359 }
2360 if (argc > 1) {
2361 (void) fprintf(stderr, gettext("too many arguments\n"));
2362 usage(B_FALSE);
2363 }
2364
2365 pool = argv[0];
2366
2367 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
2368 /*
2369 * As a special case, check for use of '/' in the name, and
2370 * direct the user to use 'zfs destroy' instead.
2371 */
2372 if (strchr(pool, '/') != NULL)
2373 (void) fprintf(stderr, gettext("use 'zfs destroy' to "
2374 "destroy a dataset\n"));
2375 return (1);
2376 }
2377
2378 if (zpool_disable_datasets(zhp, force) != 0) {
2379 (void) fprintf(stderr, gettext("could not destroy '%s': "
2380 "could not unmount datasets\n"), zpool_get_name(zhp));
2381 zpool_close(zhp);
2382 return (1);
2383 }
2384
2385 /* The history must be logged as part of the export */
2386 log_history = B_FALSE;
2387
2388 ret = (zpool_destroy(zhp, history_str) != 0);
2389
2390 zpool_close(zhp);
2391
2392 return (ret);
2393 }
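
/*
 * Illustrative sketch (hypothetical, excluded from the build): the libzfs
 * call sequence zpool_do_destroy() performs once arguments are parsed.
 * history_str is the same global logged above.
 */
#if 0
static int
example_destroy(const char *pool, boolean_t force)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL)
		return (1);

	/* Unmount all datasets (optionally by force) before destroying. */
	if (zpool_disable_datasets(zhp, force) != 0) {
		zpool_close(zhp);
		return (1);
	}

	int ret = (zpool_destroy(zhp, history_str) != 0);
	zpool_close(zhp);
	return (ret);
}
#endif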
2394
2395 typedef struct export_cbdata {
2396 tpool_t *tpool;
2397 pthread_mutex_t mnttab_lock;
2398 boolean_t force;
2399 boolean_t hardforce;
2400 int retval;
2401 } export_cbdata_t;
2402
2403
2404 typedef struct {
2405 char *aea_poolname;
2406 export_cbdata_t *aea_cbdata;
2407 } async_export_args_t;
2408
2409 /*
2410 * Export one pool
2411 */
2412 static int
2413 zpool_export_one(zpool_handle_t *zhp, void *data)
2414 {
2415 export_cbdata_t *cb = data;
2416
2417 /*
2418 * zpool_disable_datasets() is not thread-safe for mnttab access.
2419 * So we serialize access here for the 'zpool export -a' parallel case.
2420 */
2421 if (cb->tpool != NULL)
2422 pthread_mutex_lock(&cb->mnttab_lock);
2423
2424 int retval = zpool_disable_datasets(zhp, cb->force);
2425
2426 if (cb->tpool != NULL)
2427 pthread_mutex_unlock(&cb->mnttab_lock);
2428
2429 if (retval)
2430 return (1);
2431
2432 if (cb->hardforce) {
2433 if (zpool_export_force(zhp, history_str) != 0)
2434 return (1);
2435 } else if (zpool_export(zhp, cb->force, history_str) != 0) {
2436 return (1);
2437 }
2438
2439 return (0);
2440 }
2441
2442 /*
2443 * Asynchronous export request
2444 */
2445 static void
2446 zpool_export_task(void *arg)
2447 {
2448 async_export_args_t *aea = arg;
2449
2450 zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname);
2451 if (zhp != NULL) {
2452 int ret = zpool_export_one(zhp, aea->aea_cbdata);
2453 if (ret != 0)
2454 aea->aea_cbdata->retval = ret;
2455 zpool_close(zhp);
2456 } else {
2457 aea->aea_cbdata->retval = 1;
2458 }
2459
2460 free(aea->aea_poolname);
2461 free(aea);
2462 }
2463
2464 /*
2465 * Process an export request in parallel
2466 */
2467 static int
2468 zpool_export_one_async(zpool_handle_t *zhp, void *data)
2469 {
2470 tpool_t *tpool = ((export_cbdata_t *)data)->tpool;
2471 async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t));
2472
2473 /* save pool name since zhp will go out of scope */
2474 aea->aea_poolname = strdup(zpool_get_name(zhp));
2475 aea->aea_cbdata = data;
2476
2477 /* ship off actual export to another thread */
2478 if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0)
2479 return (errno); /* unlikely */
2480 else
2481 return (0);
2482 }
2483
2484 /*
2485 * zpool export [-afF] <pool> ...
2486 *
2487 * -a Export all pools
2488 * -f Forcefully unmount datasets
2489 * -F Hard-force the export (see zpool_export_force())
2490 *
2491 * Export the given pools. By default, the command attempts to cleanly
2492 * unmount any active datasets within the pool; '-f' forces the unmount.
2493 */
2494 int
2495 zpool_do_export(int argc, char **argv)
2496 {
2497 export_cbdata_t cb;
2498 boolean_t do_all = B_FALSE;
2499 boolean_t force = B_FALSE;
2500 boolean_t hardforce = B_FALSE;
2501 int c, ret;
2502
2503 /* check options */
2504 while ((c = getopt(argc, argv, "afF")) != -1) {
2505 switch (c) {
2506 case 'a':
2507 do_all = B_TRUE;
2508 break;
2509 case 'f':
2510 force = B_TRUE;
2511 break;
2512 case 'F':
2513 hardforce = B_TRUE;
2514 break;
2515 case '?':
2516 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2517 optopt);
2518 usage(B_FALSE);
2519 }
2520 }
2521
2522 cb.force = force;
2523 cb.hardforce = hardforce;
2524 cb.tpool = NULL;
2525 cb.retval = 0;
2526 argc -= optind;
2527 argv += optind;
2528
2529 /* The history will be logged as part of the export itself */
2530 log_history = B_FALSE;
2531
2532 if (do_all) {
2533 if (argc != 0) {
2534 (void) fprintf(stderr, gettext("too many arguments\n"));
2535 usage(B_FALSE);
2536 }
2537
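		/* one worker thread minimum, up to five per online CPU */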
2538 cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
2539 0, NULL);
2540 pthread_mutex_init(&cb.mnttab_lock, NULL);
2541
2542 /* Asynchronously call zpool_export_one using thread pool */
2543 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2544 B_FALSE, zpool_export_one_async, &cb);
2545
2546 tpool_wait(cb.tpool);
2547 tpool_destroy(cb.tpool);
2548 (void) pthread_mutex_destroy(&cb.mnttab_lock);
2549
2550 return (ret | cb.retval);
2551 }
2552
2553 /* check arguments */
2554 if (argc < 1) {
2555 (void) fprintf(stderr, gettext("missing pool argument\n"));
2556 usage(B_FALSE);
2557 }
2558
2559 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2560 B_FALSE, zpool_export_one, &cb);
2561
2562 return (ret);
2563 }
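
/*
 * Illustrative sketch (hypothetical helper, excluded from the build): the
 * generic thread_pool.h pattern 'zpool export -a' follows above -- create
 * a pool of workers, dispatch one task per item, then wait and destroy.
 */
#if 0
static void
example_parallel(void (*task)(void *), void **items, int nitems)
{
	tpool_t *tp = tpool_create(1, sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);

	for (int i = 0; i < nitems; i++)
		(void) tpool_dispatch(tp, task, items[i]);

	tpool_wait(tp);		/* block until all dispatched tasks finish */
	tpool_destroy(tp);
}
#endif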
2564
2565 /*
2566 * Given a vdev configuration, determine the maximum width needed for the device
2567 * name column.
2568 */
2569 static int
2570 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
2571 int name_flags)
2572 {
2573 static const char *const subtypes[] =
2574 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
2575
2576 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
2577 max = MAX(strlen(name) + depth, max);
2578 free(name);
2579
2580 nvlist_t **child;
2581 uint_t children;
2582 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
2583 if (nvlist_lookup_nvlist_array(nv, subtypes[i],
2584 &child, &children) == 0)
2585 for (uint_t c = 0; c < children; ++c)
2586 max = MAX(max_width(zhp, child[c], depth + 2,
2587 max, name_flags), max);
2588
2589 return (max);
2590 }
2591
2592 typedef struct status_cbdata {
2593 int cb_count;
2594 int cb_name_flags;
2595 int cb_namewidth;
2596 boolean_t cb_allpools;
2597 boolean_t cb_verbose;
2598 boolean_t cb_literal;
2599 boolean_t cb_explain;
2600 boolean_t cb_first;
2601 boolean_t cb_dedup_stats;
2602 boolean_t cb_print_unhealthy;
2603 boolean_t cb_print_status;
2604 boolean_t cb_print_slow_ios;
2605 boolean_t cb_print_dio_verify;
2606 boolean_t cb_print_vdev_init;
2607 boolean_t cb_print_vdev_trim;
2608 vdev_cmd_data_list_t *vcdl;
2609 boolean_t cb_print_power;
2610 boolean_t cb_json;
2611 boolean_t cb_flat_vdevs;
2612 nvlist_t *cb_jsobj;
2613 boolean_t cb_json_as_int;
2614 boolean_t cb_json_pool_key_guid;
2615 } status_cbdata_t;
2616
2617 /* Return B_TRUE if the string is NULL, empty, or all whitespace. */
2618 static boolean_t
2619 is_blank_str(const char *str)
2620 {
2621 for (; str != NULL && *str != '\0'; ++str)
2622 if (!isblank(*str))
2623 return (B_FALSE);
2624 return (B_TRUE);
2625 }
2626
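/*
 * Add the per-vdev script output (e.g. collected for 'zpool status -c') to
 * an nvlist for JSON output: one entry per unique column, plus "extra_N"
 * entries for any lines that do not belong to a column.
 */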
2627 static void
2628 zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,
2629 nvlist_t *item)
2630 {
2631 vdev_cmd_data_t *data;
2632 int i, j, k = 1;
2633 char tmp[256];
2634 const char *val;
2635
2636 for (i = 0; i < vcdl->count; i++) {
2637 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2638 (strcmp(vcdl->data[i].pool, pool) != 0))
2639 continue;
2640
2641 data = &vcdl->data[i];
2642 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2643 val = NULL;
2644 for (int k = 0; k < data->cols_cnt; k++) {
2645 if (strcmp(data->cols[k],
2646 vcdl->uniq_cols[j]) == 0) {
2647 val = data->lines[k];
2648 break;
2649 }
2650 }
2651 if (val == NULL || is_blank_str(val))
2652 val = "-";
2653 fnvlist_add_string(item, vcdl->uniq_cols[j], val);
2654 }
2655
2656 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2657 if (data->lines[j]) {
2658 snprintf(tmp, 256, "extra_%d", k++);
2659 fnvlist_add_string(item, tmp,
2660 data->lines[j]);
2661 }
2662 }
2663 break;
2664 }
2665 }
2666
2667 /* Print command output lines for specific vdev in a specific pool */
2668 static void
2669 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
2670 {
2671 vdev_cmd_data_t *data;
2672 int i, j;
2673 const char *val;
2674
2675 for (i = 0; i < vcdl->count; i++) {
2676 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2677 (strcmp(vcdl->data[i].pool, pool) != 0)) {
2678 /* Not the vdev we're looking for */
2679 continue;
2680 }
2681
2682 data = &vcdl->data[i];
2683 /* Print out all the output values for this vdev */
2684 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2685 val = NULL;
2686 /* Does this vdev have values for this column? */
2687 for (int k = 0; k < data->cols_cnt; k++) {
2688 if (strcmp(data->cols[k],
2689 vcdl->uniq_cols[j]) == 0) {
2690 /* yes it does, record the value */
2691 val = data->lines[k];
2692 break;
2693 }
2694 }
2695 /*
2696 * Mark empty values with dashes to make output
2697 * awk-able.
2698 */
2699 if (val == NULL || is_blank_str(val))
2700 val = "-";
2701
2702 printf("%*s", vcdl->uniq_cols_width[j], val);
2703 if (j < vcdl->uniq_cols_cnt - 1)
2704 fputs(" ", stdout);
2705 }
2706
2707 /* Print out any values that aren't in a column at the end */
2708 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2709 /* Did we have any columns? If so print a spacer. */
2710 if (vcdl->uniq_cols_cnt > 0)
2711 fputs(" ", stdout);
2712
2713 val = data->lines[j];
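			/* "?:" (GNU C extension): a NULL line prints as "" */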
2714 fputs(val ?: "", stdout);
2715 }
2716 break;
2717 }
2718 }
2719
2720 /*
2721 * Print vdev initialization status for leaves
2722 */
2723 static void
2724 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2725 {
2726 if (verbose) {
2727 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2728 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2729 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2730 !vs->vs_scan_removing) {
2731 char zbuf[1024];
2732 char tbuf[256];
2733
2734 time_t t = vs->vs_initialize_action_time;
2735 int initialize_pct = 100;
2736 if (vs->vs_initialize_state !=
2737 VDEV_INITIALIZE_COMPLETE) {
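				/* "+ 1" guards against dividing by zero */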
2738 initialize_pct = (vs->vs_initialize_bytes_done *
2739 100 / (vs->vs_initialize_bytes_est + 1));
2740 }
2741
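			/* ctime_r() output is fixed-width; drop the trailing '\n' */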
2742 (void) ctime_r(&t, tbuf);
2743 tbuf[24] = 0;
2744
2745 switch (vs->vs_initialize_state) {
2746 case VDEV_INITIALIZE_SUSPENDED:
2747 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2748 gettext("suspended, started at"), tbuf);
2749 break;
2750 case VDEV_INITIALIZE_ACTIVE:
2751 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2752 gettext("started at"), tbuf);
2753 break;
2754 case VDEV_INITIALIZE_COMPLETE:
2755 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2756 gettext("completed at"), tbuf);
2757 break;
2758 }
2759
2760 (void) printf(gettext(" (%d%% initialized%s)"),
2761 initialize_pct, zbuf);
2762 } else {
2763 (void) printf(gettext(" (uninitialized)"));
2764 }
2765 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2766 (void) printf(gettext(" (initializing)"));
2767 }
2768 }
2769
2770 /*
2771 * Print vdev TRIM status for leaves
2772 */
2773 static void
2774 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2775 {
2776 if (verbose) {
2777 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2778 vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2779 vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2780 !vs->vs_scan_removing) {
2781 char zbuf[1024];
2782 char tbuf[256];
2783
2784 time_t t = vs->vs_trim_action_time;
2785 int trim_pct = 100;
2786 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
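				/* "+ 1" guards against dividing by zero */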
2787 trim_pct = (vs->vs_trim_bytes_done *
2788 100 / (vs->vs_trim_bytes_est + 1));
2789 }
2790
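			/* ctime_r() output is fixed-width; drop the trailing '\n' */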
2791 (void) ctime_r(&t, tbuf);
2792 tbuf[24] = 0;
2793
2794 switch (vs->vs_trim_state) {
2795 case VDEV_TRIM_SUSPENDED:
2796 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2797 gettext("suspended, started at"), tbuf);
2798 break;
2799 case VDEV_TRIM_ACTIVE:
2800 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2801 gettext("started at"), tbuf);
2802 break;
2803 case VDEV_TRIM_COMPLETE:
2804 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2805 gettext("completed at"), tbuf);
2806 break;
2807 }
2808
2809 (void) printf(gettext(" (%d%% trimmed%s)"),
2810 trim_pct, zbuf);
2811 } else if (vs->vs_trim_notsup) {
2812 (void) printf(gettext(" (trim unsupported)"));
2813 } else {
2814 (void) printf(gettext(" (untrimmed)"));
2815 }
2816 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2817 (void) printf(gettext(" (trimming)"));
2818 }
2819 }
2820
2821 /*
2822 * Return the color associated with a health string. This includes returning
2823 * NULL for no color change.
2824 */
2825 static const char *
2826 health_str_to_color(const char *health)
2827 {
2828 if (strcmp(health, gettext("FAULTED")) == 0 ||
2829 strcmp(health, gettext("SUSPENDED")) == 0 ||
2830 strcmp(health, gettext("UNAVAIL")) == 0) {
2831 return (ANSI_RED);
2832 }
2833
2834 if (strcmp(health, gettext("OFFLINE")) == 0 ||
2835 strcmp(health, gettext("DEGRADED")) == 0 ||
2836 strcmp(health, gettext("REMOVED")) == 0) {
2837 return (ANSI_YELLOW);
2838 }
2839
2840 return (NULL);
2841 }
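
/*
 * Illustrative usage (hypothetical values): pair the returned color with
 * printf_color(), as print_status_config() does below.
 *
 *	const char *state = "DEGRADED";
 *	printf_color(health_str_to_color(state), "%s", state);
 */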
2842
2843 /*
2844 * Called for each leaf vdev. Returns 0 if the vdev is healthy.
2845 * A vdev is unhealthy if any of the following are true:
2846 * 1) there are read, write, or checksum errors,
2847 * 2) its state is not ONLINE, or
2848 * 3) slow IO reporting was requested (-s) and there are slow IOs.
2849 */
2850 static int
2851 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
2852 {
2853 status_cbdata_t *cb = data;
2854 vdev_stat_t *vs;
2855 uint_t vsc;
2856 (void) hdl_data;
2857
2858 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2859 (uint64_t **)&vs, &vsc) != 0)
2860 return (1);
2861
2862 if (vs->vs_checksum_errors || vs->vs_read_errors ||
2863 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
2864 return (1);
2865
2866 if (cb->cb_print_slow_ios && vs->vs_slow_ios)
2867 return (1);
2868
2869 return (0);
2870 }
2871
2872 /*
2873 * Print out configuration state as requested by status_callback.
2874 */
2875 static void
2876 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
2877 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
2878 {
2879 nvlist_t **child, *root;
2880 uint_t c, i, vsc, children;
2881 pool_scan_stat_t *ps = NULL;
2882 vdev_stat_t *vs;
2883 char rbuf[6], wbuf[6], cbuf[6], dbuf[6];
2884 char *vname;
2885 uint64_t notpresent;
2886 spare_cbdata_t spare_cb;
2887 const char *state;
2888 const char *type;
2889 const char *path = NULL;
2890 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
2891 *scolor = NULL;
2892
2893 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2894 &child, &children) != 0)
2895 children = 0;
2896
2897 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2898 (uint64_t **)&vs, &vsc) == 0);
2899
2900 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2901
2902 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2903 return;
2904
2905 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2906
2907 if (isspare) {
2908 /*
2909 * For hot spares, we use the terms 'INUSE' and 'AVAIL' for
2910 * online drives.
2911 */
2912 if (vs->vs_aux == VDEV_AUX_SPARED)
2913 state = gettext("INUSE");
2914 else if (vs->vs_state == VDEV_STATE_HEALTHY)
2915 state = gettext("AVAIL");
2916 }
2917
2918 /*
2919 * If '-e' is specified then top-level vdevs and their children
2920 * can be pruned if all of their leaves are healthy.
2921 */
2922 if (cb->cb_print_unhealthy && depth > 0 &&
2923 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
2924 return;
2925 }
2926
2927 printf_color(health_str_to_color(state),
2928 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
2929 name, state);
2930
2931 if (!isspare) {
2932 if (vs->vs_read_errors)
2933 rcolor = ANSI_RED;
2934
2935 if (vs->vs_write_errors)
2936 wcolor = ANSI_RED;
2937
2938 if (vs->vs_checksum_errors)
2939 ccolor = ANSI_RED;
2940
2941 if (vs->vs_slow_ios)
2942 scolor = ANSI_BLUE;
2943
2944 if (cb->cb_literal) {
2945 fputc(' ', stdout);
2946 printf_color(rcolor, "%5llu",
2947 (u_longlong_t)vs->vs_read_errors);
2948 fputc(' ', stdout);
2949 printf_color(wcolor, "%5llu",
2950 (u_longlong_t)vs->vs_write_errors);
2951 fputc(' ', stdout);
2952 printf_color(ccolor, "%5llu",
2953 (u_longlong_t)vs->vs_checksum_errors);
2954 } else {
2955 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
2956 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
2957 zfs_nicenum(vs->vs_checksum_errors, cbuf,
2958 sizeof (cbuf));
2959 fputc(' ', stdout);
2960 printf_color(rcolor, "%5s", rbuf);
2961 fputc(' ', stdout);
2962 printf_color(wcolor, "%5s", wbuf);
2963 fputc(' ', stdout);
2964 printf_color(ccolor, "%5s", cbuf);
2965 }
2966 if (cb->cb_print_slow_ios) {
2967 if (children == 0) {
2968 /* Only leaf vdevs have slow I/Os */
2969 zfs_nicenum(vs->vs_slow_ios, rbuf,
2970 sizeof (rbuf));
2971 } else {
2972 snprintf(rbuf, sizeof (rbuf), "-");
2973 }
2974
2975 if (cb->cb_literal)
2976 printf_color(scolor, " %5llu",
2977 (u_longlong_t)vs->vs_slow_ios);
2978 else
2979 printf_color(scolor, " %5s", rbuf);
2980 }
2981 if (cb->cb_print_power) {
2982 if (children == 0) {
2983 /* Only leaf vdevs have physical slots */
2984 switch (zpool_power_current_state(zhp, (char *)
2985 fnvlist_lookup_string(nv,
2986 ZPOOL_CONFIG_PATH))) {
2987 case 0:
2988 printf_color(ANSI_RED, " %5s",
2989 gettext("off"));
2990 break;
2991 case 1:
2992 printf(" %5s", gettext("on"));
2993 break;
2994 default:
2995 printf(" %5s", "-");
2996 }
2997 } else {
2998 printf(" %5s", "-");
2999 }
3000 }
3001 if (VDEV_STAT_VALID(vs_dio_verify_errors, vsc) &&
3002 cb->cb_print_dio_verify) {
3003 zfs_nicenum(vs->vs_dio_verify_errors, dbuf,
3004 sizeof (dbuf));
3005
3006 if (cb->cb_literal)
3007 printf(" %5llu",
3008 (u_longlong_t)vs->vs_dio_verify_errors);
3009 else
3010 printf(" %5s", dbuf);
3011 }
3012 }
3013
3014 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3015 &notpresent) == 0) {
3016 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
3017 (void) printf(" %s %s", gettext("was"), path);
3018 } else if (vs->vs_aux != 0) {
3019 (void) printf(" ");
3020 color_start(ANSI_RED);
3021 switch (vs->vs_aux) {
3022 case VDEV_AUX_OPEN_FAILED:
3023 (void) printf(gettext("cannot open"));
3024 break;
3025
3026 case VDEV_AUX_BAD_GUID_SUM:
3027 (void) printf(gettext("missing device"));
3028 break;
3029
3030 case VDEV_AUX_NO_REPLICAS:
3031 (void) printf(gettext("insufficient replicas"));
3032 break;
3033
3034 case VDEV_AUX_VERSION_NEWER:
3035 (void) printf(gettext("newer version"));
3036 break;
3037
3038 case VDEV_AUX_UNSUP_FEAT:
3039 (void) printf(gettext("unsupported feature(s)"));
3040 break;
3041
3042 case VDEV_AUX_ASHIFT_TOO_BIG:
3043 (void) printf(gettext("unsupported minimum blocksize"));
3044 break;
3045
3046 case VDEV_AUX_SPARED:
3047 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3048 &spare_cb.cb_guid) == 0);
3049 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
3050 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
3051 zpool_get_name(zhp)) == 0)
3052 (void) printf(gettext("currently in "
3053 "use"));
3054 else
3055 (void) printf(gettext("in use by "
3056 "pool '%s'"),
3057 zpool_get_name(spare_cb.cb_zhp));
3058 zpool_close(spare_cb.cb_zhp);
3059 } else {
3060 (void) printf(gettext("currently in use"));
3061 }
3062 break;
3063
3064 case VDEV_AUX_ERR_EXCEEDED:
3065 if (vs->vs_read_errors + vs->vs_write_errors +
3066 vs->vs_checksum_errors == 0 && children == 0 &&
3067 vs->vs_slow_ios > 0) {
3068 (void) printf(gettext("too many slow I/Os"));
3069 } else {
3070 (void) printf(gettext("too many errors"));
3071 }
3072 break;
3073
3074 case VDEV_AUX_IO_FAILURE:
3075 (void) printf(gettext("experienced I/O failures"));
3076 break;
3077
3078 case VDEV_AUX_BAD_LOG:
3079 (void) printf(gettext("bad intent log"));
3080 break;
3081
3082 case VDEV_AUX_EXTERNAL:
3083 (void) printf(gettext("external device fault"));
3084 break;
3085
3086 case VDEV_AUX_SPLIT_POOL:
3087 (void) printf(gettext("split into new pool"));
3088 break;
3089
3090 case VDEV_AUX_ACTIVE:
3091 (void) printf(gettext("currently in use"));
3092 break;
3093
3094 case VDEV_AUX_CHILDREN_OFFLINE:
3095 (void) printf(gettext("all children offline"));
3096 break;
3097
3098 case VDEV_AUX_BAD_LABEL:
3099 (void) printf(gettext("invalid label"));
3100 break;
3101
3102 default:
3103 (void) printf(gettext("corrupted data"));
3104 break;
3105 }
3106 color_end();
3107 } else if (children == 0 && !isspare &&
3108 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
3109 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
3110 vs->vs_configured_ashift < vs->vs_physical_ashift) {
3111 (void) printf(
3112 gettext(" block size: %dB configured, %dB native"),
3113 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
3114 }
3115
3116 if (vs->vs_scan_removing != 0) {
3117 (void) printf(gettext(" (removing)"));
3118 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
3119 (void) printf(gettext(" (non-allocating)"));
3120 }
3121
3122 /* The root vdev has the scrub/resilver stats */
3123 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3124 ZPOOL_CONFIG_VDEV_TREE);
3125 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
3126 (uint64_t **)&ps, &c);
3127
3128 /*
3129 * If you force fault a drive that's resilvering, its scan stats can
3130 * get frozen in time, giving the false impression that it's
3131 * being resilvered. That's why we check the state to see if the vdev
3132 * is healthy before reporting "resilvering" or "repairing".
3133 */
3134 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
3135 vs->vs_state == VDEV_STATE_HEALTHY) {
3136 if (vs->vs_scan_processed != 0) {
3137 (void) printf(gettext(" (%s)"),
3138 (ps->pss_func == POOL_SCAN_RESILVER) ?
3139 "resilvering" : "repairing");
3140 } else if (vs->vs_resilver_deferred) {
3141 (void) printf(gettext(" (awaiting resilver)"));
3142 }
3143 }
3144
3145 /* The top-level vdevs have the rebuild stats */
3146 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
3147 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
3148 if (vs->vs_rebuild_processed != 0) {
3149 (void) printf(gettext(" (resilvering)"));
3150 }
3151 }
3152
3153 if (cb->vcdl != NULL) {
3154 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3155 printf(" ");
3156 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
3157 }
3158 }
3159
3160 /* Display vdev initialization and trim status for leaves. */
3161 if (children == 0) {
3162 print_status_initialize(vs, cb->cb_print_vdev_init);
3163 print_status_trim(vs, cb->cb_print_vdev_trim);
3164 }
3165
3166 (void) printf("\n");
3167
3168 for (c = 0; c < children; c++) {
3169 uint64_t islog = B_FALSE, ishole = B_FALSE;
3170
3171 /* Don't print logs or holes here */
3172 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3173 &islog);
3174 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3175 &ishole);
3176 if (islog || ishole)
3177 continue;
3178 /* Only print normal classes here */
3179 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
3180 continue;
3181
3182 /* Provide vdev_rebuild_stats to children if available */
3183 if (vrs == NULL) {
3184 (void) nvlist_lookup_uint64_array(nv,
3185 ZPOOL_CONFIG_REBUILD_STATS,
3186 (uint64_t **)&vrs, &i);
3187 }
3188
3189 vname = zpool_vdev_name(g_zfs, zhp, child[c],
3190 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3191 print_status_config(zhp, cb, vname, child[c], depth + 2,
3192 isspare, vrs);
3193 free(vname);
3194 }
3195 }
3196
3197 /*
3198 * Print the configuration of an exported pool. Iterate over all vdevs in the
3199 * pool, printing out the name and status for each one.
3200 */
3201 static void
3202 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
3203 int depth)
3204 {
3205 nvlist_t **child;
3206 uint_t c, children;
3207 vdev_stat_t *vs;
3208 const char *type;
3209 char *vname;
3210
3211 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3212 if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
3213 strcmp(type, VDEV_TYPE_HOLE) == 0)
3214 return;
3215
3216 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3217 (uint64_t **)&vs, &c) == 0);
3218
3219 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
3220 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
3221
3222 if (vs->vs_aux != 0) {
3223 (void) printf(" ");
3224
3225 switch (vs->vs_aux) {
3226 case VDEV_AUX_OPEN_FAILED:
3227 (void) printf(gettext("cannot open"));
3228 break;
3229
3230 case VDEV_AUX_BAD_GUID_SUM:
3231 (void) printf(gettext("missing device"));
3232 break;
3233
3234 case VDEV_AUX_NO_REPLICAS:
3235 (void) printf(gettext("insufficient replicas"));
3236 break;
3237
3238 case VDEV_AUX_VERSION_NEWER:
3239 (void) printf(gettext("newer version"));
3240 break;
3241
3242 case VDEV_AUX_UNSUP_FEAT:
3243 (void) printf(gettext("unsupported feature(s)"));
3244 break;
3245
3246 case VDEV_AUX_ERR_EXCEEDED:
3247 (void) printf(gettext("too many errors"));
3248 break;
3249
3250 case VDEV_AUX_ACTIVE:
3251 (void) printf(gettext("currently in use"));
3252 break;
3253
3254 case VDEV_AUX_CHILDREN_OFFLINE:
3255 (void) printf(gettext("all children offline"));
3256 break;
3257
3258 case VDEV_AUX_BAD_LABEL:
3259 (void) printf(gettext("invalid label"));
3260 break;
3261
3262 default:
3263 (void) printf(gettext("corrupted data"));
3264 break;
3265 }
3266 }
3267 (void) printf("\n");
3268
3269 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
3270 &child, &children) != 0)
3271 return;
3272
3273 for (c = 0; c < children; c++) {
3274 uint64_t is_log = B_FALSE;
3275
3276 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3277 &is_log);
3278 if (is_log)
3279 continue;
3280 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
3281 continue;
3282
3283 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3284 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3285 print_import_config(cb, vname, child[c], depth + 2);
3286 free(vname);
3287 }
3288
3289 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
3290 &child, &children) == 0) {
3291 (void) printf(gettext("\tcache\n"));
3292 for (c = 0; c < children; c++) {
3293 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3294 cb->cb_name_flags);
3295 (void) printf("\t %s\n", vname);
3296 free(vname);
3297 }
3298 }
3299
3300 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
3301 &child, &children) == 0) {
3302 (void) printf(gettext("\tspares\n"));
3303 for (c = 0; c < children; c++) {
3304 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3305 cb->cb_name_flags);
3306 (void) printf("\t %s\n", vname);
3307 free(vname);
3308 }
3309 }
3310 }
3311
3312 /*
3313 * Print specialized class vdevs.
3314 *
3315 * These are recorded as top level vdevs in the main pool child array
3316 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
3317 * print_status_config() or print_import_config() to print the top-level
3318 * class vdevs; any of their children (e.g. mirrored slogs) are then printed
3319 * recursively, which works because only the top-level vdev is marked.
3320 */
3321 static void
3322 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
3323 const char *class)
3324 {
3325 uint_t c, children;
3326 nvlist_t **child;
3327 boolean_t printed = B_FALSE;
3328
3329 assert(zhp != NULL || !cb->cb_verbose);
3330
3331 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
3332 &children) != 0)
3333 return;
3334
3335 for (c = 0; c < children; c++) {
3336 uint64_t is_log = B_FALSE;
3337 const char *bias = NULL;
3338 const char *type = NULL;
3339
3340 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3341 &is_log);
3342
3343 if (is_log) {
3344 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
3345 } else {
3346 (void) nvlist_lookup_string(child[c],
3347 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
3348 (void) nvlist_lookup_string(child[c],
3349 ZPOOL_CONFIG_TYPE, &type);
3350 }
3351
3352 if (bias == NULL || strcmp(bias, class) != 0)
3353 continue;
3354 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
3355 continue;
3356
3357 if (!printed) {
3358 (void) printf("\t%s\t\n", gettext(class));
3359 printed = B_TRUE;
3360 }
3361
3362 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
3363 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3364 if (cb->cb_print_status)
3365 print_status_config(zhp, cb, name, child[c], 2,
3366 B_FALSE, NULL);
3367 else
3368 print_import_config(cb, name, child[c], 2);
3369 free(name);
3370 }
3371 }
3372
3373 /*
3374 * Display the import status for the given pool configuration.
3375 */
3376 static int
3377 show_import(nvlist_t *config, boolean_t report_error)
3378 {
3379 uint64_t pool_state;
3380 vdev_stat_t *vs;
3381 const char *name;
3382 uint64_t guid;
3383 uint64_t hostid = 0;
3384 const char *msgid;
3385 const char *hostname = "unknown";
3386 nvlist_t *nvroot, *nvinfo;
3387 zpool_status_t reason;
3388 zpool_errata_t errata;
3389 const char *health;
3390 uint_t vsc;
3391 const char *comment;
3392 const char *indent;
3393 char buf[2048];
3394 status_cbdata_t cb = { 0 };
3395
3396 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3397 &name) == 0);
3398 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3399 &guid) == 0);
3400 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3401 &pool_state) == 0);
3402 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3403 &nvroot) == 0);
3404
3405 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
3406 (uint64_t **)&vs, &vsc) == 0);
3407 health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
3408
3409 reason = zpool_import_status(config, &msgid, &errata);
3410
3411 /*
3412 * If we're importing using a cachefile, then we won't report any
3413 * errors unless we are in the scan phase of the import.
3414 */
3415 if (reason != ZPOOL_STATUS_OK && !report_error)
3416 return (reason);
3417
3418 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) {
3419 indent = " ";
3420 } else {
3421 comment = NULL;
3422 indent = "";
3423 }
3424
3425 (void) printf(gettext("%s pool: %s\n"), indent, name);
3426 (void) printf(gettext("%s id: %llu\n"), indent, (u_longlong_t)guid);
3427 (void) printf(gettext("%s state: %s"), indent, health);
3428 if (pool_state == POOL_STATE_DESTROYED)
3429 (void) printf(gettext(" (DESTROYED)"));
3430 (void) printf("\n");
3431
3432 if (reason != ZPOOL_STATUS_OK) {
3433 (void) printf("%s", indent);
3434 printf_color(ANSI_BOLD, gettext("status: "));
3435 }
3436 switch (reason) {
3437 case ZPOOL_STATUS_MISSING_DEV_R:
3438 case ZPOOL_STATUS_MISSING_DEV_NR:
3439 case ZPOOL_STATUS_BAD_GUID_SUM:
3440 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3441 "missing from the system.\n"));
3442 break;
3443
3444 case ZPOOL_STATUS_CORRUPT_LABEL_R:
3445 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
3446 printf_color(ANSI_YELLOW, gettext("One or more devices "
3447 "contains corrupted data.\n"));
3448 break;
3449
3450 case ZPOOL_STATUS_CORRUPT_DATA:
3451 printf_color(ANSI_YELLOW, gettext("The pool data is "
3452 "corrupted.\n"));
3453 break;
3454
3455 case ZPOOL_STATUS_OFFLINE_DEV:
3456 printf_color(ANSI_YELLOW, gettext("One or more devices "
3457 "are offlined.\n"));
3458 break;
3459
3460 case ZPOOL_STATUS_CORRUPT_POOL:
3461 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
3462 "corrupted.\n"));
3463 break;
3464
3465 case ZPOOL_STATUS_VERSION_OLDER:
3466 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3467 "a legacy on-disk version.\n"));
3468 break;
3469
3470 case ZPOOL_STATUS_VERSION_NEWER:
3471 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3472 "an incompatible version.\n"));
3473 break;
3474
3475 case ZPOOL_STATUS_FEAT_DISABLED:
3476 printf_color(ANSI_YELLOW, gettext("Some supported "
3477 "features are not enabled on the pool.\n"
3478 "\t%s(Note that they may be intentionally disabled if the\n"
3479 "\t%s'compatibility' property is set.)\n"), indent, indent);
3480 break;
3481
3482 case ZPOOL_STATUS_COMPATIBILITY_ERR:
3483 printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
3484 "the file(s) indicated by the 'compatibility'\n"
3485 "\t%sproperty.\n"), indent);
3486 break;
3487
3488 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
3489 printf_color(ANSI_YELLOW, gettext("One or more features "
3490 "are enabled on the pool despite not being\n"
3491 "\t%srequested by the 'compatibility' property.\n"),
3492 indent);
3493 break;
3494
3495 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3496 printf_color(ANSI_YELLOW, gettext("The pool uses the following "
3497 "feature(s) not supported on this system:\n"));
3498 color_start(ANSI_YELLOW);
3499 zpool_collect_unsup_feat(config, buf, 2048);
3500 (void) printf("%s", buf);
3501 color_end();
3502 break;
3503
3504 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3505 printf_color(ANSI_YELLOW, gettext("The pool can only be "
3506 "accessed in read-only mode on this system. It\n"
3507 "\t%scannot be accessed in read-write mode because it uses "
3508 "the following\n"
3509 "\t%sfeature(s) not supported on this system:\n"),
3510 indent, indent);
3511 color_start(ANSI_YELLOW);
3512 zpool_collect_unsup_feat(config, buf, 2048);
3513 (void) printf("%s", buf);
3514 color_end();
3515 break;
3516
3517 case ZPOOL_STATUS_HOSTID_ACTIVE:
3518 printf_color(ANSI_YELLOW, gettext("The pool is currently "
3519 "imported by another system.\n"));
3520 break;
3521
3522 case ZPOOL_STATUS_HOSTID_REQUIRED:
3523 printf_color(ANSI_YELLOW, gettext("The pool has the "
3524 "multihost property on. It cannot\n"
3525 "\t%sbe safely imported when the system hostid is not "
3526 "set.\n"), indent);
3527 break;
3528
3529 case ZPOOL_STATUS_HOSTID_MISMATCH:
3530 printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
3531 "by another system.\n"));
3532 break;
3533
3534 case ZPOOL_STATUS_FAULTED_DEV_R:
3535 case ZPOOL_STATUS_FAULTED_DEV_NR:
3536 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3537 "faulted.\n"));
3538 break;
3539
3540 case ZPOOL_STATUS_BAD_LOG:
3541 printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
3542 "be read.\n"));
3543 break;
3544
3545 case ZPOOL_STATUS_RESILVERING:
3546 case ZPOOL_STATUS_REBUILDING:
3547 printf_color(ANSI_YELLOW, gettext("One or more devices were "
3548 "being resilvered.\n"));
3549 break;
3550
3551 case ZPOOL_STATUS_ERRATA:
3552 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
3553 errata);
3554 break;
3555
3556 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
3557 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3558 "configured to use a non-native block size.\n"
3559 "\t%sExpect reduced performance.\n"), indent);
3560 break;
3561
3562 default:
3563 /*
3564 * No other status can be seen when importing pools.
3565 */
3566 assert(reason == ZPOOL_STATUS_OK);
3567 }
3568
3569 /*
3570 * Print out an action according to the overall state of the pool.
3571 */
3572 if (vs->vs_state != VDEV_STATE_HEALTHY ||
3573 reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) {
3574 (void) printf("%s", indent);
3575 (void) printf(gettext("action: "));
3576 }
3577 if (vs->vs_state == VDEV_STATE_HEALTHY) {
3578 if (reason == ZPOOL_STATUS_VERSION_OLDER ||
3579 reason == ZPOOL_STATUS_FEAT_DISABLED) {
3580 (void) printf(gettext("The pool can be imported using "
3581 "its name or numeric identifier, though\n"
3582 "\t%ssome features will not be available without "
3583 "an explicit 'zpool upgrade'.\n"), indent);
3584 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
3585 (void) printf(gettext("The pool can be imported using "
3586 "its name or numeric\n"
3587 "\t%sidentifier, though the file(s) indicated by "
3588 "its 'compatibility'\n"
3589 "\t%sproperty cannot be parsed at this time.\n"),
3590 indent, indent);
3591 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
3592 (void) printf(gettext("The pool can be imported using "
3593 "its name or numeric identifier and\n"
3594 "\t%sthe '-f' flag.\n"), indent);
3595 } else if (reason == ZPOOL_STATUS_ERRATA) {
3596 switch (errata) {
3597 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
3598 (void) printf(gettext("The pool can be "
3599 "imported using its name or numeric "
3600 "identifier,\n"
3601 "\t%showever there is a compatibility "
3602 "issue which should be corrected\n"
3603 "\t%sby running 'zpool scrub'\n"),
3604 indent, indent);
3605 break;
3606
3607 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
3608 (void) printf(gettext("The pool cannot be "
3609 "imported with this version of ZFS due to\n"
3610 "\t%san active asynchronous destroy. "
3611 "Revert to an earlier version\n"
3612 "\t%sand allow the destroy to complete "
3613 "before updating.\n"), indent, indent);
3614 break;
3615
3616 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
3617 (void) printf(gettext("Existing encrypted "
3618 "datasets contain an on-disk "
3619 "incompatibility, which\n"
3620 "\t%sneeds to be corrected. Backup these "
3621 "datasets to new encrypted datasets\n"
3622 "\t%sand destroy the old ones.\n"),
3623 indent, indent);
3624 break;
3625
3626 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
3627 (void) printf(gettext("Existing encrypted "
3628 "snapshots and bookmarks contain an "
3629 "on-disk\n"
3630 "\t%sincompatibility. This may cause "
3631 "on-disk corruption if they are used\n"
3632 "\t%swith 'zfs recv'. To correct the "
3633 "issue, enable the bookmark_v2 feature.\n"
3634 "\t%sNo additional action is needed if "
3635 "there are no encrypted snapshots or\n"
3636 "\t%sbookmarks. If preserving the "
3637 "encrypted snapshots and bookmarks is\n"
3638 "\t%srequired, use a non-raw send to "
3639 "backup and restore them. Alternately,\n"
3640 "\t%sthey may be removed to resolve the "
3641 "incompatibility.\n"), indent, indent,
3642 indent, indent, indent, indent);
3643 break;
3644 default:
3645 /*
3646 * All errata must contain an action message.
3647 */
3648 assert(errata == ZPOOL_ERRATA_NONE);
3649 }
3650 } else {
3651 (void) printf(gettext("The pool can be imported using "
3652 "its name or numeric identifier.\n"));
3653 }
3654 } else if (vs->vs_state == VDEV_STATE_DEGRADED) {
3655 (void) printf(gettext("The pool can be imported despite "
3656 "missing or damaged devices. The\n"
3657 "\t%sfault tolerance of the pool may be compromised if "
3658 "imported.\n"), indent);
3659 } else {
3660 switch (reason) {
3661 case ZPOOL_STATUS_VERSION_NEWER:
3662 (void) printf(gettext("The pool cannot be imported. "
3663 "Access the pool on a system running newer\n"
3664 "\t%ssoftware, or recreate the pool from "
3665 "backup.\n"), indent);
3666 break;
3667 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3668 (void) printf(gettext("The pool cannot be imported. "
3669 "Access the pool on a system that supports\n"
3670 "\t%sthe required feature(s), or recreate the pool "
3671 "from backup.\n"), indent);
3672 break;
3673 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3674 (void) printf(gettext("The pool cannot be imported in "
3675 "read-write mode. Import the pool with\n"
3676 "\t%s'-o readonly=on', access the pool on a system "
3677 "that supports the\n"
3678 "\t%srequired feature(s), or recreate the pool "
3679 "from backup.\n"), indent, indent);
3680 break;
3681 case ZPOOL_STATUS_MISSING_DEV_R:
3682 case ZPOOL_STATUS_MISSING_DEV_NR:
3683 case ZPOOL_STATUS_BAD_GUID_SUM:
3684 (void) printf(gettext("The pool cannot be imported. "
3685 "Attach the missing\n"
3686 "\t%sdevices and try again.\n"), indent);
3687 break;
3688 case ZPOOL_STATUS_HOSTID_ACTIVE:
3689 VERIFY0(nvlist_lookup_nvlist(config,
3690 ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
3691
3692 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3693 hostname = fnvlist_lookup_string(nvinfo,
3694 ZPOOL_CONFIG_MMP_HOSTNAME);
3695
3696 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3697 hostid = fnvlist_lookup_uint64(nvinfo,
3698 ZPOOL_CONFIG_MMP_HOSTID);
3699
3700 (void) printf(gettext("The pool must be exported from "
3701 "%s (hostid=%"PRIx64")\n"
3702 "\t%sbefore it can be safely imported.\n"),
3703 hostname, hostid, indent);
3704 break;
3705 case ZPOOL_STATUS_HOSTID_REQUIRED:
3706 (void) printf(gettext("Set a unique system hostid with "
3707 "the zgenhostid(8) command.\n"));
3708 break;
3709 default:
3710 (void) printf(gettext("The pool cannot be imported due "
3711 "to damaged devices or data.\n"));
3712 }
3713 }
3714
3715 /* Print the comment attached to the pool. */
3716 if (comment != NULL)
3717 (void) printf(gettext("comment: %s\n"), comment);
3718
3719 /*
3720 * If the state is "closed" or "can't open", and the aux state
3721 * is "corrupt data":
3722 */
3723 if ((vs->vs_state == VDEV_STATE_CLOSED ||
3724 vs->vs_state == VDEV_STATE_CANT_OPEN) &&
3725 vs->vs_aux == VDEV_AUX_CORRUPT_DATA) {
3726 if (pool_state == POOL_STATE_DESTROYED)
3727 (void) printf(gettext("\t%sThe pool was destroyed, "
3728 "but can be imported using the '-Df' flags.\n"),
3729 indent);
3730 else if (pool_state != POOL_STATE_EXPORTED)
3731 (void) printf(gettext("\t%sThe pool may be active on "
3732 "another system, but can be imported using\n"
3733 "\t%sthe '-f' flag.\n"), indent, indent);
3734 }
3735
3736 if (msgid != NULL) {
3737 (void) printf(gettext("%s see: "
3738 "https://openzfs.github.io/openzfs-docs/msg/%s\n"),
3739 indent, msgid);
3740 }
3741
3742 (void) printf(gettext("%sconfig:\n\n"), indent);
3743
3744 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
3745 VDEV_NAME_TYPE_ID);
3746 if (cb.cb_namewidth < 10)
3747 cb.cb_namewidth = 10;
3748
3749 print_import_config(&cb, name, nvroot, 0);
3750
3751 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
3752 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
3753 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
3754
3755 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
3756 (void) printf(gettext("\n\t%sAdditional devices are known to "
3757 "be part of this pool, though their\n"
3758 "\t%sexact configuration cannot be determined.\n"),
3759 indent, indent);
3760 }
3761 return (0);
3762 }
3763
3764 static boolean_t
3765 zfs_force_import_required(nvlist_t *config)
3766 {
3767 uint64_t state;
3768 uint64_t hostid = 0;
3769 nvlist_t *nvinfo;
3770
3771 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3772 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3773
3774 /*
3775 * The hostid on LOAD_INFO comes from the MOS label via
3776 * spa_tryimport(). If it's not there, then we're likely talking to an
3777 * older kernel, so use the top one, which will be from the label
3778 * discovered in zpool_find_import(), or if a cachefile is in use, the
3779 * local hostid.
3780 */
3781 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
3782 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
3783 &hostid);
3784
3785 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3786 return (B_TRUE);
3787
3788 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3789 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3790 ZPOOL_CONFIG_MMP_STATE);
3791
3792 if (mmp_state != MMP_STATE_INACTIVE)
3793 return (B_TRUE);
3794 }
3795
3796 return (B_FALSE);
3797 }
3798
3799 /*
3800 * Perform the import for the given configuration. This passes the heavy
3801 * lifting off to zpool_import_props(), and then mounts the datasets contained
3802 * within the pool.
3803 */
3804 static int
3805 do_import(nvlist_t *config, const char *newname, const char *mntopts,
3806 nvlist_t *props, int flags, uint_t mntthreads)
3807 {
3808 int ret = 0;
3809 int ms_status = 0;
3810 zpool_handle_t *zhp;
3811 const char *name;
3812 uint64_t version;
3813
3814 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
3815 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3816
3817 if (!SPA_VERSION_IS_SUPPORTED(version)) {
3818 (void) fprintf(stderr, gettext("cannot import '%s': pool "
3819 "is formatted using an unsupported ZFS version\n"), name);
3820 return (1);
3821 } else if (zfs_force_import_required(config) &&
3822 !(flags & ZFS_IMPORT_ANY_HOST)) {
3823 mmp_state_t mmp_state = MMP_STATE_INACTIVE;
3824 nvlist_t *nvinfo;
3825
3826 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3827 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
3828 mmp_state = fnvlist_lookup_uint64(nvinfo,
3829 ZPOOL_CONFIG_MMP_STATE);
3830
3831 if (mmp_state == MMP_STATE_ACTIVE) {
3832 const char *hostname = "<unknown>";
3833 uint64_t hostid = 0;
3834
3835 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3836 hostname = fnvlist_lookup_string(nvinfo,
3837 ZPOOL_CONFIG_MMP_HOSTNAME);
3838
3839 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3840 hostid = fnvlist_lookup_uint64(nvinfo,
3841 ZPOOL_CONFIG_MMP_HOSTID);
3842
3843 (void) fprintf(stderr, gettext("cannot import '%s': "
3844 "pool is imported on %s (hostid: "
3845 "0x%"PRIx64")\nExport the pool on the other "
3846 "system, then run 'zpool import'.\n"),
3847 name, hostname, hostid);
3848 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
3849 (void) fprintf(stderr, gettext("cannot import '%s': "
3850 "pool has the multihost property on and the\n"
3851 "system's hostid is not set. Set a unique hostid "
3852 "with the zgenhostid(8) command.\n"), name);
3853 } else {
3854 const char *hostname = "<unknown>";
3855 time_t timestamp = 0;
3856 uint64_t hostid = 0;
3857
3858 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
3859 hostname = fnvlist_lookup_string(nvinfo,
3860 ZPOOL_CONFIG_HOSTNAME);
3861 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
3862 hostname = fnvlist_lookup_string(config,
3863 ZPOOL_CONFIG_HOSTNAME);
3864
3865 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
3866 timestamp = fnvlist_lookup_uint64(config,
3867 ZPOOL_CONFIG_TIMESTAMP);
3868
3869 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
3870 hostid = fnvlist_lookup_uint64(nvinfo,
3871 ZPOOL_CONFIG_HOSTID);
3872 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
3873 hostid = fnvlist_lookup_uint64(config,
3874 ZPOOL_CONFIG_HOSTID);
3875
3876 (void) fprintf(stderr, gettext("cannot import '%s': "
3877 "pool was previously in use from another system.\n"
3878 "Last accessed by %s (hostid=%"PRIx64") at %s"
3879 "The pool can be imported, use 'zpool import -f' "
3880 "to import the pool.\n"), name, hostname,
3881 hostid, ctime(&timestamp));
3882 }
3883
3884 return (1);
3885 }
3886
3887 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
3888 return (1);
3889
3890 if (newname != NULL)
3891 name = newname;
3892
3893 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
3894 return (1);
3895
3896 /*
3897 * Loading keys is best effort. We don't want to return immediately
3898 * if it fails but we do want to give the error to the caller.
3899 */
3900 if (flags & ZFS_IMPORT_LOAD_KEYS &&
3901 zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
3902 ret = 1;
3903
3904 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
3905 !(flags & ZFS_IMPORT_ONLY)) {
3906 ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads);
3907 if (ms_status == EZFS_SHAREFAILED) {
3908 (void) fprintf(stderr, gettext("Import was "
3909 "successful, but unable to share some datasets\n"));
3910 } else if (ms_status == EZFS_MOUNTFAILED) {
3911 (void) fprintf(stderr, gettext("Import was "
3912 "successful, but unable to mount some datasets\n"));
3913 }
3914 }
3915
3916 zpool_close(zhp);
3917 return (ret);
3918 }
3919
3920 typedef struct import_parameters {
3921 nvlist_t *ip_config;
3922 const char *ip_mntopts;
3923 nvlist_t *ip_props;
3924 int ip_flags;
3925 uint_t ip_mntthreads;
3926 int *ip_err;
3927 } import_parameters_t;
3928
3929 static void
3930 do_import_task(void *arg)
3931 {
3932 import_parameters_t *ip = arg;
3933 *ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts,
3934 ip->ip_props, ip->ip_flags, ip->ip_mntthreads);
3935 free(ip);
3936 }
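/*
 * Note: every parallel import task ORs its do_import() status into the
 * shared *ip_err, so import_pools() below reports failure if any one of
 * the concurrent imports fails.
 */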
3937
3938
3939 static int
3940 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
3941 char *orig_name, char *new_name, importargs_t *import)
3942 {
3943 nvlist_t *config = NULL;
3944 nvlist_t *found_config = NULL;
3945 uint64_t pool_state;
3946 boolean_t pool_specified = (import->poolname != NULL ||
3947 import->guid != 0);
3948 uint_t npools = 0;
3949
3950
3951 tpool_t *tp = NULL;
3952 if (import->do_all) {
3953 tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
3954 0, NULL);
3955 }
3956
3957 /*
3958 * At this point we have a list of import candidate configs. Even if
3959 * we were searching by pool name or guid, we still need to
3960 * post-process the list to deal with pool state and possible
3961 * duplicate names.
3962 */
3963 int err = 0;
3964 nvpair_t *elem = NULL;
3965 boolean_t first = B_TRUE;
3966 if (!pool_specified && import->do_all) {
3967 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
3968 npools++;
3969 }
3970 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3971
3972 verify(nvpair_value_nvlist(elem, &config) == 0);
3973
3974 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3975 &pool_state) == 0);
3976 if (!import->do_destroyed &&
3977 pool_state == POOL_STATE_DESTROYED)
3978 continue;
3979 if (import->do_destroyed &&
3980 pool_state != POOL_STATE_DESTROYED)
3981 continue;
3982
3983 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
3984 import->policy) == 0);
3985
3986 if (!pool_specified) {
3987 if (first)
3988 first = B_FALSE;
3989 else if (!import->do_all)
3990 (void) fputc('\n', stdout);
3991
3992 if (import->do_all) {
3993 import_parameters_t *ip = safe_malloc(
3994 sizeof (import_parameters_t));
3995
3996 ip->ip_config = config;
3997 ip->ip_mntopts = mntopts;
3998 ip->ip_props = props;
3999 ip->ip_flags = flags;
4000 ip->ip_mntthreads = mount_tp_nthr / npools;
4001 ip->ip_err = &err;
4002
4003 (void) tpool_dispatch(tp, do_import_task,
4004 (void *)ip);
4005 } else {
4006 /*
4007 * If we're importing from cachefile, then
4008 * we don't want to report errors until we
4009 * are in the scan phase of the import. If
4010 * we get an error, then we return that error
4011 * to invoke the scan phase.
4012 */
4013 if (import->cachefile && !import->scan)
4014 err = show_import(config, B_FALSE);
4015 else
4016 (void) show_import(config, B_TRUE);
4017 }
4018 } else if (import->poolname != NULL) {
4019 const char *name;
4020
4021 /*
4022 * We are searching for a pool based on name.
4023 */
4024 verify(nvlist_lookup_string(config,
4025 ZPOOL_CONFIG_POOL_NAME, &name) == 0);
4026
4027 if (strcmp(name, import->poolname) == 0) {
4028 if (found_config != NULL) {
4029 (void) fprintf(stderr, gettext(
4030 "cannot import '%s': more than "
4031 "one matching pool\n"),
4032 import->poolname);
4033 (void) fprintf(stderr, gettext(
4034 "import by numeric ID instead\n"));
4035 err = B_TRUE;
4036 }
4037 found_config = config;
4038 }
4039 } else {
4040 uint64_t guid;
4041
4042 /*
4043 * Search for a pool by guid.
4044 */
4045 verify(nvlist_lookup_uint64(config,
4046 ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
4047
4048 if (guid == import->guid)
4049 found_config = config;
4050 }
4051 }
4052 if (import->do_all) {
4053 tpool_wait(tp);
4054 tpool_destroy(tp);
4055 }
4056
4057 /*
4058 * If we were searching for a specific pool, verify that we found a
4059 * pool, and then do the import.
4060 */
4061 if (pool_specified && err == 0) {
4062 if (found_config == NULL) {
4063 (void) fprintf(stderr, gettext("cannot import '%s': "
4064 "no such pool available\n"), orig_name);
4065 err = B_TRUE;
4066 } else {
4067 err |= do_import(found_config, new_name,
4068 mntopts, props, flags, mount_tp_nthr);
4069 }
4070 }
4071
4072 /*
4073 * If we were just looking for pools, report an error if none were
4074 * found.
4075 */
4076 if (!pool_specified && first)
4077 (void) fprintf(stderr,
4078 gettext("no pools available to import\n"));
4079 return (err);
4080 }
4081
4082 typedef struct target_exists_args {
4083 const char *poolname;
4084 uint64_t poolguid;
4085 } target_exists_args_t;
4086
4087 static int
4088 name_or_guid_exists(zpool_handle_t *zhp, void *data)
4089 {
4090 target_exists_args_t *args = data;
4091 nvlist_t *config = zpool_get_config(zhp, NULL);
4092 int found = 0;
4093
4094 if (config == NULL)
4095 return (0);
4096
4097 if (args->poolname != NULL) {
4098 const char *pool_name;
4099
4100 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4101 &pool_name) == 0);
4102 if (strcmp(pool_name, args->poolname) == 0)
4103 found = 1;
4104 } else {
4105 uint64_t pool_guid;
4106
4107 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4108 &pool_guid) == 0);
4109 if (pool_guid == args->poolguid)
4110 found = 1;
4111 }
4112 zpool_close(zhp);
4113
4114 return (found);
4115 }
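/*
 * Note: zpool_iter() stops iterating as soon as a callback returns
 * nonzero, so zpool_iter(g_zfs, name_or_guid_exists, &search) yields
 * nonzero exactly when a matching imported pool already exists.
 */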
4116 /*
4117 * zpool checkpoint <pool>
4118 * checkpoint --discard <pool>
4119 *
4120 * -d Discard the checkpoint from a checkpointed
4121 * --discard pool.
4122 *
4123 * -w Wait for discarding a checkpoint to complete.
4124 * --wait
4125 *
4126 * Checkpoints the specified pool, by taking a "snapshot" of its
4127 * current state. A pool can only have one checkpoint at a time.
4128 */
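/*
 * Example usage (illustrative):
 *
 *	# zpool checkpoint tank		(take a checkpoint)
 *	# zpool checkpoint -d -w tank	(discard it and wait for completion)
 */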
4129 int
4130 zpool_do_checkpoint(int argc, char **argv)
4131 {
4132 boolean_t discard, wait;
4133 char *pool;
4134 zpool_handle_t *zhp;
4135 int c, err;
4136
4137 struct option long_options[] = {
4138 {"discard", no_argument, NULL, 'd'},
4139 {"wait", no_argument, NULL, 'w'},
4140 {0, 0, 0, 0}
4141 };
4142
4143 discard = B_FALSE;
4144 wait = B_FALSE;
4145 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
4146 switch (c) {
4147 case 'd':
4148 discard = B_TRUE;
4149 break;
4150 case 'w':
4151 wait = B_TRUE;
4152 break;
4153 case '?':
4154 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4155 optopt);
4156 usage(B_FALSE);
4157 }
4158 }
4159
4160 if (wait && !discard) {
4161 (void) fprintf(stderr, gettext("--wait only valid when "
4162 "--discard also specified\n"));
4163 usage(B_FALSE);
4164 }
4165
4166 argc -= optind;
4167 argv += optind;
4168
4169 if (argc < 1) {
4170 (void) fprintf(stderr, gettext("missing pool argument\n"));
4171 usage(B_FALSE);
4172 }
4173
4174 if (argc > 1) {
4175 (void) fprintf(stderr, gettext("too many arguments\n"));
4176 usage(B_FALSE);
4177 }
4178
4179 pool = argv[0];
4180
4181 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
4182 /* As a special case, check for use of '/' in the name */
4183 if (strchr(pool, '/') != NULL)
4184 (void) fprintf(stderr, gettext("'zpool checkpoint' "
4185 "doesn't work on datasets. To save the state "
4186 "of a dataset from a specific point in time "
4187 "please use 'zfs snapshot'\n"));
4188 return (1);
4189 }
4190
4191 if (discard) {
4192 err = (zpool_discard_checkpoint(zhp) != 0);
4193 if (err == 0 && wait)
4194 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
4195 } else {
4196 err = (zpool_checkpoint(zhp) != 0);
4197 }
4198
4199 zpool_close(zhp);
4200
4201 return (err);
4202 }
4203
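/*
 * Option value returned by getopt_long() for "--rewind-to-checkpoint";
 * chosen above the range of short-option characters to avoid collisions.
 */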
4204 #define CHECKPOINT_OPT 1024
4205
4206 /*
4207 * zpool prefetch <type> [<type opts>] <pool>
4208 *
4209 * Prefetches a particular type of data in the specified pool.
4210 */
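/*
 * Example usage (illustrative):
 *
 *	# zpool prefetch -t ddt tank	(prefetch the pool's dedup tables)
 */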
4211 int
4212 zpool_do_prefetch(int argc, char **argv)
4213 {
4214 int c;
4215 char *poolname;
4216 char *typestr = NULL;
4217 zpool_prefetch_type_t type;
4218 zpool_handle_t *zhp;
4219 int err = 0;
4220
4221 while ((c = getopt(argc, argv, "t:")) != -1) {
4222 switch (c) {
4223 case 't':
4224 typestr = optarg;
4225 break;
4226 case ':':
4227 (void) fprintf(stderr, gettext("missing argument for "
4228 "'%c' option\n"), optopt);
4229 usage(B_FALSE);
4230 break;
4231 case '?':
4232 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4233 optopt);
4234 usage(B_FALSE);
4235 }
4236 }
4237 argc -= optind;
4238 argv += optind;
4239
4240 if (argc < 1) {
4241 (void) fprintf(stderr, gettext("missing pool name argument\n"));
4242 usage(B_FALSE);
4243 }
4244
4245 if (argc > 1) {
4246 (void) fprintf(stderr, gettext("too many arguments\n"));
4247 usage(B_FALSE);
4248 }
4249
4250 poolname = argv[0];
4251
4252 argc--;
4253 argv++;
4254
4255 if (typestr != NULL && strcmp(typestr, "ddt") == 0) {
4256 type = ZPOOL_PREFETCH_DDT;
4257 } else {
4258 (void) fprintf(stderr, gettext("unsupported prefetch type\n"));
4259 usage(B_FALSE);
4260 }
4261
4262 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
4263 return (1);
4264
4265 err = zpool_prefetch(zhp, type);
4266
4267 zpool_close(zhp);
4268
4269 return (err);
4270 }
4271
4272 /*
4273 * zpool import [-d dir] [-D]
4274 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4275 * [-d dir | -c cachefile | -s] [-f] -a
4276 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4277 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
4278 * [newpool]
4279 *
4280 * -c Read pool information from a cachefile instead of searching
4281 * devices. If importing from a cachefile config fails, then
4282 * fall back to searching for devices only in the directories that
4283 * exist in the cachefile.
4284 *
4285 * -d Scan in a specific directory, other than /dev/. More than
4286 * one directory can be specified using multiple '-d' options.
4287 *
4288 * -D Scan for previously destroyed pools; with -a, import all
4289 * destroyed pools found, otherwise import only the specified one.
4290 *
4291 * -R Temporarily import the pool, with all mountpoints relative to
4292 * the given root. The pool will remain exported when the machine
4293 * is rebooted.
4294 *
4295 * -V Import even in the presence of faulted vdevs. This is an
4296 * intentionally undocumented option for testing purposes, and
4297 * treats the pool configuration as complete, leaving any bad
4298 * vdevs in the FAULTED state. In other words, it does a verbatim
4299 * import.
4300 *
4301 * -f Force import, even if it appears that the pool is active.
4302 *
4303 * -F Attempt rewind if necessary.
4304 *
4305 * -n See if rewind would work, but don't actually rewind.
4306 *
4307 * -N Import the pool but don't mount datasets.
4308 *
4309 * -T Specify a starting txg to use for import. This option is
4310 * an intentionally undocumented option for testing purposes.
4311 *
4312 * -a Import all pools found.
4313 *
4314 * -l Load encryption keys while importing.
4315 *
4316 * -o Set property=value and/or temporary mount options (without '=').
4317 *
4318 * -s Scan using the default search path; the libblkid cache will
4319 * not be consulted.
4320 *
4321 * --rewind-to-checkpoint
4322 * Import the pool and revert to the checkpoint.
4323 *
4324 * The import command scans for pools to import, and imports pools based on
4325 * pool name or GUID. The pool can also be renamed as part of the import.
4326 */
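/*
 * Example usages (illustrative):
 *
 *	# zpool import				(list importable pools)
 *	# zpool import -a			(import every pool found)
 *	# zpool import -d /dev/disk/by-id tank	(search a specific directory)
 *	# zpool import tank newtank		(rename while importing)
 */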
4327 int
4328 zpool_do_import(int argc, char **argv)
4329 {
4330 char **searchdirs = NULL;
4331 char *env, *envdup = NULL;
4332 int nsearch = 0;
4333 int c;
4334 int err = 0;
4335 nvlist_t *pools = NULL;
4336 boolean_t do_all = B_FALSE;
4337 boolean_t do_destroyed = B_FALSE;
4338 char *mntopts = NULL;
4339 uint64_t searchguid = 0;
4340 char *searchname = NULL;
4341 char *propval;
4342 nvlist_t *policy = NULL;
4343 nvlist_t *props = NULL;
4344 int flags = ZFS_IMPORT_NORMAL;
4345 uint32_t rewind_policy = ZPOOL_NO_REWIND;
4346 boolean_t dryrun = B_FALSE;
4347 boolean_t do_rewind = B_FALSE;
4348 boolean_t xtreme_rewind = B_FALSE;
4349 boolean_t do_scan = B_FALSE;
4350 boolean_t pool_exists = B_FALSE;
4351 uint64_t txg = -1ULL;
4352 char *cachefile = NULL;
4353 importargs_t idata = { 0 };
4354 char *endptr;
4355
4356 struct option long_options[] = {
4357 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
4358 {0, 0, 0, 0}
4359 };
4360
4361 /* check options */
4362 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
4363 long_options, NULL)) != -1) {
4364 switch (c) {
4365 case 'a':
4366 do_all = B_TRUE;
4367 break;
4368 case 'c':
4369 cachefile = optarg;
4370 break;
4371 case 'd':
4372 searchdirs = safe_realloc(searchdirs,
4373 (nsearch + 1) * sizeof (char *));
4374 searchdirs[nsearch++] = optarg;
4375 break;
4376 case 'D':
4377 do_destroyed = B_TRUE;
4378 break;
4379 case 'f':
4380 flags |= ZFS_IMPORT_ANY_HOST;
4381 break;
4382 case 'F':
4383 do_rewind = B_TRUE;
4384 break;
4385 case 'l':
4386 flags |= ZFS_IMPORT_LOAD_KEYS;
4387 break;
4388 case 'm':
4389 flags |= ZFS_IMPORT_MISSING_LOG;
4390 break;
4391 case 'n':
4392 dryrun = B_TRUE;
4393 break;
4394 case 'N':
4395 flags |= ZFS_IMPORT_ONLY;
4396 break;
4397 case 'o':
4398 if ((propval = strchr(optarg, '=')) != NULL) {
4399 *propval = '\0';
4400 propval++;
4401 if (add_prop_list(optarg, propval,
4402 &props, B_TRUE))
4403 goto error;
4404 } else {
4405 mntopts = optarg;
4406 }
4407 break;
4408 case 'R':
4409 if (add_prop_list(zpool_prop_to_name(
4410 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
4411 goto error;
4412 if (add_prop_list_default(zpool_prop_to_name(
4413 ZPOOL_PROP_CACHEFILE), "none", &props))
4414 goto error;
4415 break;
4416 case 's':
4417 do_scan = B_TRUE;
4418 break;
4419 case 't':
4420 flags |= ZFS_IMPORT_TEMP_NAME;
4421 if (add_prop_list_default(zpool_prop_to_name(
4422 ZPOOL_PROP_CACHEFILE), "none", &props))
4423 goto error;
4424 break;
4425
4426 case 'T':
4427 errno = 0;
4428 txg = strtoull(optarg, &endptr, 0);
4429 if (errno != 0 || *endptr != '\0') {
4430 (void) fprintf(stderr,
4431 gettext("invalid txg value\n"));
4432 usage(B_FALSE);
4433 }
4434 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
4435 break;
4436 case 'V':
4437 flags |= ZFS_IMPORT_VERBATIM;
4438 break;
4439 case 'X':
4440 xtreme_rewind = B_TRUE;
4441 break;
4442 case CHECKPOINT_OPT:
4443 flags |= ZFS_IMPORT_CHECKPOINT;
4444 break;
4445 case ':':
4446 (void) fprintf(stderr, gettext("missing argument for "
4447 "'%c' option\n"), optopt);
4448 usage(B_FALSE);
4449 break;
4450 case '?':
4451 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4452 optopt);
4453 usage(B_FALSE);
4454 }
4455 }
4456
4457 argc -= optind;
4458 argv += optind;
4459
4460 if (cachefile && nsearch != 0) {
4461 (void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
4462 usage(B_FALSE);
4463 }
4464
4465 if (cachefile && do_scan) {
4466 (void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
4467 usage(B_FALSE);
4468 }
4469
4470 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
4471 (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
4472 usage(B_FALSE);
4473 }
4474
4475 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
4476 (void) fprintf(stderr, gettext("-l is only meaningful during "
4477 "an import\n"));
4478 usage(B_FALSE);
4479 }
4480
4481 if ((dryrun || xtreme_rewind) && !do_rewind) {
4482 (void) fprintf(stderr,
4483 gettext("-n or -X only meaningful with -F\n"));
4484 usage(B_FALSE);
4485 }
4486 if (dryrun)
4487 rewind_policy = ZPOOL_TRY_REWIND;
4488 else if (do_rewind)
4489 rewind_policy = ZPOOL_DO_REWIND;
4490 if (xtreme_rewind)
4491 rewind_policy |= ZPOOL_EXTREME_REWIND;
4492
4493 /* In the future, we can capture further policy and include it here */
4494 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
4495 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
4496 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
4497 rewind_policy) != 0)
4498 goto error;
4499
4500 /* check argument count */
4501 if (do_all) {
4502 if (argc != 0) {
4503 (void) fprintf(stderr, gettext("too many arguments\n"));
4504 usage(B_FALSE);
4505 }
4506 } else {
4507 if (argc > 2) {
4508 (void) fprintf(stderr, gettext("too many arguments\n"));
4509 usage(B_FALSE);
4510 }
4511 }
4512
4513 /*
4514 * Check for the effective uid. We do this explicitly here because
4515 * otherwise any attempt to discover pools will silently fail.
4516 */
4517 if (argc == 0 && geteuid() != 0) {
4518 (void) fprintf(stderr, gettext("cannot "
4519 "discover pools: permission denied\n"));
4520
4521 free(searchdirs);
4522 nvlist_free(props);
4523 nvlist_free(policy);
4524 return (1);
4525 }
4526
4527 /*
4528 * Depending on the arguments given, we do one of the following:
4529 *
4530 * <none> Iterate through all pools and display information about
4531 * each one.
4532 *
4533 * -a Iterate through all pools and try to import each one.
4534 *
4535 * <id> Find the pool that corresponds to the given GUID/pool
4536 * name and import that one.
4537 *
4538 * -D The above options apply only to destroyed pools.
4539 */
4540 if (argc != 0) {
4541 char *endptr;
4542
4543 errno = 0;
4544 searchguid = strtoull(argv[0], &endptr, 10);
4545 if (errno != 0 || *endptr != '\0') {
4546 searchname = argv[0];
4547 searchguid = 0;
4548 }
4549
4550 /*
4551 * User specified a name or guid. Ensure it's unique.
4552 */
4553 target_exists_args_t search = {searchname, searchguid};
4554 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
4555 }
4556
4557 /*
4558 * Check the environment for the preferred search path.
4559 */
4560 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
4561 char *dir, *tmp = NULL;
4562
4563 envdup = strdup(env);
4564
4565 for (dir = strtok_r(envdup, ":", &tmp);
4566 dir != NULL;
4567 dir = strtok_r(NULL, ":", &tmp)) {
4568 searchdirs = safe_realloc(searchdirs,
4569 (nsearch + 1) * sizeof (char *));
4570 searchdirs[nsearch++] = dir;
4571 }
4572 }
4573
4574 idata.path = searchdirs;
4575 idata.paths = nsearch;
4576 idata.poolname = searchname;
4577 idata.guid = searchguid;
4578 idata.cachefile = cachefile;
4579 idata.scan = do_scan;
4580 idata.policy = policy;
4581 idata.do_destroyed = do_destroyed;
4582 idata.do_all = do_all;
4583
4584 libpc_handle_t lpch = {
4585 .lpc_lib_handle = g_zfs,
4586 .lpc_ops = &libzfs_config_ops,
4587 .lpc_printerr = B_TRUE
4588 };
4589 pools = zpool_search_import(&lpch, &idata);
4590
4591 if (pools != NULL && pool_exists &&
4592 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
4593 (void) fprintf(stderr, gettext("cannot import '%s': "
4594 "a pool with that name already exists\n"),
4595 argv[0]);
4596 (void) fprintf(stderr, gettext("use the form '%s "
4597 "<pool | id> <newpool>' to give it a new name\n"),
4598 "zpool import");
4599 err = 1;
4600 } else if (pools == NULL && pool_exists) {
4601 (void) fprintf(stderr, gettext("cannot import '%s': "
4602 "a pool with that name is already created/imported,\n"),
4603 argv[0]);
4604 (void) fprintf(stderr, gettext("and no additional pools "
4605 "with that name were found\n"));
4606 err = 1;
4607 } else if (pools == NULL) {
4608 if (argc != 0) {
4609 (void) fprintf(stderr, gettext("cannot import '%s': "
4610 "no such pool available\n"), argv[0]);
4611 }
4612 err = 1;
4613 }
4614
4615 if (err == 1) {
4616 free(searchdirs);
4617 free(envdup);
4618 nvlist_free(policy);
4619 nvlist_free(pools);
4620 nvlist_free(props);
4621 return (1);
4622 }
4623
4624 err = import_pools(pools, props, mntopts, flags,
4625 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata);
4626
4627 /*
4628 * If we're using the cachefile and we failed to import, then
4629 * fall back to scanning the directory for pools that match
4630 * those in the cachefile.
4631 */
4632 if (err != 0 && cachefile != NULL) {
4633 (void) printf(gettext("cachefile import failed, retrying\n"));
4634
4635 /*
4636 * We use the scan flag to gather the directories that exist
4637 * in the cachefile. If we need to fallback to searching for
4638 * the pool config, we will only search devices in these
4639 * directories.
4640 */
4641 idata.scan = B_TRUE;
4642 nvlist_free(pools);
4643 pools = zpool_search_import(&lpch, &idata);
4644
4645 err = import_pools(pools, props, mntopts, flags,
4646 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL,
4647 &idata);
4648 }
4649
4650 error:
4651 nvlist_free(props);
4652 nvlist_free(pools);
4653 nvlist_free(policy);
4654 free(searchdirs);
4655 free(envdup);
4656
4657 return (err ? 1 : 0);
4658 }
4659
4660 /*
4661 * zpool sync [-f] [pool] ...
4662 *
4663 * -f (undocumented) force uberblock (and config including zpool cache file)
4664 * update.
4665 *
4666 * Sync the specified pool(s).
4667 * Without arguments "zpool sync" will sync all pools.
4668 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
4669 *
4670 */
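/*
 * Example usage (illustrative):
 *
 *	# zpool sync		(sync every imported pool)
 *	# zpool sync tank	(sync only 'tank')
 */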
4671 static int
4672 zpool_do_sync(int argc, char **argv)
4673 {
4674 int ret;
4675 boolean_t force = B_FALSE;
4676
4677 /* check options */
4678 while ((ret = getopt(argc, argv, "f")) != -1) {
4679 switch (ret) {
4680 case 'f':
4681 force = B_TRUE;
4682 break;
4683 case '?':
4684 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4685 optopt);
4686 usage(B_FALSE);
4687 }
4688 }
4689
4690 argc -= optind;
4691 argv += optind;
4692
4693 /* if argc == 0 we will execute zpool_sync_one on all pools */
4694 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
4695 B_FALSE, zpool_sync_one, &force);
4696
4697 return (ret);
4698 }
4699
4700 typedef struct iostat_cbdata {
4701 uint64_t cb_flags;
4702 int cb_namewidth;
4703 int cb_iteration;
4704 boolean_t cb_verbose;
4705 boolean_t cb_literal;
4706 boolean_t cb_scripted;
4707 zpool_list_t *cb_list;
4708 vdev_cmd_data_list_t *vcdl;
4709 vdev_cbdata_t cb_vdevs;
4710 } iostat_cbdata_t;
4711
4712 /* iostat labels */
4713 typedef struct name_and_columns {
4714 const char *name; /* Column name */
4715 unsigned int columns; /* Center name to this number of columns */
4716 } name_and_columns_t;
4717
4718 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
4719
4720 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
4721 {
4722 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
4723 {NULL}},
4724 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4725 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
4726 {NULL}},
4727 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
4728 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
4729 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
4730 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4731 {"asyncq_wait", 2}, {NULL}},
4732 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
4733 {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
4734 {"trim", 2}, {"rebuild", 2}, {NULL}},
4735 };
4736
4737 /* Shorthand - if "columns" field not set, default to 1 column */
4738 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
4739 {
4740 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
4741 {"write"}, {NULL}},
4742 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4743 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
4744 {NULL}},
4745 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
4746 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
4747 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
4748 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4749 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
4750 {NULL}},
4751 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4752 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4753 {"ind"}, {"agg"}, {NULL}},
4754 };
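/*
 * Illustrative example: for IOS_DEFAULT, the top and bottom label tables
 * above combine to produce the familiar header:
 *
 *	              capacity     operations     bandwidth
 *	pool        alloc   free   read  write   read  write
 */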
4755
4756 static const char *histo_to_title[] = {
4757 [IOS_L_HISTO] = "latency",
4758 [IOS_RQ_HISTO] = "req_size",
4759 };
4760
4761 /*
4762 * Return the number of labels in a null-terminated name_and_columns_t
4763 * array.
4764 *
4765 */
4766 static unsigned int
4767 label_array_len(const name_and_columns_t *labels)
4768 {
4769 int i = 0;
4770
4771 while (labels[i].name)
4772 i++;
4773
4774 return (i);
4775 }
4776
4777 /*
4778 * Return the number of strings in a null-terminated string array.
4779 * For example:
4780 *
4781 * const char *foo[] = {"bar", "baz", NULL};
4782 *
4783 * returns 2
4784 */
4785 static uint64_t
4786 str_array_len(const char *array[])
4787 {
4788 uint64_t i = 0;
4789 while (array[i])
4790 i++;
4791
4792 return (i);
4793 }
4794
4795
4796 /*
4797 * Return a default column width for default/latency/queue columns. This does
4798 * not include histograms, which have their columns autosized.
4799 */
4800 static unsigned int
4801 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
4802 {
4803 unsigned long column_width = 5; /* Normal niceprint */
4804 static unsigned long widths[] = {
4805 /*
4806 * Choose some sane default column sizes for printing the
4807 * raw numbers.
4808 */
4809 [IOS_DEFAULT] = 15, /* 1PB capacity */
4810 [IOS_LATENCY] = 10, /* 1B ns = 10sec */
4811 [IOS_QUEUES] = 6, /* 1M queue entries */
4812 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4813 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4814 };
4815
4816 if (cb->cb_literal)
4817 column_width = widths[type];
4818
4819 return (column_width);
4820 }
4821
4822 /*
4823 * Print the column labels, i.e:
4824 *
4825 * capacity operations bandwidth
4826 * alloc free read write read write ...
4827 *
4828 * If force_column_width is set, use it for the column width. If not set, use
4829 * the default column width.
4830 */
4831 static void
4832 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4833 const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4834 {
4835 int i, idx, s;
4836 int text_start, rw_column_width, spaces_to_end;
4837 uint64_t flags = cb->cb_flags;
4838 uint64_t f;
4839 unsigned int column_width = force_column_width;
4840
4841 /* For each bit set in flags */
4842 for (f = flags; f; f &= ~(1ULL << idx)) {
4843 idx = lowbit64(f) - 1;
4844 if (!force_column_width)
4845 column_width = default_column_width(cb, idx);
4846 /* Print our top labels centered over "read write" label. */
4847 for (i = 0; i < label_array_len(labels[idx]); i++) {
4848 const char *name = labels[idx][i].name;
4849 /*
4850 * We treat labels[][].columns == 0 as shorthand
4851 * for one column. It makes writing out the label
4852 * tables more concise.
4853 */
4854 unsigned int columns = MAX(1, labels[idx][i].columns);
4855 unsigned int slen = strlen(name);
4856
4857 rw_column_width = (column_width * columns) +
4858 (2 * (columns - 1));
4859
4860 text_start = (int)((rw_column_width) / columns -
4861 slen / columns);
4862 if (text_start < 0)
4863 text_start = 0;
4864
4865 printf(" "); /* Two spaces between columns */
4866
4867 /* Space from beginning of column to label */
4868 for (s = 0; s < text_start; s++)
4869 printf(" ");
4870
4871 printf("%s", name);
4872
4873 /* Print space after label to end of column */
4874 spaces_to_end = rw_column_width - text_start - slen;
4875 if (spaces_to_end < 0)
4876 spaces_to_end = 0;
4877
4878 for (s = 0; s < spaces_to_end; s++)
4879 printf(" ");
4880 }
4881 }
4882 }
4883
4884
4885 /*
4886 * print_cmd_columns - Print custom column titles from -c
4887 *
4888 * If the user specified the "zpool status|iostat -c" then print their custom
4889 * column titles in the header. For example, print_cmd_columns() would print
4890 * the " col1 col2" part of this:
4891 *
4892 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4893 * ...
4894 * capacity operations bandwidth
4895 * pool alloc free read write read write col1 col2
4896 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4897 * mypool 269K 1008M 0 0 107 946
4898 * mirror 269K 1008M 0 0 107 946
4899 * sdb - - 0 0 102 473 val1 val2
4900 * sdc - - 0 0 5 473 val1 val2
4901 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4902 */
4903 static void
4904 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4905 {
4906 int i, j;
4907 vdev_cmd_data_t *data = &vcdl->data[0];
4908
4909 if (vcdl->count == 0 || data == NULL)
4910 return;
4911
4912 /*
4913 * Each vdev cmd should have the same column names unless the user did
4914 * something weird with their cmd. Just take the column names from the
4915 * first vdev and assume it works for all of them.
4916 */
4917 for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4918 printf(" ");
4919 if (use_dashes) {
4920 for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4921 printf("-");
4922 } else {
4923 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
4924 vcdl->uniq_cols[i]);
4925 }
4926 }
4927 }
4928
4929
4930 /*
4931 * Utility function to print out a line of dashes like:
4932 *
4933 * -------------------------------- ----- ----- ----- ----- -----
4934 *
4935 * ...or a dashed named-row line like:
4936 *
4937 * logs - - - - -
4938 *
4939 * @cb: iostat data
4940 *
4941 * @force_column_width If non-zero, use the value as the column width.
4942 * Otherwise use the default column widths.
4943 *
4944 * @name: Print a dashed named-row line starting
4945 * with @name. Otherwise, print a regular
4946 * dashed line.
4947 */
4948 static void
4949 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
4950 const char *name)
4951 {
4952 int i;
4953 unsigned int namewidth;
4954 uint64_t flags = cb->cb_flags;
4955 uint64_t f;
4956 int idx;
4957 const name_and_columns_t *labels;
4958 const char *title;
4959
4960
4961 if (cb->cb_flags & IOS_ANYHISTO_M) {
4962 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4963 } else if (cb->cb_vdevs.cb_names_count) {
4964 title = "vdev";
4965 } else {
4966 title = "pool";
4967 }
4968
4969 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4970 name ? strlen(name) : 0);
4971
4972
4973 if (name) {
4974 printf("%-*s", namewidth, name);
4975 } else {
4976 for (i = 0; i < namewidth; i++)
4977 (void) printf("-");
4978 }
4979
4980 /* For each bit in flags */
4981 for (f = flags; f; f &= ~(1ULL << idx)) {
4982 unsigned int column_width;
4983 idx = lowbit64(f) - 1;
4984 if (force_column_width)
4985 column_width = force_column_width;
4986 else
4987 column_width = default_column_width(cb, idx);
4988
4989 labels = iostat_bottom_labels[idx];
4990 for (i = 0; i < label_array_len(labels); i++) {
4991 if (name)
4992 printf(" %*s-", column_width - 1, " ");
4993 else
4994 printf(" %.*s", column_width,
4995 "--------------------");
4996 }
4997 }
4998 }
4999
5000
5001 static void
5002 print_iostat_separator_impl(iostat_cbdata_t *cb,
5003 unsigned int force_column_width)
5004 {
5005 print_iostat_dashes(cb, force_column_width, NULL);
5006 }
5007
5008 static void
5009 print_iostat_separator(iostat_cbdata_t *cb)
5010 {
5011 print_iostat_separator_impl(cb, 0);
5012 }
5013
5014 static void
5015 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
5016 const char *histo_vdev_name)
5017 {
5018 unsigned int namewidth;
5019 const char *title;
5020
5021 color_start(ANSI_BOLD);
5022
5023 if (cb->cb_flags & IOS_ANYHISTO_M) {
5024 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
5025 } else if (cb->cb_vdevs.cb_names_count) {
5026 title = "vdev";
5027 } else {
5028 title = "pool";
5029 }
5030
5031 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
5032 histo_vdev_name ? strlen(histo_vdev_name) : 0);
5033
5034 if (histo_vdev_name)
5035 printf("%-*s", namewidth, histo_vdev_name);
5036 else
5037 printf("%*s", namewidth, "");
5038
5039
5040 print_iostat_labels(cb, force_column_width, iostat_top_labels);
5041 printf("\n");
5042
5043 printf("%-*s", namewidth, title);
5044
5045 print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
5046 if (cb->vcdl != NULL)
5047 print_cmd_columns(cb->vcdl, 0);
5048
5049 printf("\n");
5050
5051 print_iostat_separator_impl(cb, force_column_width);
5052
5053 if (cb->vcdl != NULL)
5054 print_cmd_columns(cb->vcdl, 1);
5055
5056 color_end();
5057
5058 printf("\n");
5059 }
5060
5061 static void
5062 print_iostat_header(iostat_cbdata_t *cb)
5063 {
5064 print_iostat_header_impl(cb, 0, NULL);
5065 }
5066
5067 /*
5068 * Prints a size string (e.g. 120M) with the suffix ("M") colored
5069 * by order of magnitude. Uses column_size to add padding.
5070 */
5071 static void
5072 print_stat_color(const char *statbuf, unsigned int column_size)
5073 {
5074 fputs(" ", stdout);
5075 size_t len = strlen(statbuf);
5076 while (len < column_size) {
5077 fputc(' ', stdout);
5078 column_size--;
5079 }
5080 if (*statbuf == '0') {
5081 color_start(ANSI_GRAY);
5082 fputc('0', stdout);
5083 } else {
5084 for (; *statbuf; statbuf++) {
5085 if (*statbuf == 'K') color_start(ANSI_GREEN);
5086 else if (*statbuf == 'M') color_start(ANSI_YELLOW);
5087 else if (*statbuf == 'G') color_start(ANSI_RED);
5088 else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
5089 else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
5090 else if (*statbuf == 'E') color_start(ANSI_CYAN);
5091 fputc(*statbuf, stdout);
5092 if (--column_size <= 0)
5093 break;
5094 }
5095 }
5096 color_end();
5097 }
5098
5099 /*
5100 * Display a single statistic.
5101 */
5102 static void
5103 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
5104 unsigned int column_size, boolean_t scripted)
5105 {
5106 char buf[64];
5107
5108 zfs_nicenum_format(value, buf, sizeof (buf), format);
5109
5110 if (scripted)
5111 printf("\t%s", buf);
5112 else
5113 print_stat_color(buf, column_size);
5114 }
5115
5116 /*
5117 * Calculate the default vdev stats
5118 *
5119 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting
5120 * stats into calcvs.
5121 */
5122 static void
5123 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
5124 vdev_stat_t *calcvs)
5125 {
5126 int i;
5127
5128 memcpy(calcvs, newvs, sizeof (*calcvs));
5129 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
5130 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
5131
5132 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
5133 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
5134 }
5135
5136 /*
5137 * Internal representation of the extended iostats data.
5138 *
5139 * The extended iostat stats are exported in nvlists as either uint64_t arrays
5140 * or single uint64_t's. We make both look like arrays to make them easier
5141 * to process. In order to make single uint64_t's look like arrays, we set
5142 * __data to the stat data, and then set *data = &__data with count = 1. Then,
5143 * we can just use *data and count.
5144 */
5145 struct stat_array {
5146 uint64_t *data;
5147 uint_t count; /* Number of entries in data[] */
5148 uint64_t __data; /* Only used when data is a single uint64_t */
5149 };
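/*
 * Illustrative example: a DATA_TYPE_UINT64 nvpair holding the value 42 is
 * converted by nvpair64_to_stat_array() below into
 * { .data = &sa.__data, .count = 1, .__data = 42 }, so callers can always
 * iterate over data[0..count-1] regardless of the source type.
 */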
5150
5151 static uint64_t
5152 stat_histo_max(struct stat_array *nva, unsigned int len)
5153 {
5154 uint64_t max = 0;
5155 int i;
5156 for (i = 0; i < len; i++)
5157 max = MAX(max, array64_max(nva[i].data, nva[i].count));
5158
5159 return (max);
5160 }
5161
5162 /*
5163 * Helper function to lookup a uint64_t array or uint64_t value and store its
5164 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
5165 * it look like a one element array to make it easier to process.
5166 */
5167 static int
5168 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
5169 struct stat_array *nva)
5170 {
5171 nvpair_t *tmp;
5172 int ret;
5173
5174 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
5175 switch (nvpair_type(tmp)) {
5176 case DATA_TYPE_UINT64_ARRAY:
5177 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
5178 break;
5179 case DATA_TYPE_UINT64:
5180 ret = nvpair_value_uint64(tmp, &nva->__data);
5181 nva->data = &nva->__data;
5182 nva->count = 1;
5183 break;
5184 default:
5185 /* Not a uint64_t */
5186 ret = EINVAL;
5187 break;
5188 }
5189
5190 return (ret);
5191 }
5192
5193 /*
5194 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
5195 * subtract them, and return the results in a newly allocated stat_array.
5196 * You must free the returned array after you are done with it with
5197 * free_calc_stats().
5198 *
5199 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
5200 * values.
5201 */
5202 static struct stat_array *
5203 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
5204 nvlist_t *newnv)
5205 {
5206 nvlist_t *oldnvx = NULL, *newnvx;
5207 struct stat_array *oldnva, *newnva, *calcnva;
5208 int i, j;
5209 unsigned int alloc_size = (sizeof (struct stat_array)) * len;
5210
5211 /* Extract our extended stats nvlist from the main list */
5212 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
5213 &newnvx) == 0);
5214 if (oldnv) {
5215 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
5216 &oldnvx) == 0);
5217 }
5218
5219 newnva = safe_malloc(alloc_size);
5220 oldnva = safe_malloc(alloc_size);
5221 calcnva = safe_malloc(alloc_size);
5222
5223 for (j = 0; j < len; j++) {
5224 verify(nvpair64_to_stat_array(newnvx, names[j],
5225 &newnva[j]) == 0);
5226 calcnva[j].count = newnva[j].count;
5227 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
5228 calcnva[j].data = safe_malloc(alloc_size);
5229 memcpy(calcnva[j].data, newnva[j].data, alloc_size);
5230
5231 if (oldnvx) {
5232 verify(nvpair64_to_stat_array(oldnvx, names[j],
5233 &oldnva[j]) == 0);
5234 for (i = 0; i < oldnva[j].count; i++)
5235 calcnva[j].data[i] -= oldnva[j].data[i];
5236 }
5237 }
5238 free(newnva);
5239 free(oldnva);
5240 return (calcnva);
5241 }
5242
5243 static void
5244 free_calc_stats(struct stat_array *nva, unsigned int len)
5245 {
5246 int i;
5247 for (i = 0; i < len; i++)
5248 free(nva[i].data);
5249
5250 free(nva);
5251 }
5252
5253 static void
5254 print_iostat_histo(struct stat_array *nva, unsigned int len,
5255 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
5256 double scale)
5257 {
5258 int i, j;
5259 char buf[6];
5260 uint64_t val;
5261 enum zfs_nicenum_format format;
5262 unsigned int buckets;
5263 unsigned int start_bucket;
5264
5265 if (cb->cb_literal)
5266 format = ZFS_NICENUM_RAW;
5267 else
5268 format = ZFS_NICENUM_1024;
5269
5270 /* All these histos are the same size, so just use nva[0].count */
5271 buckets = nva[0].count;
5272
5273 if (cb->cb_flags & IOS_RQ_HISTO_M) {
5274 /* Start at 512 - req size should never be lower than this */
5275 start_bucket = 9;
5276 } else {
5277 start_bucket = 0;
5278 }
5279
5280 for (j = start_bucket; j < buckets; j++) {
5281 /* Print histogram bucket label */
5282 if (cb->cb_flags & IOS_L_HISTO_M) {
5283 /* Ending range of this bucket */
5284 val = (1UL << (j + 1)) - 1;
5285 zfs_nicetime(val, buf, sizeof (buf));
5286 } else {
5287 /* Request size (starting range of bucket) */
5288 val = (1UL << j);
5289 zfs_nicenum(val, buf, sizeof (buf));
5290 }
5291
5292 if (cb->cb_scripted)
5293 printf("%llu", (u_longlong_t)val);
5294 else
5295 printf("%-*s", namewidth, buf);
5296
5297 /* Print the values on the line */
5298 for (i = 0; i < len; i++) {
5299 print_one_stat(nva[i].data[j] * scale, format,
5300 column_width, cb->cb_scripted);
5301 }
5302 printf("\n");
5303 }
5304 }
5305
5306 static void
5307 print_solid_separator(unsigned int length)
5308 {
5309 while (length--)
5310 printf("-");
5311 printf("\n");
5312 }
5313
5314 static void
5315 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
5316 nvlist_t *newnv, double scale, const char *name)
5317 {
5318 unsigned int column_width;
5319 unsigned int namewidth;
5320 unsigned int entire_width;
5321 enum iostat_type type;
5322 struct stat_array *nva;
5323 const char **names;
5324 unsigned int names_len;
5325
5326 /* What type of histo are we? */
5327 type = IOS_HISTO_IDX(cb->cb_flags);
5328
5329 /* Get NULL-terminated array of nvlist names for our histo */
5330 names = vsx_type_to_nvlist[type];
5331 names_len = str_array_len(names); /* num of names */
5332
5333 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
5334
5335 if (cb->cb_literal) {
5336 column_width = MAX(5,
5337 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
5338 } else {
5339 column_width = 5;
5340 }
5341
5342 namewidth = MAX(cb->cb_namewidth,
5343 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
5344
5345 /*
5346 * Calculate the entire line width of what we're printing. The
5347 * +2 is for the two spaces between columns:
5348 */
5349 /* read write */
5350 /* ----- ----- */
5351 /* |___| <---------- column_width */
5352 /* */
5353 /* |__________| <--- entire_width */
5354 /* */
5355 entire_width = namewidth + (column_width + 2) *
5356 label_array_len(iostat_bottom_labels[type]);
5357
5358 if (cb->cb_scripted)
5359 printf("%s\n", name);
5360 else
5361 print_iostat_header_impl(cb, column_width, name);
5362
5363 print_iostat_histo(nva, names_len, cb, column_width,
5364 namewidth, scale);
5365
5366 free_calc_stats(nva, names_len);
5367 if (!cb->cb_scripted)
5368 print_solid_separator(entire_width);
5369 }
5370
5371 /*
5372 * Calculate the average latency of a power-of-two latency histogram
5373 */
5374 static uint64_t
5375 single_histo_average(uint64_t *histo, unsigned int buckets)
5376 {
5377 int i;
5378 uint64_t count = 0, total = 0;
5379
5380 for (i = 0; i < buckets; i++) {
5381 /*
5382 * Our buckets are power-of-two latency ranges. Use the
5383 * midpoint latency of each bucket to calculate the average.
5384 * For example:
5385 *
5386 * Bucket Midpoint
5387 * 8ns-15ns: 12ns
5388 * 16ns-31ns: 24ns
5389 * ...
5390 */
5391 if (histo[i] != 0) {
5392 total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
5393 count += histo[i];
5394 }
5395 }
5396
5397 /* Prevent divide by zero */
5398 return (count == 0 ? 0 : total / count);
5399 }
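/*
 * Worked example (illustrative): histo[3] = 4 ops in the 8ns-15ns bucket
 * (midpoint 12ns) and histo[4] = 4 ops in the 16ns-31ns bucket (midpoint
 * 24ns) average to (4*12 + 4*24) / 8 = 18ns.
 */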
5400
5401 static void
5402 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
5403 {
5404 const char *names[] = {
5405 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
5406 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
5407 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
5408 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
5409 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
5410 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
5411 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
5412 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
5413 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
5414 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
5415 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
5416 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
5417 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
5418 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
5419 };
5420
5421 struct stat_array *nva;
5422
5423 unsigned int column_width = default_column_width(cb, IOS_QUEUES);
5424 enum zfs_nicenum_format format;
5425
5426 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
5427
5428 if (cb->cb_literal)
5429 format = ZFS_NICENUM_RAW;
5430 else
5431 format = ZFS_NICENUM_1024;
5432
5433 for (int i = 0; i < ARRAY_SIZE(names); i++) {
5434 uint64_t val = nva[i].data[0];
5435 print_one_stat(val, format, column_width, cb->cb_scripted);
5436 }
5437
5438 free_calc_stats(nva, ARRAY_SIZE(names));
5439 }
5440
5441 static void
5442 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
5443 nvlist_t *newnv)
5444 {
5445 int i;
5446 uint64_t val;
5447 const char *names[] = {
5448 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
5449 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
5450 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
5451 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
5452 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
5453 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
5454 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
5455 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
5456 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
5457 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
5458 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
5459 };
5460 struct stat_array *nva;
5461
5462 unsigned int column_width = default_column_width(cb, IOS_LATENCY);
5463 enum zfs_nicenum_format format;
5464
5465 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
5466
5467 if (cb->cb_literal)
5468 format = ZFS_NICENUM_RAWTIME;
5469 else
5470 format = ZFS_NICENUM_TIME;
5471
5472 /* Print our avg latencies on the line */
5473 for (i = 0; i < ARRAY_SIZE(names); i++) {
5474 /* Compute average latency for a latency histo */
5475 val = single_histo_average(nva[i].data, nva[i].count);
5476 print_one_stat(val, format, column_width, cb->cb_scripted);
5477 }
5478 free_calc_stats(nva, ARRAY_SIZE(names));
5479 }
5480
5481 /*
5482 * Print default statistics (capacity/operations/bandwidth)
5483 */
5484 static void
5485 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
5486 {
5487 unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
5488 enum zfs_nicenum_format format;
5489 char na; /* char to print for "not applicable" values */
5490
5491 if (cb->cb_literal) {
5492 format = ZFS_NICENUM_RAW;
5493 na = '0';
5494 } else {
5495 format = ZFS_NICENUM_1024;
5496 na = '-';
5497 }
5498
5499 /* only toplevel vdevs have capacity stats */
5500 if (vs->vs_space == 0) {
5501 if (cb->cb_scripted)
5502 printf("\t%c\t%c", na, na);
5503 else
5504 printf(" %*c %*c", column_width, na, column_width,
5505 na);
5506 } else {
5507 print_one_stat(vs->vs_alloc, format, column_width,
5508 cb->cb_scripted);
5509 print_one_stat(vs->vs_space - vs->vs_alloc, format,
5510 column_width, cb->cb_scripted);
5511 }
5512
5513 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
5514 format, column_width, cb->cb_scripted);
5515 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
5516 format, column_width, cb->cb_scripted);
5517 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
5518 format, column_width, cb->cb_scripted);
5519 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
5520 format, column_width, cb->cb_scripted);
5521 }
5522
5523 static const char *const class_name[] = {
5524 VDEV_ALLOC_BIAS_DEDUP,
5525 VDEV_ALLOC_BIAS_SPECIAL,
5526 VDEV_ALLOC_CLASS_LOGS
5527 };
5528
5529 /*
5530 * Print out all the statistics for the given vdev. This can either be the
5531 * toplevel configuration, or called recursively. If 'name' is NULL, then this
5532 * is a verbose output, and we don't want to display the toplevel pool stats.
5533 *
5534 * Returns the number of stat lines printed.
5535 */
5536 static unsigned int
5537 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
5538 nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
5539 {
5540 nvlist_t **oldchild, **newchild;
5541 uint_t c, children, oldchildren;
5542 vdev_stat_t *oldvs, *newvs, *calcvs;
5543 vdev_stat_t zerovs = { 0 };
5544 char *vname;
5545 int i;
5546 int ret = 0;
5547 uint64_t tdelta;
5548 double scale;
5549
5550 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
5551 return (ret);
5552
5553 calcvs = safe_malloc(sizeof (*calcvs));
5554
5555 if (oldnv != NULL) {
5556 verify(nvlist_lookup_uint64_array(oldnv,
5557 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
5558 } else {
5559 oldvs = &zerovs;
5560 }
5561
5562 /* Do we only want to see a specific vdev? */
5563 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
5564 /* Yes we do. Is this the vdev? */
5565 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
5566 /*
5567 * This is our vdev. Since it is the only vdev we
5568 * will be displaying, make depth = 0 so that it
5569 * doesn't get indented.
5570 */
5571 depth = 0;
5572 break;
5573 }
5574 }
5575
5576 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
5577 /* Couldn't match the name */
5578 goto children;
5579 }
5580
5581
5582 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
5583 (uint64_t **)&newvs, &c) == 0);
5584
5585 /*
5586 * Print the vdev name unless it is a histogram. Histograms
5587 * display the vdev name in the header itself.
5588 */
5589 if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
5590 if (cb->cb_scripted) {
5591 printf("%s", name);
5592 } else {
5593 if (strlen(name) + depth > cb->cb_namewidth)
5594 (void) printf("%*s%s", depth, "", name);
5595 else
5596 (void) printf("%*s%s%*s", depth, "", name,
5597 (int)(cb->cb_namewidth - strlen(name) -
5598 depth), "");
5599 }
5600 }
5601
5602 /* Calculate our scaling factor */
5603 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
5604 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
5605 /*
5606 * If we specify printing histograms with no time interval, then
5607 * print the histogram numbers over the entire lifetime of the
5608 * vdev.
5609 */
5610 scale = 1;
5611 } else {
5612 if (tdelta == 0)
5613 scale = 1.0;
5614 else
5615 scale = (double)NANOSEC / tdelta;
5616 }
5617
5618 if (cb->cb_flags & IOS_DEFAULT_M) {
5619 calc_default_iostats(oldvs, newvs, calcvs);
5620 print_iostat_default(calcvs, cb, scale);
5621 }
5622 if (cb->cb_flags & IOS_LATENCY_M)
5623 print_iostat_latency(cb, oldnv, newnv);
5624 if (cb->cb_flags & IOS_QUEUES_M)
5625 print_iostat_queues(cb, newnv);
5626 if (cb->cb_flags & IOS_ANYHISTO_M) {
5627 printf("\n");
5628 print_iostat_histos(cb, oldnv, newnv, scale, name);
5629 }
5630
5631 if (cb->vcdl != NULL) {
5632 const char *path;
5633 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
5634 &path) == 0) {
5635 printf(" ");
5636 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
5637 }
5638 }
5639
5640 if (!(cb->cb_flags & IOS_ANYHISTO_M))
5641 printf("\n");
5642
5643 ret++;
5644
5645 children:
5646
5647 free(calcvs);
5648
5649 if (!cb->cb_verbose)
5650 return (ret);
5651
5652 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
5653 &newchild, &children) != 0)
5654 return (ret);
5655
5656 if (oldnv) {
5657 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
5658 &oldchild, &oldchildren) != 0)
5659 return (ret);
5660
5661 children = MIN(oldchildren, children);
5662 }
5663
5664 /*
5665 * print normal top-level devices
5666 */
5667 for (c = 0; c < children; c++) {
5668 uint64_t ishole = B_FALSE, islog = B_FALSE;
5669
5670 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
5671 &ishole);
5672
5673 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
5674 &islog);
5675
5676 if (ishole || islog)
5677 continue;
5678
5679 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
5680 continue;
5681
5682 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5683 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5684 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
5685 newchild[c], cb, depth + 2);
5686 free(vname);
5687 }
5688
5689 /*
5690 * print all other top-level devices
5691 */
5692 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
5693 boolean_t printed = B_FALSE;
5694
5695 for (c = 0; c < children; c++) {
5696 uint64_t islog = B_FALSE;
5697 const char *bias = NULL;
5698 const char *type = NULL;
5699
5700 (void) nvlist_lookup_uint64(newchild[c],
5701 ZPOOL_CONFIG_IS_LOG, &islog);
5702 if (islog) {
5703 bias = VDEV_ALLOC_CLASS_LOGS;
5704 } else {
5705 (void) nvlist_lookup_string(newchild[c],
5706 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
5707 (void) nvlist_lookup_string(newchild[c],
5708 ZPOOL_CONFIG_TYPE, &type);
5709 }
5710 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
5711 continue;
5712 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
5713 continue;
5714
5715 if (!printed) {
5716 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
5717 !cb->cb_scripted &&
5718 !cb->cb_vdevs.cb_names) {
5719 print_iostat_dashes(cb, 0,
5720 class_name[n]);
5721 }
5722 printf("\n");
5723 printed = B_TRUE;
5724 }
5725
5726 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5727 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5728 ret += print_vdev_stats(zhp, vname, oldnv ?
5729 oldchild[c] : NULL, newchild[c], cb, depth + 2);
5730 free(vname);
5731 }
5732 }
5733
5734 /*
5735 * Include level 2 ARC devices in iostat output
5736 */
5737 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
5738 &newchild, &children) != 0)
5739 return (ret);
5740
5741 if (oldnv) {
5742 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
5743 &oldchild, &oldchildren) != 0)
5744 return (ret);
5745
5746 children = MIN(oldchildren, children);
5747 }
5748
5749 if (children > 0) {
5750 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
5751 !cb->cb_vdevs.cb_names) {
5752 print_iostat_dashes(cb, 0, "cache");
5753 }
5754 printf("\n");
5755
5756 for (c = 0; c < children; c++) {
5757 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5758 cb->cb_vdevs.cb_name_flags);
5759 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
5760 : NULL, newchild[c], cb, depth + 2);
5761 free(vname);
5762 }
5763 }
5764
5765 return (ret);
5766 }
5767
5768 static int
5769 refresh_iostat(zpool_handle_t *zhp, void *data)
5770 {
5771 iostat_cbdata_t *cb = data;
5772 boolean_t missing;
5773
5774 /*
5775 * If the pool has disappeared, remove it from the list and continue.
5776 */
5777 if (zpool_refresh_stats(zhp, &missing) != 0)
5778 return (-1);
5779
5780 if (missing)
5781 pool_list_remove(cb->cb_list, zhp);
5782
5783 return (0);
5784 }
5785
5786 /*
5787 * Callback to print out the iostats for the given pool.
5788 */
5789 static int
5790 print_iostat(zpool_handle_t *zhp, void *data)
5791 {
5792 iostat_cbdata_t *cb = data;
5793 nvlist_t *oldconfig, *newconfig;
5794 nvlist_t *oldnvroot, *newnvroot;
5795 int ret;
5796
5797 newconfig = zpool_get_config(zhp, &oldconfig);
5798
5799 if (cb->cb_iteration == 1)
5800 oldconfig = NULL;
5801
5802 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
5803 &newnvroot) == 0);
5804
5805 if (oldconfig == NULL)
5806 oldnvroot = NULL;
5807 else
5808 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
5809 &oldnvroot) == 0);
5810
5811 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
5812 cb, 0);
5813 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
5814 !cb->cb_scripted && cb->cb_verbose &&
5815 !cb->cb_vdevs.cb_names_count) {
5816 print_iostat_separator(cb);
5817 if (cb->vcdl != NULL) {
5818 print_cmd_columns(cb->vcdl, 1);
5819 }
5820 printf("\n");
5821 }
5822
5823 return (ret);
5824 }
5825
5826 static int
5827 get_columns(void)
5828 {
5829 struct winsize ws;
5830 int columns = 80;
5831 int error;
5832
5833 if (isatty(STDOUT_FILENO)) {
5834 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
5835 if (error == 0)
5836 columns = ws.ws_col;
5837 } else {
5838 columns = 999;
5839 }
5840
5841 return (columns);
5842 }
5843
5844 /*
5845 * Return the required length of the pool/vdev name column. The minimum
5846 * allowed width and output formatting flags must be provided.
5847 */
5848 static int
5849 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5850 {
5851 nvlist_t *config, *nvroot;
5852 int width = min_width;
5853
5854 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5855 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5856 &nvroot) == 0);
5857 size_t poolname_len = strlen(zpool_get_name(zhp));
5858 if (verbose == B_FALSE) {
5859 width = MAX(poolname_len, min_width);
5860 } else {
5861 width = MAX(poolname_len,
5862 max_width(zhp, nvroot, 0, min_width, flags));
5863 }
5864 }
5865
5866 return (width);
5867 }
5868
5869 /*
5870 * Parse the input string, get the 'interval' and 'count' value if there is one.
5871 */
5872 static void
5873 get_interval_count(int *argcp, char **argv, float *iv,
5874 unsigned long *cnt)
5875 {
5876 float interval = 0;
5877 unsigned long count = 0;
5878 int argc = *argcp;
5879
5880 /*
5881 * Determine if the last argument is an integer or a pool name
5882 */
5883 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5884 char *end;
5885
5886 errno = 0;
5887 interval = strtof(argv[argc - 1], &end);
5888
5889 if (*end == '\0' && errno == 0) {
5890 if (interval == 0) {
5891 (void) fprintf(stderr, gettext(
5892 "interval cannot be zero\n"));
5893 usage(B_FALSE);
5894 }
5895 /*
5896 * Ignore the last parameter
5897 */
5898 argc--;
5899 } else {
5900 /*
5901 * If this is not a valid number, just plow on. The
5902 * user will get a more informative error message later
5903 * on.
5904 */
5905 interval = 0;
5906 }
5907 }
5908
5909 /*
5910 * If the last argument is also an integer, then we have both a count
5911 * and an interval.
5912 */
5913 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5914 char *end;
5915
5916 errno = 0;
5917 count = interval;
5918 interval = strtof(argv[argc - 1], &end);
5919
5920 if (*end == '\0' && errno == 0) {
5921 if (interval == 0) {
5922 (void) fprintf(stderr, gettext(
5923 "interval cannot be zero\n"));
5924 usage(B_FALSE);
5925 }
5926
5927 /*
5928 * Ignore the last parameter
5929 */
5930 argc--;
5931 } else {
5932 interval = 0;
5933 }
5934 }
5935
5936 *iv = interval;
5937 *cnt = count;
5938 *argcp = argc;
5939 }
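
/*
 * Worked example (illustrative, not from the original source): for
 * "zpool iostat tank 2 3" the trailing "3" is parsed first as the interval;
 * the second pass then shifts it into 'count' and re-parses "2" as the
 * interval, leaving interval = 2.0 and count = 3. A single trailing number
 * ("zpool iostat tank 5") yields interval = 5.0 and count = 0, i.e. repeat
 * until interrupted.
 */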
5940
5941 static void
5942 get_timestamp_arg(char c)
5943 {
5944 if (c == 'u')
5945 timestamp_fmt = UDATE;
5946 else if (c == 'd')
5947 timestamp_fmt = DDATE;
5948 else
5949 usage(B_FALSE);
5950 }
5951
5952 /*
5953  * Return the stat flags that are supported on all pools by both the module and
5954 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5955 * It will get ANDed down until only the flags that are supported on all pools
5956 * remain.
5957 */
5958 static int
5959 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5960 {
5961 uint64_t *mask = data;
5962 nvlist_t *config, *nvroot, *nvx;
5963 uint64_t flags = 0;
5964 int i, j;
5965
5966 config = zpool_get_config(zhp, NULL);
5967 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5968 &nvroot) == 0);
5969
5970 	/* Default stats are always supported, but for completeness... */
5971 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5972 flags |= IOS_DEFAULT_M;
5973
5974 /* Get our extended stats nvlist from the main list */
5975 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5976 &nvx) != 0) {
5977 /*
5978 * No extended stats; they're probably running an older
5979 * module. No big deal, we support that too.
5980 */
5981 goto end;
5982 }
5983
5984 /* For each extended stat, make sure all its nvpairs are supported */
5985 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5986 if (!vsx_type_to_nvlist[j][0])
5987 continue;
5988
5989 /* Start off by assuming the flag is supported, then check */
5990 flags |= (1ULL << j);
5991 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5992 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5993 /* flag isn't supported */
5994 flags = flags & ~(1ULL << j);
5995 break;
5996 }
5997 }
5998 }
5999 end:
6000 *mask = *mask & flags;
6001 return (0);
6002 }
6003
6004 /*
6005 * Return a bitmask of stats that are supported on all pools by both the module
6006 * and zpool iostat.
6007 */
6008 static uint64_t
6009 get_stat_flags(zpool_list_t *list)
6010 {
6011 uint64_t mask = -1;
6012
6013 /*
6014 * get_stat_flags_cb() will lop off bits from "mask" until only the
6015 * flags that are supported on all pools remain.
6016 */
6017 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
6018 return (mask);
6019 }
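
/*
 * Illustrative sketch of the AND-down behaviour (not part of the original
 * source): with two imported pools where the module reports
 * IOS_DEFAULT_M|IOS_LATENCY_M|IOS_QUEUES_M for one and only
 * IOS_DEFAULT_M|IOS_LATENCY_M for the other, the mask starts at all 1s and
 * ends as IOS_DEFAULT_M|IOS_LATENCY_M -- the stats usable on every pool.
 */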
6020
6021 /*
6022 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
6023 */
6024 static int
6025 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
6026 {
6027 uint64_t guid;
6028 vdev_cbdata_t *cb = cb_data;
6029 zpool_handle_t *zhp = zhp_data;
6030
6031 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
6032 return (0);
6033
6034 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
6035 }
6036
6037 /*
6038 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
6039 */
6040 static int
6041 is_vdev(zpool_handle_t *zhp, void *cb_data)
6042 {
6043 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
6044 }
6045
6046 /*
6047 * Check if vdevs are in a pool
6048 *
6049 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
6050 * return 0. If pool_name is NULL, then search all pools.
6051 */
6052 static int
6053 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
6054 vdev_cbdata_t *cb)
6055 {
6056 char **tmp_name;
6057 int ret = 0;
6058 int i;
6059 int pool_count = 0;
6060
6061 if ((argc == 0) || !*argv)
6062 return (0);
6063
6064 if (pool_name)
6065 pool_count = 1;
6066
6067 /* Temporarily hijack cb_names for a second... */
6068 tmp_name = cb->cb_names;
6069
6070 	/* Go through our list of prospective vdev names */
6071 for (i = 0; i < argc; i++) {
6072 cb->cb_names = argv + i;
6073
6074 /* Is this name a vdev in our pools? */
6075 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
6076 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
6077 if (!ret) {
6078 /* No match */
6079 break;
6080 }
6081 }
6082
6083 cb->cb_names = tmp_name;
6084
6085 return (ret);
6086 }
6087
6088 static int
6089 is_pool_cb(zpool_handle_t *zhp, void *data)
6090 {
6091 char *name = data;
6092 if (strcmp(name, zpool_get_name(zhp)) == 0)
6093 return (1);
6094
6095 return (0);
6096 }
6097
6098 /*
6099 * Do we have a pool named *name? If so, return 1, otherwise 0.
6100 */
6101 static int
6102 is_pool(char *name)
6103 {
6104 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
6105 is_pool_cb, name));
6106 }
6107
6108 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
6109 static int
6110 are_all_pools(int argc, char **argv)
6111 {
6112 if ((argc == 0) || !*argv)
6113 return (0);
6114
6115 while (--argc >= 0)
6116 if (!is_pool(argv[argc]))
6117 return (0);
6118
6119 return (1);
6120 }
6121
6122 /*
6123 * Helper function to print out vdev/pool names we can't resolve. Used for an
6124 * error message.
6125 */
6126 static void
6127 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
6128 vdev_cbdata_t *cb)
6129 {
6130 int i;
6131 char *name;
6132 char *str;
6133 for (i = 0; i < argc; i++) {
6134 name = argv[i];
6135
6136 if (is_pool(name))
6137 str = gettext("pool");
6138 else if (are_vdevs_in_pool(1, &name, pool_name, cb))
6139 str = gettext("vdev in this pool");
6140 else if (are_vdevs_in_pool(1, &name, NULL, cb))
6141 str = gettext("vdev in another pool");
6142 else
6143 str = gettext("unknown");
6144
6145 fprintf(stderr, "\t%s (%s)\n", name, str);
6146 }
6147 }
6148
6149 /*
6150 * Same as get_interval_count(), but with additional checks to not misinterpret
6151 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
6152 * cb.cb_vdevs.cb_name_flags.
6153 */
6154 static void
6155 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
6156 unsigned long *count, iostat_cbdata_t *cb)
6157 {
6158 char **tmpargv = argv;
6159 int argc_for_interval = 0;
6160
6161 /* Is the last arg an interval value? Or a guid? */
6162 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
6163 &cb->cb_vdevs)) {
6164 /*
6165 * The last arg is not a guid, so it's probably an
6166 * interval value.
6167 */
6168 argc_for_interval++;
6169
6170 if (*argc >= 2 &&
6171 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
6172 &cb->cb_vdevs)) {
6173 /*
6174 * The 2nd to last arg is not a guid, so it's probably
6175 * an interval value.
6176 */
6177 argc_for_interval++;
6178 }
6179 }
6180
6181 /* Point to our list of possible intervals */
6182 tmpargv = &argv[*argc - argc_for_interval];
6183
6184 *argc = *argc - argc_for_interval;
6185 get_interval_count(&argc_for_interval, tmpargv,
6186 interval, count);
6187 }
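
/*
 * Example (illustrative): with -g, "zpool iostat -g 7423866276257778365 2"
 * must not treat the guid as a count. The checks above keep any trailing
 * number that resolves to a vdev guid in some pool out of the interval/count
 * parsing, so only the "2" is consumed as the interval here.
 */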
6188
6189 /*
6190 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
6191 * if we were unable to determine its size.
6192 */
6193 static int
6194 terminal_height(void)
6195 {
6196 struct winsize win;
6197
6198 if (isatty(STDOUT_FILENO) == 0)
6199 return (-1);
6200
6201 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
6202 return (win.ws_row);
6203
6204 return (-1);
6205 }
6206
6207 /*
6208 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
6209 * print the result.
6210 *
6211 * name: Short name of the script ('iostat').
6212 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat');
6213 */
6214 static void
6215 print_zpool_script_help(char *name, char *path)
6216 {
6217 char *argv[] = {path, (char *)"-h", NULL};
6218 char **lines = NULL;
6219 int lines_cnt = 0;
6220 int rc;
6221
6222 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
6223 &lines_cnt);
6224 if (rc != 0 || lines == NULL || lines_cnt <= 0) {
6225 if (lines != NULL)
6226 libzfs_free_str_array(lines, lines_cnt);
6227 return;
6228 }
6229
6230 for (int i = 0; i < lines_cnt; i++)
6231 if (!is_blank_str(lines[i]))
6232 printf(" %-14s %s\n", name, lines[i]);
6233
6234 libzfs_free_str_array(lines, lines_cnt);
6235 }
6236
6237 /*
6238  * Go through the zpool status/iostat -c scripts in the user's path, run their
6239 * help option (-h), and print out the results.
6240 */
6241 static void
6242 print_zpool_dir_scripts(char *dirpath)
6243 {
6244 DIR *dir;
6245 struct dirent *ent;
6246 char fullpath[MAXPATHLEN];
6247 struct stat dir_stat;
6248
6249 if ((dir = opendir(dirpath)) != NULL) {
6250 /* print all the files and directories within directory */
6251 while ((ent = readdir(dir)) != NULL) {
6252 if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
6253 dirpath, ent->d_name) >= sizeof (fullpath)) {
6254 (void) fprintf(stderr,
6255 gettext("internal error: "
6256 "ZPOOL_SCRIPTS_PATH too large.\n"));
6257 exit(1);
6258 }
6259
6260 /* Print the scripts */
6261 if (stat(fullpath, &dir_stat) == 0)
6262 if (dir_stat.st_mode & S_IXUSR &&
6263 S_ISREG(dir_stat.st_mode))
6264 print_zpool_script_help(ent->d_name,
6265 fullpath);
6266 }
6267 closedir(dir);
6268 }
6269 }
6270
6271 /*
6272 * Print out help text for all zpool status/iostat -c scripts.
6273 */
6274 static void
6275 print_zpool_script_list(const char *subcommand)
6276 {
6277 char *dir, *sp, *tmp;
6278
6279 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
6280
6281 sp = zpool_get_cmd_search_path();
6282 if (sp == NULL)
6283 return;
6284
6285 for (dir = strtok_r(sp, ":", &tmp);
6286 dir != NULL;
6287 dir = strtok_r(NULL, ":", &tmp))
6288 print_zpool_dir_scripts(dir);
6289
6290 free(sp);
6291 }
6292
6293 /*
6294 * Set the minimum pool/vdev name column width. The width must be at least 10,
6295  * but may be as large as the terminal width - 42 so it still fits on one line.
6296 * NOTE: 42 is the width of the default capacity/operations/bandwidth output
6297 */
6298 static int
6299 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
6300 {
6301 iostat_cbdata_t *cb = data;
6302 int width, available_width;
6303
6304 /*
6305 * get_namewidth() returns the maximum width of any name in that column
6306 * for any pool/vdev/device line that will be output.
6307 */
6308 width = get_namewidth(zhp, cb->cb_namewidth,
6309 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
6310
6311 /*
6312 * The width we are calculating is the width of the header and also the
6313 * padding width for names that are less than maximum width. The stats
6314 * take up 42 characters, so the width available for names is:
6315 */
6316 available_width = get_columns() - 42;
6317
6318 /*
6319 * If the maximum width fits on a screen, then great! Make everything
6320 * line up by justifying all lines to the same width. If that max
6321 * width is larger than what's available, the name plus stats won't fit
6322 * on one line, and justifying to that width would cause every line to
6323 * wrap on the screen. We only want lines with long names to wrap.
6324 * Limit the padding to what won't wrap.
6325 */
6326 if (width > available_width)
6327 width = available_width;
6328
6329 /*
6330 * And regardless of whatever the screen width is (get_columns can
6331 * return 0 if the width is not known or less than 42 for a narrow
6332 * terminal) have the width be a minimum of 10.
6333 */
6334 if (width < 10)
6335 width = 10;
6336
6337 /* Save the calculated width */
6338 cb->cb_namewidth = width;
6339
6340 return (0);
6341 }
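
/*
 * Worked example (illustrative): on an 80-column terminal the stats occupy
 * 42 columns, so available_width = 80 - 42 = 38. A longest name of 45
 * characters is clamped to 38, letting only the long-name lines wrap. On a
 * 40-column terminal available_width would be -2, so the 10-column floor
 * applies instead.
 */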
6342
6343 /*
6344  * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
6345 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
6346 * [interval [count]]
6347 *
6348 * -c CMD For each vdev, run command CMD
6349 * -g Display guid for individual vdev name.
6350 * -L Follow links when resolving vdev path name.
6351 * -P Display full path for vdev name.
6352 * -v Display statistics for individual vdevs
6353 * -h Display help
6354 * -p Display values in parsable (exact) format.
6355 * -H Scripted mode. Don't display headers, and separate properties
6356 * by a single tab.
6357 * -l Display average latency
6358 * -q Display queue depths
6359 * -w Display latency histograms
6360 * -r Display request size histogram
6361 * -T Display a timestamp in date(1) or Unix format
6362 * -n Only print headers once
6363 *
6364 * This command can be tricky because we want to be able to deal with pool
6365 * creation/destruction as well as vdev configuration changes. The bulk of this
6366 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
6367 * on pool_list_update() to detect the addition of new pools. Configuration
6368 * changes are all handled within libzfs.
6369  * changes are all handled within libzfs.
 */
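
/*
 * Example invocations (illustrative):
 *
 *	zpool iostat tank 5		default stats every 5 seconds
 *	zpool iostat -v tank 5 12	per-vdev stats, 12 samples
 *	zpool iostat -l -q 2		add latency and queue-depth columns
 *	zpool iostat -w tank		latency histograms (excludes -c/-l/-q)
 */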
6370 int
6371 zpool_do_iostat(int argc, char **argv)
6372 {
6373 int c;
6374 int ret;
6375 int npools;
6376 float interval = 0;
6377 unsigned long count = 0;
6378 int winheight = 24;
6379 zpool_list_t *list;
6380 boolean_t verbose = B_FALSE;
6381 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
6382 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
6383 boolean_t omit_since_boot = B_FALSE;
6384 boolean_t guid = B_FALSE;
6385 boolean_t follow_links = B_FALSE;
6386 boolean_t full_name = B_FALSE;
6387 boolean_t headers_once = B_FALSE;
6388 iostat_cbdata_t cb = { 0 };
6389 char *cmd = NULL;
6390
6391 /* Used for printing error message */
6392 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
6393 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
6394
6395 uint64_t unsupported_flags;
6396
6397 /* check options */
6398 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
6399 switch (c) {
6400 case 'c':
6401 if (cmd != NULL) {
6402 fprintf(stderr,
6403 gettext("Can't set -c flag twice\n"));
6404 exit(1);
6405 }
6406
6407 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
6408 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
6409 fprintf(stderr, gettext(
6410 "Can't run -c, disabled by "
6411 "ZPOOL_SCRIPTS_ENABLED.\n"));
6412 exit(1);
6413 }
6414
6415 if ((getuid() <= 0 || geteuid() <= 0) &&
6416 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
6417 fprintf(stderr, gettext(
6418 "Can't run -c with root privileges "
6419 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
6420 exit(1);
6421 }
6422 cmd = optarg;
6423 verbose = B_TRUE;
6424 break;
6425 case 'g':
6426 guid = B_TRUE;
6427 break;
6428 case 'L':
6429 follow_links = B_TRUE;
6430 break;
6431 case 'P':
6432 full_name = B_TRUE;
6433 break;
6434 case 'T':
6435 get_timestamp_arg(*optarg);
6436 break;
6437 case 'v':
6438 verbose = B_TRUE;
6439 break;
6440 case 'p':
6441 parsable = B_TRUE;
6442 break;
6443 case 'l':
6444 latency = B_TRUE;
6445 break;
6446 case 'q':
6447 queues = B_TRUE;
6448 break;
6449 case 'H':
6450 scripted = B_TRUE;
6451 break;
6452 case 'w':
6453 l_histo = B_TRUE;
6454 break;
6455 case 'r':
6456 rq_histo = B_TRUE;
6457 break;
6458 case 'y':
6459 omit_since_boot = B_TRUE;
6460 break;
6461 case 'n':
6462 headers_once = B_TRUE;
6463 break;
6464 case 'h':
6465 usage(B_FALSE);
6466 break;
6467 case '?':
6468 if (optopt == 'c') {
6469 print_zpool_script_list("iostat");
6470 exit(0);
6471 } else {
6472 fprintf(stderr,
6473 gettext("invalid option '%c'\n"), optopt);
6474 }
6475 usage(B_FALSE);
6476 }
6477 }
6478
6479 argc -= optind;
6480 argv += optind;
6481
6482 cb.cb_literal = parsable;
6483 cb.cb_scripted = scripted;
6484
6485 if (guid)
6486 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
6487 if (follow_links)
6488 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6489 if (full_name)
6490 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
6491 cb.cb_iteration = 0;
6492 cb.cb_namewidth = 0;
6493 cb.cb_verbose = verbose;
6494
6495 /* Get our interval and count values (if any) */
6496 if (guid) {
6497 get_interval_count_filter_guids(&argc, argv, &interval,
6498 &count, &cb);
6499 } else {
6500 get_interval_count(&argc, argv, &interval, &count);
6501 }
6502
6503 if (argc == 0) {
6504 /* No args, so just print the defaults. */
6505 } else if (are_all_pools(argc, argv)) {
6506 /* All the args are pool names */
6507 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
6508 /* All the args are vdevs */
6509 cb.cb_vdevs.cb_names = argv;
6510 cb.cb_vdevs.cb_names_count = argc;
6511 argc = 0; /* No pools to process */
6512 } else if (are_all_pools(1, argv)) {
6513 /* The first arg is a pool name */
6514 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
6515 &cb.cb_vdevs)) {
6516 /* ...and the rest are vdev names */
6517 cb.cb_vdevs.cb_names = argv + 1;
6518 cb.cb_vdevs.cb_names_count = argc - 1;
6519 argc = 1; /* One pool to process */
6520 } else {
6521 fprintf(stderr, gettext("Expected either a list of "));
6522 fprintf(stderr, gettext("pools, or list of vdevs in"));
6523 fprintf(stderr, " \"%s\", ", argv[0]);
6524 fprintf(stderr, gettext("but got:\n"));
6525 error_list_unresolved_vdevs(argc - 1, argv + 1,
6526 argv[0], &cb.cb_vdevs);
6527 fprintf(stderr, "\n");
6528 usage(B_FALSE);
6529 return (1);
6530 }
6531 } else {
6532 /*
6533 * The args don't make sense. The first arg isn't a pool name,
6534 * nor are all the args vdevs.
6535 */
6536 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
6537 fprintf(stderr, "\n");
6538 return (1);
6539 }
6540
6541 if (cb.cb_vdevs.cb_names_count != 0) {
6542 /*
6543 * If user specified vdevs, it implies verbose.
6544 */
6545 cb.cb_verbose = B_TRUE;
6546 }
6547
6548 /*
6549 * Construct the list of all interesting pools.
6550 */
6551 ret = 0;
6552 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
6553 &ret)) == NULL)
6554 return (1);
6555
6556 if (pool_list_count(list) == 0 && argc != 0) {
6557 pool_list_free(list);
6558 return (1);
6559 }
6560
6561 if (pool_list_count(list) == 0 && interval == 0) {
6562 pool_list_free(list);
6563 (void) fprintf(stderr, gettext("no pools available\n"));
6564 return (1);
6565 }
6566
6567 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
6568 pool_list_free(list);
6569 (void) fprintf(stderr,
6570 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
6571 usage(B_FALSE);
6572 return (1);
6573 }
6574
6575 if (l_histo && rq_histo) {
6576 pool_list_free(list);
6577 (void) fprintf(stderr,
6578 gettext("Only one of [-r|-w] can be passed at a time\n"));
6579 usage(B_FALSE);
6580 return (1);
6581 }
6582
6583 /*
6584 * Enter the main iostat loop.
6585 */
6586 cb.cb_list = list;
6587
6588 if (l_histo) {
6589 /*
6590 * Histograms tables look out of place when you try to display
6591 * them with the other stats, so make a rule that you can only
6592 * print histograms by themselves.
6593 */
6594 cb.cb_flags = IOS_L_HISTO_M;
6595 } else if (rq_histo) {
6596 cb.cb_flags = IOS_RQ_HISTO_M;
6597 } else {
6598 cb.cb_flags = IOS_DEFAULT_M;
6599 if (latency)
6600 cb.cb_flags |= IOS_LATENCY_M;
6601 if (queues)
6602 cb.cb_flags |= IOS_QUEUES_M;
6603 }
6604
6605 /*
6606 * See if the module supports all the stats we want to display.
6607 */
6608 unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
6609 if (unsupported_flags) {
6610 uint64_t f;
6611 int idx;
6612 fprintf(stderr,
6613 gettext("The loaded zfs module doesn't support:"));
6614
6615 /* for each bit set in unsupported_flags */
6616 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
6617 idx = lowbit64(f) - 1;
6618 fprintf(stderr, " -%c", flag_to_arg[idx]);
6619 }
6620
6621 fprintf(stderr, ". Try running a newer module.\n");
6622 pool_list_free(list);
6623
6624 return (1);
6625 }
6626
6627 for (;;) {
6628 if ((npools = pool_list_count(list)) == 0)
6629 (void) fprintf(stderr, gettext("no pools available\n"));
6630 else {
6631 /*
6632 * If this is the first iteration and -y was supplied
6633 * we skip any printing.
6634 */
6635 boolean_t skip = (omit_since_boot &&
6636 cb.cb_iteration == 0);
6637
6638 /*
6639 * Refresh all statistics. This is done as an
6640 * explicit step before calculating the maximum name
6641 			 * width, so that any configuration changes are
6642 * properly accounted for.
6643 */
6644 (void) pool_list_iter(list, B_FALSE, refresh_iostat,
6645 &cb);
6646
6647 /*
6648 * Iterate over all pools to determine the maximum width
6649 * for the pool / device name column across all pools.
6650 */
6651 cb.cb_namewidth = 0;
6652 (void) pool_list_iter(list, B_FALSE,
6653 get_namewidth_iostat, &cb);
6654
6655 if (timestamp_fmt != NODATE)
6656 print_timestamp(timestamp_fmt);
6657
6658 if (cmd != NULL && cb.cb_verbose &&
6659 !(cb.cb_flags & IOS_ANYHISTO_M)) {
6660 cb.vcdl = all_pools_for_each_vdev_run(argc,
6661 argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
6662 cb.cb_vdevs.cb_names_count,
6663 cb.cb_vdevs.cb_name_flags);
6664 } else {
6665 cb.vcdl = NULL;
6666 }
6667
6668
6669 /*
6670 * Check terminal size so we can print headers
6671 * even when terminal window has its height
6672 * changed.
6673 */
6674 winheight = terminal_height();
6675 /*
6676 			 * Are we connected to a TTY? If not, headers_once
6677 * should be true, to avoid breaking scripts.
6678 */
6679 if (winheight < 0)
6680 headers_once = B_TRUE;
6681
6682 /*
6683 * If it's the first time and we're not skipping it,
6684 			 * or if skip and verbose differ, print the header.
6685 *
6686 * The histogram code explicitly prints its header on
6687 * every vdev, so skip this for histograms.
6688 */
6689 if (((++cb.cb_iteration == 1 && !skip) ||
6690 (skip != verbose) ||
6691 (!headers_once &&
6692 (cb.cb_iteration % winheight) == 0)) &&
6693 (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
6694 !cb.cb_scripted)
6695 print_iostat_header(&cb);
6696
6697 if (skip) {
6698 (void) fflush(stdout);
6699 (void) fsleep(interval);
6700 continue;
6701 }
6702
6703 pool_list_iter(list, B_FALSE, print_iostat, &cb);
6704
6705 /*
6706 * If there's more than one pool, and we're not in
6707 * verbose mode (which prints a separator for us),
6708 * then print a separator.
6709 *
6710 * In addition, if we're printing specific vdevs then
6711 * we also want an ending separator.
6712 */
6713 if (((npools > 1 && !verbose &&
6714 !(cb.cb_flags & IOS_ANYHISTO_M)) ||
6715 (!(cb.cb_flags & IOS_ANYHISTO_M) &&
6716 cb.cb_vdevs.cb_names_count)) &&
6717 !cb.cb_scripted) {
6718 print_iostat_separator(&cb);
6719 if (cb.vcdl != NULL)
6720 print_cmd_columns(cb.vcdl, 1);
6721 printf("\n");
6722 }
6723
6724 if (cb.vcdl != NULL)
6725 free_vdev_cmd_data_list(cb.vcdl);
6726
6727 }
6728
6729 if (interval == 0)
6730 break;
6731
6732 if (count != 0 && --count == 0)
6733 break;
6734
6735 (void) fflush(stdout);
6736 (void) fsleep(interval);
6737 }
6738
6739 pool_list_free(list);
6740
6741 return (ret);
6742 }
6743
6744 typedef struct list_cbdata {
6745 boolean_t cb_verbose;
6746 int cb_name_flags;
6747 int cb_namewidth;
6748 boolean_t cb_json;
6749 boolean_t cb_scripted;
6750 zprop_list_t *cb_proplist;
6751 boolean_t cb_literal;
6752 nvlist_t *cb_jsobj;
6753 boolean_t cb_json_as_int;
6754 boolean_t cb_json_pool_key_guid;
6755 } list_cbdata_t;
6756
6757
6758 /*
6759 * Given a list of columns to display, output appropriate headers for each one.
6760 */
6761 static void
6762 print_header(list_cbdata_t *cb)
6763 {
6764 zprop_list_t *pl = cb->cb_proplist;
6765 char headerbuf[ZPOOL_MAXPROPLEN];
6766 const char *header;
6767 boolean_t first = B_TRUE;
6768 boolean_t right_justify;
6769 size_t width = 0;
6770
6771 for (; pl != NULL; pl = pl->pl_next) {
6772 width = pl->pl_width;
6773 if (first && cb->cb_verbose) {
6774 /*
6775 * Reset the width to accommodate the verbose listing
6776 * of devices.
6777 */
6778 width = cb->cb_namewidth;
6779 }
6780
6781 if (!first)
6782 (void) fputs(" ", stdout);
6783 else
6784 first = B_FALSE;
6785
6786 right_justify = B_FALSE;
6787 if (pl->pl_prop != ZPROP_USERPROP) {
6788 header = zpool_prop_column_name(pl->pl_prop);
6789 right_justify = zpool_prop_align_right(pl->pl_prop);
6790 } else {
6791 int i;
6792
6793 for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
6794 headerbuf[i] = toupper(pl->pl_user_prop[i]);
6795 headerbuf[i] = '\0';
6796 header = headerbuf;
6797 }
6798
6799 if (pl->pl_next == NULL && !right_justify)
6800 (void) fputs(header, stdout);
6801 else if (right_justify)
6802 (void) printf("%*s", (int)width, header);
6803 else
6804 (void) printf("%-*s", (int)width, header);
6805 }
6806
6807 (void) fputc('\n', stdout);
6808 }
6809
6810 /*
6811 * Given a pool and a list of properties, print out all the properties according
6812 * to the described layout. Used by zpool_do_list().
6813 */
6814 static void
6815 collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
6816 {
6817 zprop_list_t *pl = cb->cb_proplist;
6818 boolean_t first = B_TRUE;
6819 char property[ZPOOL_MAXPROPLEN];
6820 const char *propstr;
6821 boolean_t right_justify;
6822 size_t width;
6823 zprop_source_t sourcetype = ZPROP_SRC_NONE;
6824 nvlist_t *item, *d, *props;
6825 item = d = props = NULL;
6826
6827 if (cb->cb_json) {
6828 item = fnvlist_alloc();
6829 props = fnvlist_alloc();
6830 d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools");
6831 if (d == NULL) {
6832 fprintf(stderr, "pools obj not found.\n");
6833 exit(1);
6834 }
6835 fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int);
6836 }
6837
6838 for (; pl != NULL; pl = pl->pl_next) {
6839
6840 width = pl->pl_width;
6841 if (first && cb->cb_verbose) {
6842 /*
6843 * Reset the width to accommodate the verbose listing
6844 * of devices.
6845 */
6846 width = cb->cb_namewidth;
6847 }
6848
6849 if (!cb->cb_json && !first) {
6850 if (cb->cb_scripted)
6851 (void) fputc('\t', stdout);
6852 else
6853 (void) fputs(" ", stdout);
6854 } else {
6855 first = B_FALSE;
6856 }
6857
6858 right_justify = B_FALSE;
6859 if (pl->pl_prop != ZPROP_USERPROP) {
6860 if (zpool_get_prop(zhp, pl->pl_prop, property,
6861 sizeof (property), &sourcetype,
6862 cb->cb_literal) != 0)
6863 propstr = "-";
6864 else
6865 propstr = property;
6866
6867 right_justify = zpool_prop_align_right(pl->pl_prop);
6868 } else if ((zpool_prop_feature(pl->pl_user_prop) ||
6869 zpool_prop_unsupported(pl->pl_user_prop)) &&
6870 zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
6871 sizeof (property)) == 0) {
6872 propstr = property;
6873 sourcetype = ZPROP_SRC_LOCAL;
6874 } else if (zfs_prop_user(pl->pl_user_prop) &&
6875 zpool_get_userprop(zhp, pl->pl_user_prop, property,
6876 sizeof (property), &sourcetype) == 0) {
6877 propstr = property;
6878 } else {
6879 propstr = "-";
6880 }
6881
6882 if (cb->cb_json) {
6883 if (pl->pl_prop == ZPOOL_PROP_NAME)
6884 continue;
6885 const char *prop_name;
6886 if (pl->pl_prop != ZPROP_USERPROP)
6887 prop_name = zpool_prop_to_name(pl->pl_prop);
6888 else
6889 prop_name = pl->pl_user_prop;
6890 (void) zprop_nvlist_one_property(
6891 prop_name, propstr,
6892 sourcetype, NULL, NULL, props, cb->cb_json_as_int);
6893 } else {
6894 /*
6895 * If this is being called in scripted mode, or if this
6896 * is the last column and it is left-justified, don't
6897 * include a width format specifier.
6898 */
6899 if (cb->cb_scripted || (pl->pl_next == NULL &&
6900 !right_justify))
6901 (void) fputs(propstr, stdout);
6902 else if (right_justify)
6903 (void) printf("%*s", (int)width, propstr);
6904 else
6905 (void) printf("%-*s", (int)width, propstr);
6906 }
6907 }
6908
6909 if (cb->cb_json) {
6910 fnvlist_add_nvlist(item, "properties", props);
6911 if (cb->cb_json_pool_key_guid) {
6912 char pool_guid[256];
6913 uint64_t guid = fnvlist_lookup_uint64(
6914 zpool_get_config(zhp, NULL),
6915 ZPOOL_CONFIG_POOL_GUID);
6916 snprintf(pool_guid, 256, "%llu",
6917 (u_longlong_t)guid);
6918 fnvlist_add_nvlist(d, pool_guid, item);
6919 } else {
6920 fnvlist_add_nvlist(d, zpool_get_name(zhp),
6921 item);
6922 }
6923 fnvlist_free(props);
6924 fnvlist_free(item);
6925 } else
6926 (void) fputc('\n', stdout);
6927 }
6928
6929 static void
6930 collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,
6931 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format,
6932 boolean_t json, nvlist_t *nvl, boolean_t as_int)
6933 {
6934 char propval[64];
6935 boolean_t fixed;
6936 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6937
6938 switch (prop) {
6939 case ZPOOL_PROP_SIZE:
6940 case ZPOOL_PROP_EXPANDSZ:
6941 case ZPOOL_PROP_CHECKPOINT:
6942 case ZPOOL_PROP_DEDUPRATIO:
6943 case ZPOOL_PROP_DEDUPCACHED:
6944 if (value == 0)
6945 (void) strlcpy(propval, "-", sizeof (propval));
6946 else
6947 zfs_nicenum_format(value, propval, sizeof (propval),
6948 format);
6949 break;
6950 case ZPOOL_PROP_FRAGMENTATION:
6951 if (value == ZFS_FRAG_INVALID) {
6952 (void) strlcpy(propval, "-", sizeof (propval));
6953 } else if (format == ZFS_NICENUM_RAW) {
6954 (void) snprintf(propval, sizeof (propval), "%llu",
6955 (unsigned long long)value);
6956 } else {
6957 (void) snprintf(propval, sizeof (propval), "%llu%%",
6958 (unsigned long long)value);
6959 }
6960 break;
6961 case ZPOOL_PROP_CAPACITY:
6962 /* capacity value is in parts-per-10,000 (aka permyriad) */
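		/* e.g. a raw value of 2537 prints as "25.4%" (2537 / 100.0) */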
6963 if (format == ZFS_NICENUM_RAW)
6964 (void) snprintf(propval, sizeof (propval), "%llu",
6965 (unsigned long long)value / 100);
6966 else
6967 (void) snprintf(propval, sizeof (propval),
6968 value < 1000 ? "%1.2f%%" : value < 10000 ?
6969 "%2.1f%%" : "%3.0f%%", value / 100.0);
6970 break;
6971 case ZPOOL_PROP_HEALTH:
6972 width = 8;
6973 (void) strlcpy(propval, str, sizeof (propval));
6974 break;
6975 default:
6976 zfs_nicenum_format(value, propval, sizeof (propval), format);
6977 }
6978
6979 if (!valid)
6980 (void) strlcpy(propval, "-", sizeof (propval));
6981
6982 if (json) {
6983 zprop_nvlist_one_property(zpool_prop_to_name(prop), propval,
6984 ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);
6985 } else {
6986 if (scripted)
6987 (void) printf("\t%s", propval);
6988 else
6989 (void) printf(" %*s", (int)width, propval);
6990 }
6991 }
6992
6993 /*
6994  * Print the static default line per vdev.
6995  * Not compatible with the '-o' <proplist> option.
6996 */
6997 static void
6998 collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
6999 list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)
7000 {
7001 nvlist_t **child;
7002 vdev_stat_t *vs;
7003 uint_t c, children = 0;
7004 char *vname;
7005 boolean_t scripted = cb->cb_scripted;
7006 uint64_t islog = B_FALSE;
7007 nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;
7008 props = ent = ch = obj = sp = l2c = NULL;
7009 const char *dashes = "%-*s - - - - "
7010 "- - - - -\n";
7011
7012 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
7013 (uint64_t **)&vs, &c) == 0);
7014
7015 if (name != NULL) {
7016 boolean_t toplevel = (vs->vs_space != 0);
7017 uint64_t cap;
7018 enum zfs_nicenum_format format;
7019 const char *state;
7020
7021 if (cb->cb_literal)
7022 format = ZFS_NICENUM_RAW;
7023 else
7024 format = ZFS_NICENUM_1024;
7025
7026 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
7027 return;
7028
7029 if (cb->cb_json) {
7030 props = fnvlist_alloc();
7031 ent = fnvlist_alloc();
7032 fill_vdev_info(ent, zhp, (char *)name, B_FALSE,
7033 cb->cb_json_as_int);
7034 } else {
7035 if (scripted)
7036 (void) printf("\t%s", name);
7037 else if (strlen(name) + depth > cb->cb_namewidth)
7038 (void) printf("%*s%s", depth, "", name);
7039 else
7040 (void) printf("%*s%s%*s", depth, "", name,
7041 (int)(cb->cb_namewidth - strlen(name) -
7042 depth), "");
7043 }
7044
7045 /*
7046 * Print the properties for the individual vdevs. Some
7047 * properties are only applicable to toplevel vdevs. The
7048 		 * 'toplevel' boolean value is passed to collect_vdev_prop()
7049 * to indicate that the value is valid.
7050 */
7051 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) {
7052 collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
7053 scripted, B_TRUE, format, cb->cb_json, props,
7054 cb->cb_json_as_int);
7055 } else {
7056 collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
7057 scripted, toplevel, format, cb->cb_json, props,
7058 cb->cb_json_as_int);
7059 }
7060 collect_vdev_prop(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
7061 scripted, toplevel, format, cb->cb_json, props,
7062 cb->cb_json_as_int);
7063 collect_vdev_prop(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
7064 NULL, scripted, toplevel, format, cb->cb_json, props,
7065 cb->cb_json_as_int);
7066 collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
7067 vs->vs_checkpoint_space, NULL, scripted, toplevel, format,
7068 cb->cb_json, props, cb->cb_json_as_int);
7069 collect_vdev_prop(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
7070 scripted, B_TRUE, format, cb->cb_json, props,
7071 cb->cb_json_as_int);
7072 collect_vdev_prop(ZPOOL_PROP_FRAGMENTATION,
7073 vs->vs_fragmentation, NULL, scripted,
7074 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
7075 format, cb->cb_json, props, cb->cb_json_as_int);
7076 cap = (vs->vs_space == 0) ? 0 :
7077 (vs->vs_alloc * 10000 / vs->vs_space);
7078 collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap, NULL,
7079 scripted, toplevel, format, cb->cb_json, props,
7080 cb->cb_json_as_int);
7081 collect_vdev_prop(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
7082 scripted, toplevel, format, cb->cb_json, props,
7083 cb->cb_json_as_int);
7084 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
7085 if (isspare) {
7086 if (vs->vs_aux == VDEV_AUX_SPARED)
7087 state = "INUSE";
7088 else if (vs->vs_state == VDEV_STATE_HEALTHY)
7089 state = "AVAIL";
7090 }
7091 collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state, scripted,
7092 B_TRUE, format, cb->cb_json, props, cb->cb_json_as_int);
7093
7094 if (cb->cb_json) {
7095 fnvlist_add_nvlist(ent, "properties", props);
7096 fnvlist_free(props);
7097 } else
7098 (void) fputc('\n', stdout);
7099 }
7100
7101 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
7102 &child, &children) != 0) {
7103 if (cb->cb_json) {
7104 fnvlist_add_nvlist(item, name, ent);
7105 fnvlist_free(ent);
7106 }
7107 return;
7108 }
7109
7110 if (cb->cb_json) {
7111 ch = fnvlist_alloc();
7112 }
7113
7114 /* list the normal vdevs first */
7115 for (c = 0; c < children; c++) {
7116 uint64_t ishole = B_FALSE;
7117
7118 if (nvlist_lookup_uint64(child[c],
7119 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
7120 continue;
7121
7122 if (nvlist_lookup_uint64(child[c],
7123 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
7124 continue;
7125
7126 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
7127 continue;
7128
7129 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7130 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
7131
7132 if (name == NULL || cb->cb_json != B_TRUE)
7133 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7134 B_FALSE, item);
7135 else if (cb->cb_json) {
7136 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7137 B_FALSE, ch);
7138 }
7139 free(vname);
7140 }
7141
7142 if (cb->cb_json) {
7143 if (!nvlist_empty(ch))
7144 fnvlist_add_nvlist(ent, "vdevs", ch);
7145 fnvlist_free(ch);
7146 }
7147
7148 /* list the classes: 'logs', 'dedup', and 'special' */
7149 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
7150 boolean_t printed = B_FALSE;
7151 if (cb->cb_json)
7152 obj = fnvlist_alloc();
7153 for (c = 0; c < children; c++) {
7154 const char *bias = NULL;
7155 const char *type = NULL;
7156
7157 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
7158 &islog) == 0 && islog) {
7159 bias = VDEV_ALLOC_CLASS_LOGS;
7160 } else {
7161 (void) nvlist_lookup_string(child[c],
7162 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
7163 (void) nvlist_lookup_string(child[c],
7164 ZPOOL_CONFIG_TYPE, &type);
7165 }
7166 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
7167 continue;
7168 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
7169 continue;
7170
7171 if (!printed && !cb->cb_json) {
7172 /* LINTED E_SEC_PRINTF_VAR_FMT */
7173 (void) printf(dashes, cb->cb_namewidth,
7174 class_name[n]);
7175 printed = B_TRUE;
7176 }
7177 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7178 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
7179 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7180 B_FALSE, obj);
7181 free(vname);
7182 }
7183 if (cb->cb_json) {
7184 if (!nvlist_empty(obj))
7185 fnvlist_add_nvlist(item, class_name[n], obj);
7186 fnvlist_free(obj);
7187 }
7188 }
7189
7190 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
7191 &child, &children) == 0 && children > 0) {
7192 if (cb->cb_json) {
7193 l2c = fnvlist_alloc();
7194 } else {
7195 /* LINTED E_SEC_PRINTF_VAR_FMT */
7196 (void) printf(dashes, cb->cb_namewidth, "cache");
7197 }
7198 for (c = 0; c < children; c++) {
7199 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7200 cb->cb_name_flags);
7201 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7202 B_FALSE, l2c);
7203 free(vname);
7204 }
7205 if (cb->cb_json) {
7206 if (!nvlist_empty(l2c))
7207 fnvlist_add_nvlist(item, "l2cache", l2c);
7208 fnvlist_free(l2c);
7209 }
7210 }
7211
7212 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
7213 &children) == 0 && children > 0) {
7214 if (cb->cb_json) {
7215 sp = fnvlist_alloc();
7216 } else {
7217 /* LINTED E_SEC_PRINTF_VAR_FMT */
7218 (void) printf(dashes, cb->cb_namewidth, "spare");
7219 }
7220 for (c = 0; c < children; c++) {
7221 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7222 cb->cb_name_flags);
7223 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7224 B_TRUE, sp);
7225 free(vname);
7226 }
7227 if (cb->cb_json) {
7228 if (!nvlist_empty(sp))
7229 fnvlist_add_nvlist(item, "spares", sp);
7230 fnvlist_free(sp);
7231 }
7232 }
7233
7234 if (name != NULL && cb->cb_json) {
7235 fnvlist_add_nvlist(item, name, ent);
7236 fnvlist_free(ent);
7237 }
7238 }
7239
7240 /*
7241 * Generic callback function to list a pool.
7242 */
7243 static int
7244 list_callback(zpool_handle_t *zhp, void *data)
7245 {
7246 nvlist_t *p, *d, *nvdevs;
7247 uint64_t guid;
7248 char pool_guid[256];
7249 const char *pool_name = zpool_get_name(zhp);
7250 list_cbdata_t *cbp = data;
7251 p = d = nvdevs = NULL;
7252
7253 collect_pool(zhp, cbp);
7254
7255 if (cbp->cb_verbose) {
7256 nvlist_t *config, *nvroot;
7257 config = zpool_get_config(zhp, NULL);
7258 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
7259 &nvroot) == 0);
7260 if (cbp->cb_json) {
7261 d = fnvlist_lookup_nvlist(cbp->cb_jsobj,
7262 "pools");
7263 if (cbp->cb_json_pool_key_guid) {
7264 guid = fnvlist_lookup_uint64(config,
7265 ZPOOL_CONFIG_POOL_GUID);
7266 snprintf(pool_guid, 256, "%llu",
7267 (u_longlong_t)guid);
7268 p = fnvlist_lookup_nvlist(d, pool_guid);
7269 } else {
7270 p = fnvlist_lookup_nvlist(d, pool_name);
7271 }
7272 nvdevs = fnvlist_alloc();
7273 }
7274 collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs);
7275 if (cbp->cb_json) {
7276 fnvlist_add_nvlist(p, "vdevs", nvdevs);
7277 if (cbp->cb_json_pool_key_guid)
7278 fnvlist_add_nvlist(d, pool_guid, p);
7279 else
7280 fnvlist_add_nvlist(d, pool_name, p);
7281 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
7282 fnvlist_free(nvdevs);
7283 }
7284 }
7285
7286 return (0);
7287 }
7288
7289 /*
7290 * Set the minimum pool/vdev name column width. The width must be at least 9,
7291 * but may be as large as needed.
7292 */
7293 static int
7294 get_namewidth_list(zpool_handle_t *zhp, void *data)
7295 {
7296 list_cbdata_t *cb = data;
7297 int width;
7298
7299 width = get_namewidth(zhp, cb->cb_namewidth,
7300 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
7301
7302 if (width < 9)
7303 width = 9;
7304
7305 cb->cb_namewidth = width;
7306
7307 return (0);
7308 }
7309
7310 /*
7311 * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
7312 *
7313 * -g Display guid for individual vdev name.
7314 * -H Scripted mode. Don't display headers, and separate properties
7315 * by a single tab.
7316 * -L Follow links when resolving vdev path name.
7317 * -o List of properties to display. Defaults to
7318 * "name,size,allocated,free,expandsize,fragmentation,capacity,"
7319 * "dedupratio,health,altroot"
7320 * -p Display values in parsable (exact) format.
7321 * -P Display full path for vdev name.
7322 * -T Display a timestamp in date(1) or Unix format
7323 * -j Display the output in JSON format
7324 * --json-int Display the numbers as integer instead of strings.
7325 * --json-pool-key-guid Set pool GUID as key for pool objects.
7326 *
7327 * List all pools in the system, whether or not they're healthy. Output space
7328  * statistics for each one, as well as a health status summary.
7329 */
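
/*
 * Example invocations (illustrative):
 *
 *	zpool list			one summary line per pool
 *	zpool list -v tank		include the per-vdev breakdown
 *	zpool list -j --json-int	JSON output with numeric values
 */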
7330 int
7331 zpool_do_list(int argc, char **argv)
7332 {
7333 int c;
7334 int ret = 0;
7335 list_cbdata_t cb = { 0 };
7336 static char default_props[] =
7337 "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
7338 "capacity,dedupratio,health,altroot";
7339 char *props = default_props;
7340 float interval = 0;
7341 unsigned long count = 0;
7342 zpool_list_t *list;
7343 boolean_t first = B_TRUE;
7344 nvlist_t *data = NULL;
7345 current_prop_type = ZFS_TYPE_POOL;
7346
7347 struct option long_options[] = {
7348 {"json", no_argument, NULL, 'j'},
7349 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
7350 {"json-pool-key-guid", no_argument, NULL,
7351 ZPOOL_OPTION_POOL_KEY_GUID},
7352 {0, 0, 0, 0}
7353 };
7354
7355 /* check options */
7356 while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options,
7357 NULL)) != -1) {
7358 switch (c) {
7359 case 'g':
7360 cb.cb_name_flags |= VDEV_NAME_GUID;
7361 break;
7362 case 'H':
7363 cb.cb_scripted = B_TRUE;
7364 break;
7365 case 'L':
7366 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
7367 break;
7368 case 'o':
7369 props = optarg;
7370 break;
7371 case 'P':
7372 cb.cb_name_flags |= VDEV_NAME_PATH;
7373 break;
7374 case 'p':
7375 cb.cb_literal = B_TRUE;
7376 break;
7377 case 'j':
7378 cb.cb_json = B_TRUE;
7379 break;
7380 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
7381 cb.cb_json_as_int = B_TRUE;
7382 cb.cb_literal = B_TRUE;
7383 break;
7384 case ZPOOL_OPTION_POOL_KEY_GUID:
7385 cb.cb_json_pool_key_guid = B_TRUE;
7386 break;
7387 case 'T':
7388 get_timestamp_arg(*optarg);
7389 break;
7390 case 'v':
7391 cb.cb_verbose = B_TRUE;
7392 cb.cb_namewidth = 8; /* 8 until precalc is avail */
7393 break;
7394 case ':':
7395 (void) fprintf(stderr, gettext("missing argument for "
7396 "'%c' option\n"), optopt);
7397 usage(B_FALSE);
7398 break;
7399 case '?':
7400 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7401 optopt);
7402 usage(B_FALSE);
7403 }
7404 }
7405
7406 argc -= optind;
7407 argv += optind;
7408
7409 if (!cb.cb_json && cb.cb_json_as_int) {
7410 (void) fprintf(stderr, gettext("'--json-int' only works with"
7411 " '-j' option\n"));
7412 usage(B_FALSE);
7413 }
7414
7415 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
7416 (void) fprintf(stderr, gettext("'json-pool-key-guid' only"
7417 " works with '-j' option\n"));
7418 usage(B_FALSE);
7419 }
7420
7421 get_interval_count(&argc, argv, &interval, &count);
7422
7423 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
7424 usage(B_FALSE);
7425
7426 for (;;) {
7427 if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
7428 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
7429 return (1);
7430
7431 if (pool_list_count(list) == 0)
7432 break;
7433
7434 if (cb.cb_json) {
7435 cb.cb_jsobj = zpool_json_schema(0, 1);
7436 data = fnvlist_alloc();
7437 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
7438 fnvlist_free(data);
7439 }
7440
7441 cb.cb_namewidth = 0;
7442 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
7443
7444 if (timestamp_fmt != NODATE) {
7445 if (cb.cb_json) {
7446 if (cb.cb_json_as_int) {
7447 fnvlist_add_uint64(cb.cb_jsobj, "time",
7448 time(NULL));
7449 } else {
7450 char ts[128];
7451 get_timestamp(timestamp_fmt, ts, 128);
7452 fnvlist_add_string(cb.cb_jsobj, "time",
7453 ts);
7454 }
7455 } else
7456 print_timestamp(timestamp_fmt);
7457 }
7458
7459 if (!cb.cb_scripted && (first || cb.cb_verbose) &&
7460 !cb.cb_json) {
7461 print_header(&cb);
7462 first = B_FALSE;
7463 }
7464 ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
7465
7466 if (ret == 0 && cb.cb_json)
7467 zcmd_print_json(cb.cb_jsobj);
7468 else if (ret != 0 && cb.cb_json)
7469 nvlist_free(cb.cb_jsobj);
7470
7471 if (interval == 0)
7472 break;
7473
7474 if (count != 0 && --count == 0)
7475 break;
7476
7477 pool_list_free(list);
7478
7479 (void) fflush(stdout);
7480 (void) fsleep(interval);
7481 }
7482
7483 if (argc == 0 && !cb.cb_scripted && !cb.cb_json &&
7484 pool_list_count(list) == 0) {
7485 (void) printf(gettext("no pools available\n"));
7486 ret = 0;
7487 }
7488
7489 pool_list_free(list);
7490 zprop_free_list(cb.cb_proplist);
7491 return (ret);
7492 }
7493
7494 static int
7495 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
7496 {
7497 boolean_t force = B_FALSE;
7498 boolean_t rebuild = B_FALSE;
7499 boolean_t wait = B_FALSE;
7500 int c;
7501 nvlist_t *nvroot;
7502 char *poolname, *old_disk, *new_disk;
7503 zpool_handle_t *zhp;
7504 nvlist_t *props = NULL;
7505 char *propval;
7506 int ret;
7507
7508 /* check options */
7509 while ((c = getopt(argc, argv, "fo:sw")) != -1) {
7510 switch (c) {
7511 case 'f':
7512 force = B_TRUE;
7513 break;
7514 case 'o':
7515 if ((propval = strchr(optarg, '=')) == NULL) {
7516 (void) fprintf(stderr, gettext("missing "
7517 "'=' for -o option\n"));
7518 usage(B_FALSE);
7519 }
7520 *propval = '\0';
7521 propval++;
7522
7523 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
7524 (add_prop_list(optarg, propval, &props, B_TRUE)))
7525 usage(B_FALSE);
7526 break;
7527 case 's':
7528 rebuild = B_TRUE;
7529 break;
7530 case 'w':
7531 wait = B_TRUE;
7532 break;
7533 case '?':
7534 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7535 optopt);
7536 usage(B_FALSE);
7537 }
7538 }
7539
7540 argc -= optind;
7541 argv += optind;
7542
7543 /* get pool name and check number of arguments */
7544 if (argc < 1) {
7545 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7546 usage(B_FALSE);
7547 }
7548
7549 poolname = argv[0];
7550
7551 if (argc < 2) {
7552 (void) fprintf(stderr,
7553 gettext("missing <device> specification\n"));
7554 usage(B_FALSE);
7555 }
7556
7557 old_disk = argv[1];
7558
7559 if (argc < 3) {
7560 if (!replacing) {
7561 (void) fprintf(stderr,
7562 gettext("missing <new_device> specification\n"));
7563 usage(B_FALSE);
7564 }
7565 new_disk = old_disk;
7566 argc -= 1;
7567 argv += 1;
7568 } else {
7569 new_disk = argv[2];
7570 argc -= 2;
7571 argv += 2;
7572 }
7573
7574 if (argc > 1) {
7575 (void) fprintf(stderr, gettext("too many arguments\n"));
7576 usage(B_FALSE);
7577 }
7578
7579 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
7580 nvlist_free(props);
7581 return (1);
7582 }
7583
7584 if (zpool_get_config(zhp, NULL) == NULL) {
7585 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
7586 poolname);
7587 zpool_close(zhp);
7588 nvlist_free(props);
7589 return (1);
7590 }
7591
7592 /* unless manually specified use "ashift" pool property (if set) */
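	/*
	 * e.g. (illustrative): a pool created with ashift=12 propagates
	 * ashift=12 to the new vdev here unless "-o ashift=..." was given
	 * on the command line.
	 */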
7593 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
7594 int intval;
7595 zprop_source_t src;
7596 char strval[ZPOOL_MAXPROPLEN];
7597
7598 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
7599 if (src != ZPROP_SRC_DEFAULT) {
7600 (void) sprintf(strval, "%" PRId32, intval);
7601 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
7602 &props, B_TRUE) == 0);
7603 }
7604 }
7605
7606 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
7607 argc, argv);
7608 if (nvroot == NULL) {
7609 zpool_close(zhp);
7610 nvlist_free(props);
7611 return (1);
7612 }
7613
7614 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
7615 rebuild);
7616
7617 if (ret == 0 && wait) {
7618 zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
7619 char raidz_prefix[] = "raidz";
7620 if (replacing) {
7621 activity = ZPOOL_WAIT_REPLACE;
7622 } else if (strncmp(old_disk,
7623 raidz_prefix, strlen(raidz_prefix)) == 0) {
7624 activity = ZPOOL_WAIT_RAIDZ_EXPAND;
7625 }
7626 ret = zpool_wait(zhp, activity);
7627 }
7628
7629 nvlist_free(props);
7630 nvlist_free(nvroot);
7631 zpool_close(zhp);
7632
7633 return (ret);
7634 }
7635
7636 /*
7637 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
7638 *
7639 * -f Force attach, even if <new_device> appears to be in use.
7640 * -s Use sequential instead of healing reconstruction for resilver.
7641 * -o Set property=value.
7642 * -w Wait for replacing to complete before returning
7643 *
7644 * Replace <device> with <new_device>.
7645 */
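
/*
 * Example (illustrative): "zpool replace tank sda sdb" resilvers sdb from
 * the rest of the vdev and then detaches sda; "zpool replace -w tank sda"
 * replaces sda in place and blocks until the resilver completes.
 */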
7646 int
7647 zpool_do_replace(int argc, char **argv)
7648 {
7649 return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
7650 }
7651
7652 /*
7653 * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
7654 *
7655 * -f Force attach, even if <new_device> appears to be in use.
7656 * -s Use sequential instead of healing reconstruction for resilver.
7657 * -o Set property=value.
7658 * -w Wait for resilvering (mirror) or expansion (raidz) to complete
7659 * before returning.
7660 *
7661 * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
7662 * mirror or raidz. If <device> is not part of a mirror, then <device> will
7663 * be transformed into a mirror of <device> and <new_device>. When a mirror
7664 * is involved, <new_device> will begin life with a DTL of [0, now], and will
7665  * immediately begin to resilver itself. For the raidz case, an expansion will
7666 * commence and reflow the raidz data across all the disks including the
7667 * <new_device>.
7668 */
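
/*
 * Example invocations (illustrative):
 *
 *	zpool attach tank sda sdb		turn sda into a two-way mirror
 *	zpool attach -w tank raidz1-0 sdc	expand the raidz vdev and wait
 */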
7669 int
7670 zpool_do_attach(int argc, char **argv)
7671 {
7672 return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
7673 }
7674
7675 /*
7676 * zpool detach [-f] <pool> <device>
7677 *
7678 * -f Force detach of <device>, even if DTLs argue against it
7679 * (not supported yet)
7680 *
7681 * Detach a device from a mirror. The operation will be refused if <device>
7682 * is the last device in the mirror, or if the DTLs indicate that this device
7683 * has the only valid copy of some data.
7684 */
7685 int
7686 zpool_do_detach(int argc, char **argv)
7687 {
7688 int c;
7689 char *poolname, *path;
7690 zpool_handle_t *zhp;
7691 int ret;
7692
7693 /* check options */
7694 while ((c = getopt(argc, argv, "")) != -1) {
7695 switch (c) {
7696 case '?':
7697 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7698 optopt);
7699 usage(B_FALSE);
7700 }
7701 }
7702
7703 argc -= optind;
7704 argv += optind;
7705
7706 /* get pool name and check number of arguments */
7707 if (argc < 1) {
7708 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7709 usage(B_FALSE);
7710 }
7711
7712 if (argc < 2) {
7713 (void) fprintf(stderr,
7714 gettext("missing <device> specification\n"));
7715 usage(B_FALSE);
7716 }
7717
7718 poolname = argv[0];
7719 path = argv[1];
7720
7721 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7722 return (1);
7723
7724 ret = zpool_vdev_detach(zhp, path);
7725
7726 zpool_close(zhp);
7727
7728 return (ret);
7729 }
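
/*
 * Illustrative invocation (hypothetical names): detach one side of a
 * mirror, leaving the remaining device(s) in place:
 *
 *	# zpool detach tank sdb
 */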
7730
7731 /*
7732 * zpool split [-gLlnP] [-o prop=val] ...
7733 * [-o mntopt] ...
7734 * [-R altroot] <pool> <newpool> [<device> ...]
7735 *
7736 * -g Display guid for individual vdev name.
7737 * -L Follow links when resolving vdev path name.
7738 * -n Do not split the pool, but display the resulting layout if
7739 * it were to be split.
7740 * -o Set property=value, or set mount options.
7741 * -P Display full path for vdev name.
7742 * -R Mount the split-off pool under an alternate root.
7743 * -l Load encryption keys while importing.
7744 *
7745 * Splits the named pool and gives it the new pool name. Devices to be split
7746 * off may be listed, provided that no more than one device is specified
7747 * per top-level vdev mirror. The newly split pool is left in an exported
7748 * state unless -R is specified.
7749 *
7750 * Restrictions: the top level of the pool must only be made up of
7751 * mirrors; all devices in the pool must be healthy; no device may be
7752 * undergoing a resilvering operation.
7753 */
7754 int
7755 zpool_do_split(int argc, char **argv)
7756 {
7757 char *srcpool, *newpool, *propval;
7758 char *mntopts = NULL;
7759 splitflags_t flags;
7760 int c, ret = 0;
7761 int ms_status = 0;
7762 boolean_t loadkeys = B_FALSE;
7763 zpool_handle_t *zhp;
7764 nvlist_t *config, *props = NULL;
7765
7766 flags.dryrun = B_FALSE;
7767 flags.import = B_FALSE;
7768 flags.name_flags = 0;
7769
7770 /* check options */
7771 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
7772 switch (c) {
7773 case 'g':
7774 flags.name_flags |= VDEV_NAME_GUID;
7775 break;
7776 case 'L':
7777 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
7778 break;
7779 case 'R':
7780 flags.import = B_TRUE;
7781 if (add_prop_list(
7782 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
7783 &props, B_TRUE) != 0) {
7784 nvlist_free(props);
7785 usage(B_FALSE);
7786 }
7787 break;
7788 case 'l':
7789 loadkeys = B_TRUE;
7790 break;
7791 case 'n':
7792 flags.dryrun = B_TRUE;
7793 break;
7794 case 'o':
7795 if ((propval = strchr(optarg, '=')) != NULL) {
7796 *propval = '\0';
7797 propval++;
7798 if (add_prop_list(optarg, propval,
7799 &props, B_TRUE) != 0) {
7800 nvlist_free(props);
7801 usage(B_FALSE);
7802 }
7803 } else {
7804 mntopts = optarg;
7805 }
7806 break;
7807 case 'P':
7808 flags.name_flags |= VDEV_NAME_PATH;
7809 break;
7810 case ':':
7811 (void) fprintf(stderr, gettext("missing argument for "
7812 "'%c' option\n"), optopt);
7813 usage(B_FALSE);
7814 break;
7815 case '?':
7816 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7817 optopt);
7818 usage(B_FALSE);
7819 break;
7820 }
7821 }
7822
7823 if (!flags.import && mntopts != NULL) {
7824 (void) fprintf(stderr, gettext("setting mntopts is only "
7825 "valid when importing the pool\n"));
7826 usage(B_FALSE);
7827 }
7828
7829 if (!flags.import && loadkeys) {
7830 (void) fprintf(stderr, gettext("loading keys is only "
7831 "valid when importing the pool\n"));
7832 usage(B_FALSE);
7833 }
7834
7835 argc -= optind;
7836 argv += optind;
7837
7838 if (argc < 1) {
7839 (void) fprintf(stderr, gettext("Missing pool name\n"));
7840 usage(B_FALSE);
7841 }
7842 if (argc < 2) {
7843 (void) fprintf(stderr, gettext("Missing new pool name\n"));
7844 usage(B_FALSE);
7845 }
7846
7847 srcpool = argv[0];
7848 newpool = argv[1];
7849
7850 argc -= 2;
7851 argv += 2;
7852
7853 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
7854 nvlist_free(props);
7855 return (1);
7856 }
7857
7858 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
7859 if (config == NULL) {
7860 ret = 1;
7861 } else {
7862 if (flags.dryrun) {
7863 (void) printf(gettext("would create '%s' with the "
7864 "following layout:\n\n"), newpool);
7865 print_vdev_tree(NULL, newpool, config, 0, "",
7866 flags.name_flags);
7867 print_vdev_tree(NULL, "dedup", config, 0,
7868 VDEV_ALLOC_BIAS_DEDUP, 0);
7869 print_vdev_tree(NULL, "special", config, 0,
7870 VDEV_ALLOC_BIAS_SPECIAL, 0);
7871 }
7872 }
7873
7874 zpool_close(zhp);
7875
7876 if (ret != 0 || flags.dryrun || !flags.import) {
7877 nvlist_free(config);
7878 nvlist_free(props);
7879 return (ret);
7880 }
7881
7882 /*
7883 * The split was successful. Now we need to open the new
7884 * pool and import it.
7885 */
7886 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
7887 nvlist_free(config);
7888 nvlist_free(props);
7889 return (1);
7890 }
7891
7892 if (loadkeys) {
7893 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
7894 if (ret != 0)
7895 ret = 1;
7896 }
7897
7898 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
7899 ms_status = zpool_enable_datasets(zhp, mntopts, 0,
7900 mount_tp_nthr);
7901 if (ms_status == EZFS_SHAREFAILED) {
7902 (void) fprintf(stderr, gettext("Split was successful, "
7903 "datasets are mounted but sharing of some datasets "
7904 "has failed\n"));
7905 } else if (ms_status == EZFS_MOUNTFAILED) {
7906 (void) fprintf(stderr, gettext("Split was successful"
7907 ", but some datasets could not be mounted\n"));
7908 (void) fprintf(stderr, gettext("Try doing '%s' with a "
7909 "different altroot\n"), "zpool import");
7910 }
7911 }
7912 zpool_close(zhp);
7913 nvlist_free(config);
7914 nvlist_free(props);
7915
7916 return (ret);
7917 }
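
/*
 * Illustrative invocations (hypothetical names, per the summary above):
 *
 *	# zpool split -n tank newtank		(dry run: print the layout
 *						the split would produce)
 *	# zpool split -R /mnt tank newtank	(split, then import the new
 *						pool under /mnt)
 */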
7918
7919
7920 /*
7921 * zpool online [-e] [--power] <pool> <device> ...
7922 *
7923 * -e: Expand the device to use all available space
* --power: Power on the enclosure slot to the drive (if possible)
7924 */
7925 int
7926 zpool_do_online(int argc, char **argv)
7927 {
7928 int c, i;
7929 char *poolname;
7930 zpool_handle_t *zhp;
7931 int ret = 0;
7932 vdev_state_t newstate;
7933 int flags = 0;
7934 boolean_t is_power_on = B_FALSE;
7935 struct option long_options[] = {
7936 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7937 {0, 0, 0, 0}
7938 };
7939
7940 /* check options */
7941 while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
7942 switch (c) {
7943 case 'e':
7944 flags |= ZFS_ONLINE_EXPAND;
7945 break;
7946 case ZPOOL_OPTION_POWER:
7947 is_power_on = B_TRUE;
7948 break;
7949 case '?':
7950 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7951 optopt);
7952 usage(B_FALSE);
7953 }
7954 }
7955
7956 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7957 is_power_on = B_TRUE;
7958
7959 argc -= optind;
7960 argv += optind;
7961
7962 /* get pool name and check number of arguments */
7963 if (argc < 1) {
7964 (void) fprintf(stderr, gettext("missing pool name\n"));
7965 usage(B_FALSE);
7966 }
7967 if (argc < 2) {
7968 (void) fprintf(stderr, gettext("missing device name\n"));
7969 usage(B_FALSE);
7970 }
7971
7972 poolname = argv[0];
7973
7974 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
7975 (void) fprintf(stderr, gettext("failed to open pool "
7976 "\"%s\""), poolname);
7977 return (1);
7978 }
7979
7980 for (i = 1; i < argc; i++) {
7981 vdev_state_t oldstate;
7982 boolean_t avail_spare, l2cache;
7983 int rc;
7984
7985 if (is_power_on) {
7986 rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
7987 if (rc == ENOTSUP) {
7988 (void) fprintf(stderr,
7989 gettext("Power control not supported\n"));
7990 }
7991 if (rc != 0)
7992 return (rc);
7993 }
7994
7995 nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
7996 &l2cache, NULL);
7997 if (tgt == NULL) {
7998 ret = 1;
7999 (void) fprintf(stderr, gettext("couldn't find device "
8000 "\"%s\" in pool \"%s\"\n"), argv[i], poolname);
8001 continue;
8002 }
8003 uint_t vsc;
8004 oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
8005 ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
8006 if ((rc = zpool_vdev_online(zhp, argv[i], flags,
8007 &newstate)) == 0) {
8008 if (newstate != VDEV_STATE_HEALTHY) {
8009 (void) printf(gettext("warning: device '%s' "
8010 "onlined, but remains in faulted state\n"),
8011 argv[i]);
8012 if (newstate == VDEV_STATE_FAULTED)
8013 (void) printf(gettext("use 'zpool "
8014 "clear' to restore a faulted "
8015 "device\n"));
8016 else
8017 (void) printf(gettext("use 'zpool "
8018 "replace' to replace devices "
8019 "that are no longer present\n"));
8020 if ((flags & ZFS_ONLINE_EXPAND)) {
8021 (void) printf(gettext("%s: failed "
8022 "to expand usable space on "
8023 "unhealthy device '%s'\n"),
8024 (oldstate >= VDEV_STATE_DEGRADED ?
8025 "error" : "warning"), argv[i]);
8026 if (oldstate >= VDEV_STATE_DEGRADED) {
8027 ret = 1;
8028 break;
8029 }
8030 }
8031 }
8032 } else {
8033 (void) fprintf(stderr, gettext("Failed to online "
8034 "\"%s\" in pool \"%s\": %d\n"),
8035 argv[i], poolname, rc);
8036 ret = 1;
8037 }
8038 }
8039
8040 zpool_close(zhp);
8041
8042 return (ret);
8043 }
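
/*
 * Illustrative invocations (hypothetical names):
 *
 *	# zpool online tank sdb
 *	# zpool online -e tank sdb	(also expand to the full device size)
 */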
8044
8045 /*
8046 * zpool offline [-ft]|[--power] <pool> <device> ...
8047 *
8049 * -f Force the device into a faulted state.
8050 *
8051 * -t Only take the device off-line temporarily. The offline/faulted
8052 * state will not be persistent across reboots.
8053 *
8054 * --power Power off the enclosure slot to the drive (if possible)
8055 */
8056 int
8057 zpool_do_offline(int argc, char **argv)
8058 {
8059 int c, i;
8060 char *poolname;
8061 zpool_handle_t *zhp;
8062 int ret = 0;
8063 boolean_t istmp = B_FALSE;
8064 boolean_t fault = B_FALSE;
8065 boolean_t is_power_off = B_FALSE;
8066
8067 struct option long_options[] = {
8068 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
8069 {0, 0, 0, 0}
8070 };
8071
8072 /* check options */
8073 while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
8074 switch (c) {
8075 case 'f':
8076 fault = B_TRUE;
8077 break;
8078 case 't':
8079 istmp = B_TRUE;
8080 break;
8081 case ZPOOL_OPTION_POWER:
8082 is_power_off = B_TRUE;
8083 break;
8084 case '?':
8085 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8086 optopt);
8087 usage(B_FALSE);
8088 }
8089 }
8090
8091 if (is_power_off && fault) {
8092 (void) fprintf(stderr,
8093 gettext("-0 and -f cannot be used together\n"));
8094 usage(B_FALSE);
8095 return (1);
8096 }
8097
8098 if (is_power_off && istmp) {
8099 (void) fprintf(stderr,
8100 gettext("-0 and -t cannot be used together\n"));
8101 usage(B_FALSE);
8102 return (1);
8103 }
8104
8105 argc -= optind;
8106 argv += optind;
8107
8108 /* get pool name and check number of arguments */
8109 if (argc < 1) {
8110 (void) fprintf(stderr, gettext("missing pool name\n"));
8111 usage(B_FALSE);
8112 }
8113 if (argc < 2) {
8114 (void) fprintf(stderr, gettext("missing device name\n"));
8115 usage(B_FALSE);
8116 }
8117
8118 poolname = argv[0];
8119
8120 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
8121 (void) fprintf(stderr, gettext("failed to open pool "
8122 "\"%s\""), poolname);
8123 return (1);
8124 }
8125
8126 for (i = 1; i < argc; i++) {
8127 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
8128 if (is_power_off) {
8129 /*
8130 * Note: we have to power off first, then set REMOVED,
8131 * or else zpool_vdev_set_removed_state() returns
8132 * EAGAIN.
8133 */
8134 ret = zpool_power_off(zhp, argv[i]);
8135 if (ret != 0) {
8136 (void) fprintf(stderr, "%s %s %d\n",
8137 gettext("unable to power off slot for"),
8138 argv[i], ret);
8139 }
8140 zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);
8141
8142 } else if (fault) {
8143 vdev_aux_t aux;
8144 if (istmp == B_FALSE) {
8145 /* Force the fault to persist across imports */
8146 aux = VDEV_AUX_EXTERNAL_PERSIST;
8147 } else {
8148 aux = VDEV_AUX_EXTERNAL;
8149 }
8150
8151 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
8152 ret = 1;
8153 } else {
8154 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
8155 ret = 1;
8156 }
8157 }
8158
8159 zpool_close(zhp);
8160
8161 return (ret);
8162 }
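
/*
 * Illustrative invocations (hypothetical names):
 *
 *	# zpool offline tank sdb	(persists across reboots)
 *	# zpool offline -t tank sdb	(temporary; cleared by a reboot)
 */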
8163
8164 /*
8165 * zpool clear [-nFX]|[--power] <pool> [device]
8166 *
* -F Attempt rewind: discard recent transactions if required to
* recover the pool.
* -n With -F, only check whether the rewind would succeed.
* -X With -F, allow an extreme rewind (search all txgs).
*
8167 * Clear all errors associated with a pool or a particular device.
8168 */
8169 int
8170 zpool_do_clear(int argc, char **argv)
8171 {
8172 int c;
8173 int ret = 0;
8174 boolean_t dryrun = B_FALSE;
8175 boolean_t do_rewind = B_FALSE;
8176 boolean_t xtreme_rewind = B_FALSE;
8177 boolean_t is_power_on = B_FALSE;
8178 uint32_t rewind_policy = ZPOOL_NO_REWIND;
8179 nvlist_t *policy = NULL;
8180 zpool_handle_t *zhp;
8181 char *pool, *device;
8182
8183 struct option long_options[] = {
8184 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
8185 {0, 0, 0, 0}
8186 };
8187
8188 /* check options */
8189 while ((c = getopt_long(argc, argv, "FnX", long_options,
8190 NULL)) != -1) {
8191 switch (c) {
8192 case 'F':
8193 do_rewind = B_TRUE;
8194 break;
8195 case 'n':
8196 dryrun = B_TRUE;
8197 break;
8198 case 'X':
8199 xtreme_rewind = B_TRUE;
8200 break;
8201 case ZPOOL_OPTION_POWER:
8202 is_power_on = B_TRUE;
8203 break;
8204 case '?':
8205 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8206 optopt);
8207 usage(B_FALSE);
8208 }
8209 }
8210
8211 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
8212 is_power_on = B_TRUE;
8213
8214 argc -= optind;
8215 argv += optind;
8216
8217 if (argc < 1) {
8218 (void) fprintf(stderr, gettext("missing pool name\n"));
8219 usage(B_FALSE);
8220 }
8221
8222 if (argc > 2) {
8223 (void) fprintf(stderr, gettext("too many arguments\n"));
8224 usage(B_FALSE);
8225 }
8226
8227 if ((dryrun || xtreme_rewind) && !do_rewind) {
8228 (void) fprintf(stderr,
8229 gettext("-n or -X only meaningful with -F\n"));
8230 usage(B_FALSE);
8231 }
8232 if (dryrun)
8233 rewind_policy = ZPOOL_TRY_REWIND;
8234 else if (do_rewind)
8235 rewind_policy = ZPOOL_DO_REWIND;
8236 if (xtreme_rewind)
8237 rewind_policy |= ZPOOL_EXTREME_REWIND;
8238
8239 /* In the future, further rewind policy choices can be passed along here */
8240 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
8241 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
8242 rewind_policy) != 0) {
8243 return (1);
8244 }
8245
8246 pool = argv[0];
8247 device = argc == 2 ? argv[1] : NULL;
8248
8249 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
8250 nvlist_free(policy);
8251 return (1);
8252 }
8253
8254 if (is_power_on) {
8255 if (device == NULL) {
8256 zpool_power_on_pool_and_wait_for_devices(zhp);
8257 } else {
8258 zpool_power_on_and_disk_wait(zhp, device);
8259 }
8260 }
8261
8262 if (zpool_clear(zhp, device, policy) != 0)
8263 ret = 1;
8264
8265 zpool_close(zhp);
8266
8267 nvlist_free(policy);
8268
8269 return (ret);
8270 }
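
/*
 * Illustrative invocations (hypothetical names):
 *
 *	# zpool clear tank		(clear error counts pool-wide)
 *	# zpool clear tank sdb		(clear error counts on one device)
 *	# zpool clear -nF tank		(check whether a rewind would work)
 */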
8271
8272 /*
8273 * zpool reguid [-g <guid>] <pool>
8274 */
8275 int
8276 zpool_do_reguid(int argc, char **argv)
8277 {
8278 uint64_t guid;
8279 uint64_t *guidp = NULL;
8280 int c;
8281 char *endptr;
8282 char *poolname;
8283 zpool_handle_t *zhp;
8284 int ret = 0;
8285
8286 /* check options */
8287 while ((c = getopt(argc, argv, "g:")) != -1) {
8288 switch (c) {
8289 case 'g':
8290 errno = 0;
8291 guid = strtoull(optarg, &endptr, 10);
8292 if (errno != 0 || *endptr != '\0') {
8293 (void) fprintf(stderr,
8294 gettext("invalid GUID: %s\n"), optarg);
8295 usage(B_FALSE);
8296 }
8297 guidp = &guid;
8298 break;
8299 case '?':
8300 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8301 optopt);
8302 usage(B_FALSE);
8303 }
8304 }
8305
8306 argc -= optind;
8307 argv += optind;
8308
8309 /* get pool name and check number of arguments */
8310 if (argc < 1) {
8311 (void) fprintf(stderr, gettext("missing pool name\n"));
8312 usage(B_FALSE);
8313 }
8314
8315 if (argc > 1) {
8316 (void) fprintf(stderr, gettext("too many arguments\n"));
8317 usage(B_FALSE);
8318 }
8319
8320 poolname = argv[0];
8321 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
8322 return (1);
8323
8324 ret = zpool_set_guid(zhp, guidp);
8325
8326 zpool_close(zhp);
8327 return (ret);
8328 }
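
/*
 * Illustrative invocations (hypothetical names):
 *
 *	# zpool reguid tank			(pick a new random GUID)
 *	# zpool reguid -g 1234567890 tank	(set a specific GUID)
 */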
8329
8330
8331 /*
8332 * zpool reopen [-n] <pool>
8333 *
* -n Do not restart an in-progress scrub operation.
*
8334 * Reopen the pool so that the kernel can update the sizes of all vdevs.
8335 */
8336 int
8337 zpool_do_reopen(int argc, char **argv)
8338 {
8339 int c;
8340 int ret = 0;
8341 boolean_t scrub_restart = B_TRUE;
8342
8343 /* check options */
8344 while ((c = getopt(argc, argv, "n")) != -1) {
8345 switch (c) {
8346 case 'n':
8347 scrub_restart = B_FALSE;
8348 break;
8349 case '?':
8350 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8351 optopt);
8352 usage(B_FALSE);
8353 }
8354 }
8355
8356 argc -= optind;
8357 argv += optind;
8358
8359 /* if argc == 0 we will execute zpool_reopen_one on all pools */
8360 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8361 B_FALSE, zpool_reopen_one, &scrub_restart);
8362
8363 return (ret);
8364 }
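
/*
 * Illustrative invocation: with no pool argument every imported pool is
 * reopened (see the for_each_pool() call above); -n keeps an in-progress
 * scrub from being restarted:
 *
 *	# zpool reopen -n
 */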
8365
8366 typedef struct scrub_cbdata {
8367 int cb_type;
8368 pool_scrub_cmd_t cb_scrub_cmd;
8369 } scrub_cbdata_t;
8370
8371 static boolean_t
8372 zpool_has_checkpoint(zpool_handle_t *zhp)
8373 {
8374 nvlist_t *config, *nvroot;
8375
8376 config = zpool_get_config(zhp, NULL);
8377
8378 if (config != NULL) {
8379 pool_checkpoint_stat_t *pcs = NULL;
8380 uint_t c;
8381
8382 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
8383 (void) nvlist_lookup_uint64_array(nvroot,
8384 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
8385
8386 if (pcs == NULL || pcs->pcs_state == CS_NONE)
8387 return (B_FALSE);
8388
8389 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
8390 pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
8391 return (B_TRUE);
8392 }
8393
8394 return (B_FALSE);
8395 }
8396
8397 static int
8398 scrub_callback(zpool_handle_t *zhp, void *data)
8399 {
8400 scrub_cbdata_t *cb = data;
8401 int err;
8402
8403 /*
8404 * Ignore faulted pools.
8405 */
8406 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
8407 (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
8408 "currently unavailable\n"), zpool_get_name(zhp));
8409 return (1);
8410 }
8411
8412 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
8413
8414 if (err == 0 && zpool_has_checkpoint(zhp) &&
8415 cb->cb_type == POOL_SCAN_SCRUB) {
8416 (void) printf(gettext("warning: will not scrub state that "
8417 "belongs to the checkpoint of pool '%s'\n"),
8418 zpool_get_name(zhp));
8419 }
8420
8421 return (err != 0);
8422 }
8423
8424 static int
8425 wait_callback(zpool_handle_t *zhp, void *data)
8426 {
8427 zpool_wait_activity_t *act = data;
8428 return (zpool_wait(zhp, *act));
8429 }
8430
8431 /*
8432 * zpool scrub [-s | -p] [-w] [-e] <pool> ...
8433 *
8434 * -e Only scrub blocks in the error log.
8435 * -s Stop. Stops any in-progress scrub.
8436 * -p Pause. Pause in-progress scrub.
8437 * -w Wait. Blocks until scrub has completed.
8438 */
8439 int
8440 zpool_do_scrub(int argc, char **argv)
8441 {
8442 int c;
8443 scrub_cbdata_t cb;
8444 boolean_t wait = B_FALSE;
8445 int error;
8446
8447 cb.cb_type = POOL_SCAN_SCRUB;
8448 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8449
8450 boolean_t is_error_scrub = B_FALSE;
8451 boolean_t is_pause = B_FALSE;
8452 boolean_t is_stop = B_FALSE;
8453
8454 /* check options */
8455 while ((c = getopt(argc, argv, "spwe")) != -1) {
8456 switch (c) {
8457 case 'e':
8458 is_error_scrub = B_TRUE;
8459 break;
8460 case 's':
8461 is_stop = B_TRUE;
8462 break;
8463 case 'p':
8464 is_pause = B_TRUE;
8465 break;
8466 case 'w':
8467 wait = B_TRUE;
8468 break;
8469 case '?':
8470 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8471 optopt);
8472 usage(B_FALSE);
8473 }
8474 }
8475
8476 if (is_pause && is_stop) {
8477 (void) fprintf(stderr, gettext("invalid option "
8478 "combination :-s and -p are mutually exclusive\n"));
8479 usage(B_FALSE);
8480 } else {
8481 if (is_error_scrub)
8482 cb.cb_type = POOL_SCAN_ERRORSCRUB;
8483
8484 if (is_pause) {
8485 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
8486 } else if (is_stop) {
8487 cb.cb_type = POOL_SCAN_NONE;
8488 } else {
8489 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8490 }
8491 }
8492
8493 if (wait && (cb.cb_type == POOL_SCAN_NONE ||
8494 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
8495 (void) fprintf(stderr, gettext("invalid option combination: "
8496 "-w cannot be used with -p or -s\n"));
8497 usage(B_FALSE);
8498 }
8499
8500 argc -= optind;
8501 argv += optind;
8502
8503 if (argc < 1) {
8504 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8505 usage(B_FALSE);
8506 }
8507
8508 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8509 B_FALSE, scrub_callback, &cb);
8510
8511 if (wait && !error) {
8512 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
8513 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8514 B_FALSE, wait_callback, &act);
8515 }
8516
8517 return (error);
8518 }
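
/*
 * Illustrative invocations (hypothetical name):
 *
 *	# zpool scrub tank		(start, or resume a paused scrub)
 *	# zpool scrub -p tank		(pause an in-progress scrub)
 *	# zpool scrub -w tank		(start and block until it finishes)
 */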
8519
8520 /*
8521 * zpool resilver <pool> ...
8522 *
8523 * Restarts any in-progress resilver
8524 */
8525 int
8526 zpool_do_resilver(int argc, char **argv)
8527 {
8528 int c;
8529 scrub_cbdata_t cb;
8530
8531 cb.cb_type = POOL_SCAN_RESILVER;
8532 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8533
8534 /* check options */
8535 while ((c = getopt(argc, argv, "")) != -1) {
8536 switch (c) {
8537 case '?':
8538 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8539 optopt);
8540 usage(B_FALSE);
8541 }
8542 }
8543
8544 argc -= optind;
8545 argv += optind;
8546
8547 if (argc < 1) {
8548 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8549 usage(B_FALSE);
8550 }
8551
8552 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8553 B_FALSE, scrub_callback, &cb));
8554 }
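
/*
 * Illustrative invocation (hypothetical name): restart whatever resilver
 * is currently running on the pool from the beginning:
 *
 *	# zpool resilver tank
 */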
8555
8556 /*
8557 * zpool trim [-dw] [-r <rate>] [-c | -s] <pool> [<device> ...]
8558 *
8559 * -c Cancel. Ends any in-progress trim.
8560 * -d Secure trim. Requires kernel and device support.
8561 * -r <rate> Sets the TRIM rate in bytes (per second). Supports
8562 * adding a multiplier suffix such as 'k' or 'm'.
8563 * -s Suspend. TRIM can then be restarted with no flags.
8564 * -w Wait. Blocks until trimming has completed.
8565 */
8566 int
8567 zpool_do_trim(int argc, char **argv)
8568 {
8569 struct option long_options[] = {
8570 {"cancel", no_argument, NULL, 'c'},
8571 {"secure", no_argument, NULL, 'd'},
8572 {"rate", required_argument, NULL, 'r'},
8573 {"suspend", no_argument, NULL, 's'},
8574 {"wait", no_argument, NULL, 'w'},
8575 {0, 0, 0, 0}
8576 };
8577
8578 pool_trim_func_t cmd_type = POOL_TRIM_START;
8579 uint64_t rate = 0;
8580 boolean_t secure = B_FALSE;
8581 boolean_t wait = B_FALSE;
8582
8583 int c;
8584 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
8585 != -1) {
8586 switch (c) {
8587 case 'c':
8588 if (cmd_type != POOL_TRIM_START &&
8589 cmd_type != POOL_TRIM_CANCEL) {
8590 (void) fprintf(stderr, gettext("-c cannot be "
8591 "combined with other options\n"));
8592 usage(B_FALSE);
8593 }
8594 cmd_type = POOL_TRIM_CANCEL;
8595 break;
8596 case 'd':
8597 if (cmd_type != POOL_TRIM_START) {
8598 (void) fprintf(stderr, gettext("-d cannot be "
8599 "combined with the -c or -s options\n"));
8600 usage(B_FALSE);
8601 }
8602 secure = B_TRUE;
8603 break;
8604 case 'r':
8605 if (cmd_type != POOL_TRIM_START) {
8606 (void) fprintf(stderr, gettext("-r cannot be "
8607 "combined with the -c or -s options\n"));
8608 usage(B_FALSE);
8609 }
8610 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
8611 (void) fprintf(stderr, "%s: %s\n",
8612 gettext("invalid value for rate"),
8613 libzfs_error_description(g_zfs));
8614 usage(B_FALSE);
8615 }
8616 break;
8617 case 's':
8618 if (cmd_type != POOL_TRIM_START &&
8619 cmd_type != POOL_TRIM_SUSPEND) {
8620 (void) fprintf(stderr, gettext("-s cannot be "
8621 "combined with other options\n"));
8622 usage(B_FALSE);
8623 }
8624 cmd_type = POOL_TRIM_SUSPEND;
8625 break;
8626 case 'w':
8627 wait = B_TRUE;
8628 break;
8629 case '?':
8630 if (optopt != 0) {
8631 (void) fprintf(stderr,
8632 gettext("invalid option '%c'\n"), optopt);
8633 } else {
8634 (void) fprintf(stderr,
8635 gettext("invalid option '%s'\n"),
8636 argv[optind - 1]);
8637 }
8638 usage(B_FALSE);
8639 }
8640 }
8641
8642 argc -= optind;
8643 argv += optind;
8644
8645 if (argc < 1) {
8646 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8647 usage(B_FALSE);
8648 return (-1);
8649 }
8650
8651 if (wait && (cmd_type != POOL_TRIM_START)) {
8652 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
8653 "-s\n"));
8654 usage(B_FALSE);
8655 }
8656
8657 char *poolname = argv[0];
8658 zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
8659 if (zhp == NULL)
8660 return (-1);
8661
8662 trimflags_t trim_flags = {
8663 .secure = secure,
8664 .rate = rate,
8665 .wait = wait,
8666 };
8667
8668 nvlist_t *vdevs = fnvlist_alloc();
8669 if (argc == 1) {
8670 /* no individual leaf vdevs specified, so add them all */
8671 nvlist_t *config = zpool_get_config(zhp, NULL);
8672 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
8673 ZPOOL_CONFIG_VDEV_TREE);
8674 zpool_collect_leaves(zhp, nvroot, vdevs);
8675 trim_flags.fullpool = B_TRUE;
8676 } else {
8677 trim_flags.fullpool = B_FALSE;
8678 for (int i = 1; i < argc; i++) {
8679 fnvlist_add_boolean(vdevs, argv[i]);
8680 }
8681 }
8682
8683 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
8684
8685 fnvlist_free(vdevs);
8686 zpool_close(zhp);
8687
8688 return (error);
8689 }
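
/*
 * Illustrative invocations (hypothetical names):
 *
 *	# zpool trim tank			(trim every leaf vdev)
 *	# zpool trim -r 100M tank sdb		(trim one device at 100 MB/s)
 *	# zpool trim -w tank			(trim and wait for completion)
 */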
8690
8691 /*
8692 * Converts a total number of seconds to a human readable string broken
8693 * down in to days/hours/minutes/seconds.
8694 */
8695 static void
8696 secs_to_dhms(uint64_t total, char *buf)
8697 {
8698 uint64_t days = total / 60 / 60 / 24;
8699 uint64_t hours = (total / 60 / 60) % 24;
8700 uint64_t mins = (total / 60) % 60;
8701 uint64_t secs = (total % 60);
8702
8703 if (days > 0) {
8704 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
8705 (u_longlong_t)days, (u_longlong_t)hours,
8706 (u_longlong_t)mins, (u_longlong_t)secs);
8707 } else {
8708 (void) sprintf(buf, "%02llu:%02llu:%02llu",
8709 (u_longlong_t)hours, (u_longlong_t)mins,
8710 (u_longlong_t)secs);
8711 }
8712 }
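
/*
 * Worked example of the conversion above, for total = 93784 seconds:
 *
 *	days  = 93784 / 86400       = 1
 *	hours = (93784 / 3600) % 24 = 2
 *	mins  = (93784 / 60) % 60   = 3
 *	secs  = 93784 % 60          = 4
 *
 * which formats as "1 days 02:03:04".
 */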
8713
8714 /*
8715 * Print out detailed error scrub status.
8716 */
8717 static void
8718 print_err_scrub_status(pool_scan_stat_t *ps)
8719 {
8720 time_t start, end, pause;
8721 uint64_t total_secs_left;
8722 uint64_t secs_left, mins_left, hours_left, days_left;
8723 uint64_t examined, to_be_examined;
8724
8725 if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
8726 return;
8727 }
8728
8729 (void) printf(gettext(" scrub: "));
8730
8731 start = ps->pss_error_scrub_start;
8732 end = ps->pss_error_scrub_end;
8733 pause = ps->pss_pass_error_scrub_pause;
8734 examined = ps->pss_error_scrub_examined;
8735 to_be_examined = ps->pss_error_scrub_to_be_examined;
8736
8737 assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);
8738
8739 if (ps->pss_error_scrub_state == DSS_FINISHED) {
8740 total_secs_left = end - start;
8741 days_left = total_secs_left / 60 / 60 / 24;
8742 hours_left = (total_secs_left / 60 / 60) % 24;
8743 mins_left = (total_secs_left / 60) % 60;
8744 secs_left = (total_secs_left % 60);
8745
8746 (void) printf(gettext("scrubbed %llu error blocks in %llu days "
8747 "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
8748 (u_longlong_t)days_left, (u_longlong_t)hours_left,
8749 (u_longlong_t)mins_left, (u_longlong_t)secs_left,
8750 ctime(&end));
8751
8752 return;
8753 } else if (ps->pss_error_scrub_state == DSS_CANCELED) {
8754 (void) printf(gettext("error scrub canceled on %s"),
8755 ctime(&end));
8756 return;
8757 }
8758 assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);
8759
8760 /* Error scrub is in progress. */
8761 if (pause == 0) {
8762 (void) printf(gettext("error scrub in progress since %s"),
8763 ctime(&start));
8764 } else {
8765 (void) printf(gettext("error scrub paused since %s"),
8766 ctime(&pause));
8767 (void) printf(gettext("\terror scrub started on %s"),
8768 ctime(&start));
8769 }
8770
8771 double fraction_done = (double)examined / (to_be_examined + examined);
8772 (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
8773 " blocks"), 100 * fraction_done, (u_longlong_t)examined);
8774
8775 (void) printf("\n");
8776 }
8777
8778 /*
8779 * Print out detailed scrub status.
8780 */
8781 static void
8782 print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
8783 {
8784 time_t start, end, pause;
8785 uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
8786 uint64_t elapsed, scan_rate, issue_rate;
8787 double fraction_done;
8788 char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
8789 char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];
8790
8791 printf(" ");
8792 printf_color(ANSI_BOLD, gettext("scan:"));
8793 printf(" ");
8794
8795 /* If there's never been a scan, there's not much to say. */
8796 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
8797 ps->pss_func >= POOL_SCAN_FUNCS) {
8798 (void) printf(gettext("none requested\n"));
8799 return;
8800 }
8801
8802 start = ps->pss_start_time;
8803 end = ps->pss_end_time;
8804 pause = ps->pss_pass_scrub_pause;
8805
8806 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
8807
8808 int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
8809 int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
8810 assert(is_resilver || is_scrub);
8811
8812 /* Scan is finished or canceled. */
8813 if (ps->pss_state == DSS_FINISHED) {
8814 secs_to_dhms(end - start, time_buf);
8815
8816 if (is_scrub) {
8817 (void) printf(gettext("scrub repaired %s "
8818 "in %s with %llu errors on %s"), processed_buf,
8819 time_buf, (u_longlong_t)ps->pss_errors,
8820 ctime(&end));
8821 } else if (is_resilver) {
8822 (void) printf(gettext("resilvered %s "
8823 "in %s with %llu errors on %s"), processed_buf,
8824 time_buf, (u_longlong_t)ps->pss_errors,
8825 ctime(&end));
8826 }
8827 return;
8828 } else if (ps->pss_state == DSS_CANCELED) {
8829 if (is_scrub) {
8830 (void) printf(gettext("scrub canceled on %s"),
8831 ctime(&end));
8832 } else if (is_resilver) {
8833 (void) printf(gettext("resilver canceled on %s"),
8834 ctime(&end));
8835 }
8836 return;
8837 }
8838
8839 assert(ps->pss_state == DSS_SCANNING);
8840
8841 /* Scan is in progress. Resilvers can't be paused. */
8842 if (is_scrub) {
8843 if (pause == 0) {
8844 (void) printf(gettext("scrub in progress since %s"),
8845 ctime(&start));
8846 } else {
8847 (void) printf(gettext("scrub paused since %s"),
8848 ctime(&pause));
8849 (void) printf(gettext("\tscrub started on %s"),
8850 ctime(&start));
8851 }
8852 } else if (is_resilver) {
8853 (void) printf(gettext("resilver in progress since %s"),
8854 ctime(&start));
8855 }
8856
8857 scanned = ps->pss_examined;
8858 pass_scanned = ps->pss_pass_exam;
8859 issued = ps->pss_issued;
8860 pass_issued = ps->pss_pass_issued;
8861 total_s = ps->pss_to_examine;
8862 total_i = ps->pss_to_examine - ps->pss_skipped;
8863
8864 /* we are only done with a block once we have issued the IO for it */
8865 fraction_done = (double)issued / total_i;
8866
8867 /* elapsed time for this pass, rounding up to 1 if it's 0 */
8868 elapsed = time(NULL) - ps->pss_pass_start;
8869 elapsed -= ps->pss_pass_scrub_spent_paused;
8870 elapsed = (elapsed != 0) ? elapsed : 1;
8871
8872 scan_rate = pass_scanned / elapsed;
8873 issue_rate = pass_issued / elapsed;
8874
8875 /* format all of the numbers we will be reporting */
8876 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
8877 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
8878 zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
8879 zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));
8880
8881 /* do not print estimated time if we have a paused scrub */
8882 (void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
8883 if (pause == 0 && scan_rate > 0) {
8884 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
8885 (void) printf(gettext(" at %s/s"), srate_buf);
8886 }
8887 (void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
8888 if (pause == 0 && issue_rate > 0) {
8889 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
8890 (void) printf(gettext(" at %s/s"), irate_buf);
8891 }
8892 (void) printf(gettext("\n"));
8893
8894 if (is_resilver) {
8895 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
8896 processed_buf, 100 * fraction_done);
8897 } else if (is_scrub) {
8898 (void) printf(gettext("\t%s repaired, %.2f%% done"),
8899 processed_buf, 100 * fraction_done);
8900 }
8901
8902 if (pause == 0) {
8903 /*
8904 * Only provide an estimate iff:
8905 * 1) we haven't yet issued all we expected, and
8906 * 2) the issue rate exceeds 10 MB/s, and
8907 * 3) it's either:
8908 * a) a resilver which has started repairs, or
8909 * b) a scrub which has entered the issue phase.
8910 */
8911 if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
8912 ((is_resilver && ps->pss_processed > 0) ||
8913 (is_scrub && issued > 0))) {
8914 secs_to_dhms((total_i - issued) / issue_rate, time_buf);
8915 (void) printf(gettext(", %s to go\n"), time_buf);
8916 } else {
8917 (void) printf(gettext(", no estimated "
8918 "completion time\n"));
8919 }
8920 } else {
8921 (void) printf(gettext("\n"));
8922 }
8923 }
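
/*
 * Example of the completion estimate above (hypothetical numbers): with
 * total_i = 100 GiB, issued = 40 GiB, and issue_rate = 20 MiB/s, the
 * remaining time is 60 GiB / 20 MiB/s = 3072 seconds, which
 * secs_to_dhms() renders as "00:51:12".
 */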
8924
8925 static void
8926 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
8927 {
8928 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
8929 return;
8930
8931 printf(" ");
8932 printf_color(ANSI_BOLD, gettext("scan:"));
8933 printf(" ");
8934
8935 uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
8936 uint64_t bytes_issued = vrs->vrs_bytes_issued;
8937 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
8938 uint64_t bytes_est_s = vrs->vrs_bytes_est;
8939 uint64_t bytes_est_i = vrs->vrs_bytes_est;
8940 if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
8941 bytes_est_i -= vrs->vrs_pass_bytes_skipped;
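/* Per-pass rates in bytes/sec; the "+ 1" ms guards against div-by-zero */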
8942 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
8943 (vrs->vrs_pass_time_ms + 1)) * 1000;
8944 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
8945 (vrs->vrs_pass_time_ms + 1)) * 1000;
8946 double scan_pct = MIN((double)bytes_scanned * 100 /
8947 (bytes_est_s + 1), 100);
8948
8949 /* Format all of the numbers we will be reporting */
8950 char bytes_scanned_buf[7], bytes_issued_buf[7];
8951 char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
8952 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
8953 zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
8954 sizeof (bytes_scanned_buf));
8955 zfs_nicebytes(bytes_issued, bytes_issued_buf,
8956 sizeof (bytes_issued_buf));
8957 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
8958 sizeof (bytes_rebuilt_buf));
8959 zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
8960 zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));
8961
8962 time_t start = vrs->vrs_start_time;
8963 time_t end = vrs->vrs_end_time;
8964
8965 /* Rebuild is finished or canceled. */
8966 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
8967 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
8968 (void) printf(gettext("resilvered (%s) %s in %s "
8969 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
8970 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
8971 return;
8972 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
8973 (void) printf(gettext("resilver (%s) canceled on %s"),
8974 vdev_name, ctime(&end));
8975 return;
8976 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8977 (void) printf(gettext("resilver (%s) in progress since %s"),
8978 vdev_name, ctime(&start));
8979 }
8980
8981 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
8982
8983 (void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
8984 bytes_est_s_buf);
8985 if (scan_rate > 0) {
8986 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
8987 (void) printf(gettext(" at %s/s"), scan_rate_buf);
8988 }
8989 (void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
8990 bytes_est_i_buf);
8991 if (issue_rate > 0) {
8992 zfs_nicebytes(issue_rate, issue_rate_buf,
8993 sizeof (issue_rate_buf));
8994 (void) printf(gettext(" at %s/s"), issue_rate_buf);
8995 }
8996 (void) printf(gettext("\n"));
8997
8998 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
8999 bytes_rebuilt_buf, scan_pct);
9000
9001 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
9002 if (bytes_est_s >= bytes_scanned &&
9003 scan_rate >= 10 * 1024 * 1024) {
9004 secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
9005 time_buf);
9006 (void) printf(gettext(", %s to go\n"), time_buf);
9007 } else {
9008 (void) printf(gettext(", no estimated "
9009 "completion time\n"));
9010 }
9011 } else {
9012 (void) printf(gettext("\n"));
9013 }
9014 }
9015
9016 /*
9017 * Print rebuild status for top-level vdevs.
9018 */
9019 static void
9020 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
9021 {
9022 nvlist_t **child;
9023 uint_t children;
9024
9025 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9026 &child, &children) != 0)
9027 children = 0;
9028
9029 for (uint_t c = 0; c < children; c++) {
9030 vdev_rebuild_stat_t *vrs;
9031 uint_t i;
9032
9033 if (nvlist_lookup_uint64_array(child[c],
9034 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
9035 char *name = zpool_vdev_name(g_zfs, zhp,
9036 child[c], VDEV_NAME_TYPE_ID);
9037 print_rebuild_status_impl(vrs, i, name);
9038 free(name);
9039 }
9040 }
9041 }
9042
9043 /*
9044 * As we don't scrub checkpointed blocks, we want to warn the user that we
9045 * skipped scanning some blocks if a checkpoint exists or existed at any
9046 * time during the scan. If a sequential instead of healing reconstruction
9047 * was performed then the blocks were reconstructed. However, their checksums
9048 * have not been verified so we still print the warning.
9049 */
9050 static void
9051 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
9052 {
9053 if (ps == NULL || pcs == NULL)
9054 return;
9055
9056 if (pcs->pcs_state == CS_NONE ||
9057 pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
9058 return;
9059
9060 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
9061
9062 if (ps->pss_state == DSS_NONE)
9063 return;
9064
9065 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
9066 ps->pss_end_time < pcs->pcs_start_time)
9067 return;
9068
9069 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
9070 (void) printf(gettext(" scan warning: skipped blocks "
9071 "that are only referenced by the checkpoint.\n"));
9072 } else {
9073 assert(ps->pss_state == DSS_SCANNING);
9074 (void) printf(gettext(" scan warning: skipping blocks "
9075 "that are only referenced by the checkpoint.\n"));
9076 }
9077 }
9078
9079 /*
9080 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
9081 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
9082 * the last completed (or cancelled) rebuild.
9083 */
9084 static boolean_t
9085 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
9086 {
9087 nvlist_t **child;
9088 uint_t children;
9089 boolean_t rebuilding = B_FALSE;
9090 uint64_t end_time = 0;
9091
9092 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9093 &child, &children) != 0)
9094 children = 0;
9095
9096 for (uint_t c = 0; c < children; c++) {
9097 vdev_rebuild_stat_t *vrs;
9098 uint_t i;
9099
9100 if (nvlist_lookup_uint64_array(child[c],
9101 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
9102
9103 if (vrs->vrs_end_time > end_time)
9104 end_time = vrs->vrs_end_time;
9105
9106 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
9107 rebuilding = B_TRUE;
9108 end_time = 0;
9109 break;
9110 }
9111 }
9112 }
9113
9114 if (rebuild_end_time != NULL)
9115 *rebuild_end_time = end_time;
9116
9117 return (rebuilding);
9118 }
9119
9120 static void
9121 vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9122 int depth, boolean_t isspare, char *parent, nvlist_t *item)
9123 {
9124 nvlist_t *vds, **child, *ch = NULL;
9125 uint_t vsc, children;
9126 vdev_stat_t *vs;
9127 char *vname;
9128 uint64_t notpresent;
9129 const char *type, *path;
9130
9131 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
9132 &child, &children) != 0)
9133 children = 0;
9134 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
9135 (uint64_t **)&vs, &vsc) == 0);
9136 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
9137 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
9138 return;
9139
9140 if (cb->cb_print_unhealthy && depth > 0 &&
9141 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
9142 return;
9143 }
9144 vname = zpool_vdev_name(g_zfs, zhp, nv,
9145 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
9146 vds = fnvlist_alloc();
9147 fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int);
9148 if (cb->cb_flat_vdevs && parent != NULL) {
9149 fnvlist_add_string(vds, "parent", parent);
9150 }
9151
9152 if (isspare) {
9153 if (vs->vs_aux == VDEV_AUX_SPARED) {
9154 fnvlist_add_string(vds, "state", "INUSE");
9155 used_by_other(zhp, nv, vds);
9156 } else if (vs->vs_state == VDEV_STATE_HEALTHY)
9157 fnvlist_add_string(vds, "state", "AVAIL");
9158 } else {
9159 if (vs->vs_alloc) {
9160 nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc,
9161 cb->cb_literal, cb->cb_json_as_int,
9162 ZFS_NICENUM_BYTES);
9163 }
9164 if (vs->vs_space) {
9165 nice_num_str_nvlist(vds, "total_space", vs->vs_space,
9166 cb->cb_literal, cb->cb_json_as_int,
9167 ZFS_NICENUM_BYTES);
9168 }
9169 if (vs->vs_dspace) {
9170 nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,
9171 cb->cb_literal, cb->cb_json_as_int,
9172 ZFS_NICENUM_BYTES);
9173 }
9174 if (vs->vs_rsize) {
9175 nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,
9176 cb->cb_literal, cb->cb_json_as_int,
9177 ZFS_NICENUM_BYTES);
9178 }
9179 if (vs->vs_esize) {
9180 nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,
9181 cb->cb_literal, cb->cb_json_as_int,
9182 ZFS_NICENUM_BYTES);
9183 }
9184 if (vs->vs_self_healed) {
9185 nice_num_str_nvlist(vds, "self_healed",
9186 vs->vs_self_healed, cb->cb_literal,
9187 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9188 }
9189 if (vs->vs_pspace) {
9190 nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,
9191 cb->cb_literal, cb->cb_json_as_int,
9192 ZFS_NICENUM_BYTES);
9193 }
9194 nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,
9195 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9196 nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,
9197 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9198 nice_num_str_nvlist(vds, "checksum_errors",
9199 vs->vs_checksum_errors, cb->cb_literal,
9200 cb->cb_json_as_int, ZFS_NICENUM_1024);
9201 if (vs->vs_scan_processed) {
9202 nice_num_str_nvlist(vds, "scan_processed",
9203 vs->vs_scan_processed, cb->cb_literal,
9204 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9205 }
9206 if (vs->vs_checkpoint_space) {
9207 nice_num_str_nvlist(vds, "checkpoint_space",
9208 vs->vs_checkpoint_space, cb->cb_literal,
9209 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9210 }
9211 if (vs->vs_resilver_deferred) {
9212 nice_num_str_nvlist(vds, "resilver_deferred",
9213 vs->vs_resilver_deferred, B_TRUE,
9214 cb->cb_json_as_int, ZFS_NICENUM_1024);
9215 }
9216 if (children == 0) {
9217 nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,
9218 cb->cb_literal, cb->cb_json_as_int,
9219 ZFS_NICENUM_1024);
9220 }
9221 if (cb->cb_print_power) {
9222 if (children == 0) {
9223 /* Only leaf vdevs have physical slots */
9224 switch (zpool_power_current_state(zhp, (char *)
9225 fnvlist_lookup_string(nv,
9226 ZPOOL_CONFIG_PATH))) {
9227 case 0:
9228 fnvlist_add_string(vds, "power_state",
9229 "off");
9230 break;
9231 case 1:
9232 fnvlist_add_string(vds, "power_state",
9233 "on");
9234 break;
9235 default:
9236 fnvlist_add_string(vds, "power_state",
9237 "-");
9238 }
9239 } else {
9240 fnvlist_add_string(vds, "power_state", "-");
9241 }
9242 }
9243 }
9244
9245 if (cb->cb_print_dio_verify) {
9246 nice_num_str_nvlist(vds, "dio_verify_errors",
9247 vs->vs_dio_verify_errors, cb->cb_literal,
9248 cb->cb_json_as_int, ZFS_NICENUM_1024);
9249 }
9250
9251 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
9252 &notpresent) == 0) {
9253 nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
9254 1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9255 fnvlist_add_string(vds, "was",
9256 fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));
9257 } else if (vs->vs_aux != VDEV_AUX_NONE) {
9258 fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);
9259 } else if (children == 0 && !isspare &&
9260 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
9261 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
9262 vs->vs_configured_ashift < vs->vs_physical_ashift) {
9263 nice_num_str_nvlist(vds, "configured_ashift",
9264 vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,
9265 ZFS_NICENUM_1024);
9266 nice_num_str_nvlist(vds, "physical_ashift",
9267 vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,
9268 ZFS_NICENUM_1024);
9269 }
9270 if (vs->vs_scan_removing != 0) {
9271 nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing,
9272 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9273 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
9274 nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc,
9275 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9276 }
9277
9278 if (cb->vcdl != NULL) {
9279 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
9280 zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp),
9281 path, vds);
9282 }
9283 }
9284
9285 if (children == 0) {
9286 if (cb->cb_print_vdev_init) {
9287 if (vs->vs_initialize_state != 0) {
9288 uint64_t st = vs->vs_initialize_state;
9289 fnvlist_add_string(vds, "init_state",
9290 vdev_init_state_str[st]);
9291 nice_num_str_nvlist(vds, "initialized",
9292 vs->vs_initialize_bytes_done,
9293 cb->cb_literal, cb->cb_json_as_int,
9294 ZFS_NICENUM_BYTES);
9295 nice_num_str_nvlist(vds, "to_initialize",
9296 vs->vs_initialize_bytes_est,
9297 cb->cb_literal, cb->cb_json_as_int,
9298 ZFS_NICENUM_BYTES);
9299 nice_num_str_nvlist(vds, "init_time",
9300 vs->vs_initialize_action_time,
9301 cb->cb_literal, cb->cb_json_as_int,
9302 ZFS_NICE_TIMESTAMP);
9303 nice_num_str_nvlist(vds, "init_errors",
9304 vs->vs_initialize_errors,
9305 cb->cb_literal, cb->cb_json_as_int,
9306 ZFS_NICENUM_1024);
9307 } else {
9308 fnvlist_add_string(vds, "init_state",
9309 "UNINITIALIZED");
9310 }
9311 }
9312 if (cb->cb_print_vdev_trim) {
9313 if (vs->vs_trim_notsup == 0) {
9314 if (vs->vs_trim_state != 0) {
9315 uint64_t st = vs->vs_trim_state;
9316 fnvlist_add_string(vds, "trim_state",
9317 vdev_trim_state_str[st]);
9318 nice_num_str_nvlist(vds, "trimmed",
9319 vs->vs_trim_bytes_done,
9320 cb->cb_literal, cb->cb_json_as_int,
9321 ZFS_NICENUM_BYTES);
9322 nice_num_str_nvlist(vds, "to_trim",
9323 vs->vs_trim_bytes_est,
9324 cb->cb_literal, cb->cb_json_as_int,
9325 ZFS_NICENUM_BYTES);
9326 nice_num_str_nvlist(vds, "trim_time",
9327 vs->vs_trim_action_time,
9328 cb->cb_literal, cb->cb_json_as_int,
9329 ZFS_NICE_TIMESTAMP);
9330 nice_num_str_nvlist(vds, "trim_errors",
9331 vs->vs_trim_errors,
9332 cb->cb_literal, cb->cb_json_as_int,
9333 ZFS_NICENUM_1024);
9334 } else
9335 fnvlist_add_string(vds, "trim_state",
9336 "UNTRIMMED");
9337 }
9338 nice_num_str_nvlist(vds, "trim_notsup",
9339 vs->vs_trim_notsup, B_TRUE,
9340 cb->cb_json_as_int, ZFS_NICENUM_1024);
9341 }
9342 } else {
9343 ch = fnvlist_alloc();
9344 }
9345
9346 if (cb->cb_flat_vdevs && children == 0) {
9347 fnvlist_add_nvlist(item, vname, vds);
9348 }
9349
9350 for (int c = 0; c < children; c++) {
9351 uint64_t islog = B_FALSE, ishole = B_FALSE;
9352 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
9353 &islog);
9354 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
9355 &ishole);
9356 if (islog || ishole)
9357 continue;
9358 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
9359 continue;
9360 if (cb->cb_flat_vdevs) {
9361 vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
9362 vname, item);
9363 }
9364 vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
9365 vname, ch);
9366 }
9367
9368 if (ch != NULL) {
9369 if (!nvlist_empty(ch))
9370 fnvlist_add_nvlist(vds, "vdevs", ch);
9371 fnvlist_free(ch);
9372 }
9373 fnvlist_add_nvlist(item, vname, vds);
9374 fnvlist_free(vds);
9375 free(vname);
9376 }
9377
9378 static void
9379 class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9380 const char *class, nvlist_t *item)
9381 {
9382 uint_t c, children;
9383 nvlist_t **child;
9384 nvlist_t *class_obj = NULL;
9385
9386 if (!cb->cb_flat_vdevs)
9387 class_obj = fnvlist_alloc();
9388
9389 assert(zhp != NULL || !cb->cb_verbose);
9390
9391 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
9392 &children) != 0) {
/* free the just-allocated object so it is not leaked */
fnvlist_free(class_obj);
9393 return;
}
9394
9395 for (c = 0; c < children; c++) {
9396 uint64_t is_log = B_FALSE;
9397 const char *bias = NULL;
9398 const char *type = NULL;
9399 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
9400 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
9401
9402 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
9403 &is_log);
9404
9405 if (is_log) {
9406 bias = VDEV_ALLOC_CLASS_LOGS;
9407 } else {
9408 (void) nvlist_lookup_string(child[c],
9409 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
9410 (void) nvlist_lookup_string(child[c],
9411 ZPOOL_CONFIG_TYPE, &type);
9412 }
9413
9414 if (bias == NULL || strcmp(bias, class) != 0)
9415 continue;
9416 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
9417 continue;
9418
9419 if (cb->cb_flat_vdevs) {
9420 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
9421 NULL, item);
9422 } else {
9423 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
9424 NULL, class_obj);
9425 }
9426 free(name);
9427 }
9428 if (!cb->cb_flat_vdevs) {
9429 if (!nvlist_empty(class_obj))
9430 fnvlist_add_nvlist(item, class, class_obj);
9431 fnvlist_free(class_obj);
9432 }
9433 }
9434
9435 static void
9436 l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9437 nvlist_t *item)
9438 {
9439 nvlist_t *l2c = NULL, **l2cache;
9440 uint_t nl2cache;
9441 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
9442 &l2cache, &nl2cache) == 0) {
9443 if (nl2cache == 0)
9444 return;
9445 if (!cb->cb_flat_vdevs)
9446 l2c = fnvlist_alloc();
9447 for (int i = 0; i < nl2cache; i++) {
9448 if (cb->cb_flat_vdevs) {
9449 vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
9450 B_FALSE, NULL, item);
9451 } else {
9452 vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
9453 B_FALSE, NULL, l2c);
9454 }
9455 }
9456 }
9457 if (!cb->cb_flat_vdevs) {
9458 if (!nvlist_empty(l2c))
9459 fnvlist_add_nvlist(item, "l2cache", l2c);
9460 fnvlist_free(l2c);
9461 }
9462 }
9463
9464 static void
9465 spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9466 nvlist_t *item)
9467 {
9468 nvlist_t *sp = NULL, **spares;
9469 uint_t nspares;
9470 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
9471 &spares, &nspares) == 0) {
9472 if (nspares == 0)
9473 return;
9474 if (!cb->cb_flat_vdevs)
9475 sp = fnvlist_alloc();
9476 for (int i = 0; i < nspares; i++) {
9477 if (cb->cb_flat_vdevs) {
9478 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
9479 NULL, item);
9480 } else {
9481 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
9482 NULL, sp);
9483 }
9484 }
9485 }
9486 if (!cb->cb_flat_vdevs) {
9487 if (!nvlist_empty(sp))
9488 fnvlist_add_nvlist(item, "spares", sp);
9489 fnvlist_free(sp);
9490 }
9491 }
9492
9493 static void
9494 errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
9495 {
9496 uint64_t nerr;
9497 nvlist_t *config = zpool_get_config(zhp, NULL);
9498 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
9499 &nerr) == 0) {
9500 nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr,
9501 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9502 if (nerr != 0 && cb->cb_verbose) {
9503 nvlist_t *nverrlist = NULL;
9504 if (zpool_get_errlog(zhp, &nverrlist) == 0) {
9505 int i = 0;
9506 int count = 0;
9507 size_t len = MAXPATHLEN * 2;
9508 nvpair_t *elem = NULL;
9509
9510 for (nvpair_t *pair =
9511 nvlist_next_nvpair(nverrlist, NULL);
9512 pair != NULL;
9513 pair = nvlist_next_nvpair(nverrlist, pair))
9514 count++;
9515 char **errl = safe_malloc(
9516 count * sizeof (char *));
9517
9518 while ((elem = nvlist_next_nvpair(nverrlist,
9519 elem)) != NULL) {
9520 nvlist_t *nv;
9521 uint64_t dsobj, obj;
9522
9523 verify(nvpair_value_nvlist(elem,
9524 &nv) == 0);
9525 verify(nvlist_lookup_uint64(nv,
9526 ZPOOL_ERR_DATASET, &dsobj) == 0);
9527 verify(nvlist_lookup_uint64(nv,
9528 ZPOOL_ERR_OBJECT, &obj) == 0);
9529 errl[i] = safe_malloc(len);
9530 zpool_obj_to_path(zhp, dsobj, obj,
9531 errl[i++], len);
9532 }
9533 nvlist_free(nverrlist);
9534 fnvlist_add_string_array(item, "errlist",
9535 (const char **)errl, count);
9536 for (int i = 0; i < count; ++i)
9537 free(errl[i]);
9538 free(errl);
9539 } else
9540 fnvlist_add_string(item, "errlist",
9541 strerror(errno));
9542 }
9543 }
9544 }
9545
9546 static void
9547 ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item)
9548 {
9549 nice_num_str_nvlist(item, "blocks", dds->dds_blocks,
9550 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9551 nice_num_str_nvlist(item, "logical_size", dds->dds_lsize,
9552 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9553 nice_num_str_nvlist(item, "physical_size", dds->dds_psize,
9554 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9555 nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize,
9556 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9557 nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks,
9558 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9559 nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize,
9560 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9561 nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize,
9562 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9563 nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize,
9564 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9565 }
9566
9567 static void
9568 dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
9569 {
9570 nvlist_t *config;
9571 if (cb->cb_dedup_stats) {
9572 ddt_histogram_t *ddh;
9573 ddt_stat_t *dds;
9574 ddt_object_t *ddo;
9575 nvlist_t *ddt_stat, *ddt_obj, *dedup;
9576 uint_t c;
9577 uint64_t cspace_prop;
9578
9579 config = zpool_get_config(zhp, NULL);
9580 if (nvlist_lookup_uint64_array(config,
9581 ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0)
9582 return;
9583
9584 dedup = fnvlist_alloc();
9585 ddt_obj = fnvlist_alloc();
9586 nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count,
9587 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9588 if (ddo->ddo_count == 0) {
9589 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
9590 ddt_obj);
9591 fnvlist_add_nvlist(item, "dedup_stats", dedup);
9592 fnvlist_free(ddt_obj);
9593 fnvlist_free(dedup);
9594 return;
9595 } else {
9596 nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace,
9597 cb->cb_literal, cb->cb_json_as_int,
9598 ZFS_NICENUM_1024);
9599 nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace,
9600 cb->cb_literal, cb->cb_json_as_int,
9601 ZFS_NICENUM_1024);
9602 /*
9603 * Squash cached size into in-core size to handle race.
9604 * Only include cached size if it is available.
9605 */
9606 cspace_prop = zpool_get_prop_int(zhp,
9607 ZPOOL_PROP_DEDUPCACHED, NULL);
9608 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
9609 nice_num_str_nvlist(dedup, "cspace", cspace_prop,
9610 cb->cb_literal, cb->cb_json_as_int,
9611 ZFS_NICENUM_1024);
9612 }
9613
9614 ddt_stat = fnvlist_alloc();
9615 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
9616 (uint64_t **)&dds, &c) == 0) {
9617 nvlist_t *total = fnvlist_alloc();
9618 if (dds->dds_blocks == 0)
9619 fnvlist_add_string(total, "blocks", "0");
9620 else
9621 ddt_stats_nvlist(dds, cb, total);
9622 fnvlist_add_nvlist(ddt_stat, "total", total);
9623 fnvlist_free(total);
9624 }
9625 if (nvlist_lookup_uint64_array(config,
9626 ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) {
9627 nvlist_t *hist = fnvlist_alloc();
9628 nvlist_t *entry = NULL;
9629 char buf[16];
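			/* emit only the non-empty histogram buckets */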
9630 for (int h = 0; h < 64; h++) {
9631 if (ddh->ddh_stat[h].dds_blocks != 0) {
9632 entry = fnvlist_alloc();
9633 ddt_stats_nvlist(&ddh->ddh_stat[h], cb,
9634 entry);
9635 snprintf(buf, 16, "%d", h);
9636 fnvlist_add_nvlist(hist, buf, entry);
9637 fnvlist_free(entry);
9638 }
9639 }
9640 if (!nvlist_empty(hist))
9641 fnvlist_add_nvlist(ddt_stat, "histogram", hist);
9642 fnvlist_free(hist);
9643 }
9644
9645 if (!nvlist_empty(ddt_obj)) {
9646 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
9647 ddt_obj);
9648 }
9649 fnvlist_free(ddt_obj);
9650 if (!nvlist_empty(ddt_stat)) {
9651 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS,
9652 ddt_stat);
9653 }
9654 fnvlist_free(ddt_stat);
9655 if (!nvlist_empty(dedup))
9656 fnvlist_add_nvlist(item, "dedup_stats", dedup);
9657 fnvlist_free(dedup);
9658 }
9659 }
9660
9661 static void
9662 raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9663 nvlist_t *nvroot, nvlist_t *item)
9664 {
9665 uint_t c;
9666 pool_raidz_expand_stat_t *pres = NULL;
9667 if (nvlist_lookup_uint64_array(nvroot,
9668 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) {
9669 nvlist_t **child;
9670 uint_t children;
9671 nvlist_t *nv = fnvlist_alloc();
9672 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9673 &child, &children) == 0);
9674 assert(pres->pres_expanding_vdev < children);
9675 char *name =
9676 zpool_vdev_name(g_zfs, zhp,
9677 child[pres->pres_expanding_vdev], 0);
9678 fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int);
9679 fnvlist_add_string(nv, "state",
9680 pool_scan_state_str[pres->pres_state]);
9681 nice_num_str_nvlist(nv, "expanding_vdev",
9682 pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int,
9683 ZFS_NICENUM_1024);
9684 nice_num_str_nvlist(nv, "start_time", pres->pres_start_time,
9685 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9686 nice_num_str_nvlist(nv, "end_time", pres->pres_end_time,
9687 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9688 nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow,
9689 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9690 nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed,
9691 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9692 nice_num_str_nvlist(nv, "waiting_for_resilver",
9693 pres->pres_waiting_for_resilver, B_TRUE,
9694 cb->cb_json_as_int, ZFS_NICENUM_1024);
9695 fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv);
9696 fnvlist_free(nv);
9697 free(name);
9698 }
9699 }
9700
9701 static void
9702 checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb,
9703 nvlist_t *item)
9704 {
9705 uint_t c;
9706 pool_checkpoint_stat_t *pcs = NULL;
9707 if (nvlist_lookup_uint64_array(nvroot,
9708 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) {
9709 nvlist_t *nv = fnvlist_alloc();
9710 fnvlist_add_string(nv, "state",
9711 checkpoint_state_str[pcs->pcs_state]);
9712 nice_num_str_nvlist(nv, "start_time",
9713 pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int,
9714 ZFS_NICE_TIMESTAMP);
9715 nice_num_str_nvlist(nv, "space",
9716 pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int,
9717 ZFS_NICENUM_BYTES);
9718 fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv);
9719 fnvlist_free(nv);
9720 }
9721 }
9722
9723 static void
9724 removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9725 nvlist_t *nvroot, nvlist_t *item)
9726 {
9727 uint_t c;
9728 pool_removal_stat_t *prs = NULL;
9729 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS,
9730 (uint64_t **)&prs, &c) == 0) {
9731 if (prs->prs_state != DSS_NONE) {
9732 nvlist_t **child;
9733 uint_t children;
9734 verify(nvlist_lookup_nvlist_array(nvroot,
9735 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
9736 assert(prs->prs_removing_vdev < children);
9737 char *vdev_name = zpool_vdev_name(g_zfs, zhp,
9738 child[prs->prs_removing_vdev], B_TRUE);
9739 nvlist_t *nv = fnvlist_alloc();
9740 fill_vdev_info(nv, zhp, vdev_name, B_FALSE,
9741 cb->cb_json_as_int);
9742 fnvlist_add_string(nv, "state",
9743 pool_scan_state_str[prs->prs_state]);
9744 nice_num_str_nvlist(nv, "removing_vdev",
9745 prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int,
9746 ZFS_NICENUM_1024);
9747 nice_num_str_nvlist(nv, "start_time",
9748 prs->prs_start_time, cb->cb_literal,
9749 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9750 nice_num_str_nvlist(nv, "end_time", prs->prs_end_time,
9751 cb->cb_literal, cb->cb_json_as_int,
9752 ZFS_NICE_TIMESTAMP);
9753 nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy,
9754 cb->cb_literal, cb->cb_json_as_int,
9755 ZFS_NICENUM_BYTES);
9756 nice_num_str_nvlist(nv, "copied", prs->prs_copied,
9757 cb->cb_literal, cb->cb_json_as_int,
9758 ZFS_NICENUM_BYTES);
9759 nice_num_str_nvlist(nv, "mapping_memory",
9760 prs->prs_mapping_memory, cb->cb_literal,
9761 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9762 fnvlist_add_nvlist(item,
9763 ZPOOL_CONFIG_REMOVAL_STATS, nv);
9764 fnvlist_free(nv);
9765 free(vdev_name);
9766 }
9767 }
9768 }
9769
9770 static void
9771 scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9772 nvlist_t *nvroot, nvlist_t *item)
9773 {
9774 pool_scan_stat_t *ps = NULL;
9775 uint_t c;
9776 nvlist_t *scan = fnvlist_alloc();
9777 nvlist_t **child;
9778 uint_t children;
9779
9780 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
9781 (uint64_t **)&ps, &c) == 0) {
9782 fnvlist_add_string(scan, "function",
9783 pool_scan_func_str[ps->pss_func]);
9784 fnvlist_add_string(scan, "state",
9785 pool_scan_state_str[ps->pss_state]);
9786 nice_num_str_nvlist(scan, "start_time", ps->pss_start_time,
9787 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9788 nice_num_str_nvlist(scan, "end_time", ps->pss_end_time,
9789 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9790 nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine,
9791 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9792 nice_num_str_nvlist(scan, "examined", ps->pss_examined,
9793 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9794 nice_num_str_nvlist(scan, "skipped", ps->pss_skipped,
9795 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9796 nice_num_str_nvlist(scan, "processed", ps->pss_processed,
9797 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9798 nice_num_str_nvlist(scan, "errors", ps->pss_errors,
9799 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9800 nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam,
9801 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9802 nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start,
9803 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9804 nice_num_str_nvlist(scan, "scrub_pause",
9805 ps->pss_pass_scrub_pause, cb->cb_literal,
9806 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9807 nice_num_str_nvlist(scan, "scrub_spent_paused",
9808 ps->pss_pass_scrub_spent_paused,
9809 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9810 nice_num_str_nvlist(scan, "issued_bytes_per_scan",
9811 ps->pss_pass_issued, cb->cb_literal,
9812 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9813 nice_num_str_nvlist(scan, "issued", ps->pss_issued,
9814 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9815 if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
9816 ps->pss_error_scrub_start > ps->pss_start_time) {
9817 fnvlist_add_string(scan, "err_scrub_func",
9818 pool_scan_func_str[ps->pss_error_scrub_func]);
9819 fnvlist_add_string(scan, "err_scrub_state",
9820 pool_scan_state_str[ps->pss_error_scrub_state]);
9821 nice_num_str_nvlist(scan, "err_scrub_start_time",
9822 ps->pss_error_scrub_start,
9823 cb->cb_literal, cb->cb_json_as_int,
9824 ZFS_NICE_TIMESTAMP);
9825 nice_num_str_nvlist(scan, "err_scrub_end_time",
9826 ps->pss_error_scrub_end,
9827 cb->cb_literal, cb->cb_json_as_int,
9828 ZFS_NICE_TIMESTAMP);
9829 nice_num_str_nvlist(scan, "err_scrub_examined",
9830 ps->pss_error_scrub_examined,
9831 cb->cb_literal, cb->cb_json_as_int,
9832 ZFS_NICENUM_1024);
9833 nice_num_str_nvlist(scan, "err_scrub_to_examine",
9834 ps->pss_error_scrub_to_be_examined,
9835 cb->cb_literal, cb->cb_json_as_int,
9836 ZFS_NICENUM_1024);
9837 nice_num_str_nvlist(scan, "err_scrub_pause",
9838 ps->pss_pass_error_scrub_pause,
9839 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9840 }
9841 }
9842
9843 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9844 &child, &children) == 0) {
9845 vdev_rebuild_stat_t *vrs;
9846 uint_t i;
9847 char *name;
9848 nvlist_t *nv;
9849 nvlist_t *rebuild = fnvlist_alloc();
9850 uint64_t st;
9851 for (uint_t c = 0; c < children; c++) {
9852 if (nvlist_lookup_uint64_array(child[c],
9853 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs,
9854 &i) == 0) {
9855 if (vrs->vrs_state != VDEV_REBUILD_NONE) {
9856 nv = fnvlist_alloc();
9857 name = zpool_vdev_name(g_zfs, zhp,
9858 child[c], VDEV_NAME_TYPE_ID);
9859 fill_vdev_info(nv, zhp, name, B_FALSE,
9860 cb->cb_json_as_int);
9861 st = vrs->vrs_state;
9862 fnvlist_add_string(nv, "state",
9863 vdev_rebuild_state_str[st]);
9864 nice_num_str_nvlist(nv, "start_time",
9865 vrs->vrs_start_time, cb->cb_literal,
9866 cb->cb_json_as_int,
9867 ZFS_NICE_TIMESTAMP);
9868 nice_num_str_nvlist(nv, "end_time",
9869 vrs->vrs_end_time, cb->cb_literal,
9870 cb->cb_json_as_int,
9871 ZFS_NICE_TIMESTAMP);
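					/*
					 * vrs_scan_time_ms is in milliseconds;
					 * convert to nanoseconds, which is
					 * what ZFS_NICENUM_TIME expects.
					 */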
9872 nice_num_str_nvlist(nv, "scan_time",
9873 vrs->vrs_scan_time_ms * 1000000,
9874 cb->cb_literal, cb->cb_json_as_int,
9875 ZFS_NICENUM_TIME);
9876 nice_num_str_nvlist(nv, "scanned",
9877 vrs->vrs_bytes_scanned,
9878 cb->cb_literal, cb->cb_json_as_int,
9879 ZFS_NICENUM_BYTES);
9880 nice_num_str_nvlist(nv, "issued",
9881 vrs->vrs_bytes_issued,
9882 cb->cb_literal, cb->cb_json_as_int,
9883 ZFS_NICENUM_BYTES);
9884 nice_num_str_nvlist(nv, "rebuilt",
9885 vrs->vrs_bytes_rebuilt,
9886 cb->cb_literal, cb->cb_json_as_int,
9887 ZFS_NICENUM_BYTES);
9888 nice_num_str_nvlist(nv, "to_scan",
9889 vrs->vrs_bytes_est, cb->cb_literal,
9890 cb->cb_json_as_int,
9891 ZFS_NICENUM_BYTES);
9892 nice_num_str_nvlist(nv, "errors",
9893 vrs->vrs_errors, cb->cb_literal,
9894 cb->cb_json_as_int,
9895 ZFS_NICENUM_1024);
9896 nice_num_str_nvlist(nv, "pass_time",
9897 vrs->vrs_pass_time_ms * 1000000,
9898 cb->cb_literal, cb->cb_json_as_int,
9899 ZFS_NICENUM_TIME);
9900 nice_num_str_nvlist(nv, "pass_scanned",
9901 vrs->vrs_pass_bytes_scanned,
9902 cb->cb_literal, cb->cb_json_as_int,
9903 ZFS_NICENUM_BYTES);
9904 nice_num_str_nvlist(nv, "pass_issued",
9905 vrs->vrs_pass_bytes_issued,
9906 cb->cb_literal, cb->cb_json_as_int,
9907 ZFS_NICENUM_BYTES);
9908 nice_num_str_nvlist(nv, "pass_skipped",
9909 vrs->vrs_pass_bytes_skipped,
9910 cb->cb_literal, cb->cb_json_as_int,
9911 ZFS_NICENUM_BYTES);
9912 fnvlist_add_nvlist(rebuild, name, nv);
9913 free(name);
9914 }
9915 }
9916 }
9917 if (!nvlist_empty(rebuild))
9918 fnvlist_add_nvlist(scan, "rebuild_stats", rebuild);
9919 fnvlist_free(rebuild);
9920 }
9921
9922 if (!nvlist_empty(scan))
9923 fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan);
9924 fnvlist_free(scan);
9925 }
9926
9927 /*
9928 * Print the scan status.
9929 */
9930 static void
9931 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
9932 {
9933 uint64_t rebuild_end_time = 0, resilver_end_time = 0;
9934 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
9935 boolean_t have_errorscrub = B_FALSE;
9936 boolean_t active_resilver = B_FALSE;
9937 pool_checkpoint_stat_t *pcs = NULL;
9938 pool_scan_stat_t *ps = NULL;
9939 uint_t c;
9940 time_t scrub_start = 0, errorscrub_start = 0;
9941
9942 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
9943 (uint64_t **)&ps, &c) == 0) {
9944 if (ps->pss_func == POOL_SCAN_RESILVER) {
9945 resilver_end_time = ps->pss_end_time;
9946 active_resilver = (ps->pss_state == DSS_SCANNING);
9947 }
9948
9949 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
9950 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
9951 scrub_start = ps->pss_start_time;
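		/*
		 * The error-scrub fields were appended to pool_scan_stat_t,
		 * so an older kernel may return a shorter uint64 array; only
		 * read those fields when the array extends past their offset.
		 */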
9952 if (c > offsetof(pool_scan_stat_t,
9953 pss_pass_error_scrub_pause) / 8) {
9954 have_errorscrub = (ps->pss_error_scrub_func ==
9955 POOL_SCAN_ERRORSCRUB);
9956 errorscrub_start = ps->pss_error_scrub_start;
9957 }
9958 }
9959
9960 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
9961 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
9962
9963 	/* Print the scrub or error-scrub status, whichever began most recently. */
9964 if (have_scrub && scrub_start > errorscrub_start)
9965 print_scan_scrub_resilver_status(ps);
9966 else if (have_errorscrub && errorscrub_start >= scrub_start)
9967 print_err_scrub_status(ps);
9968
9969 /*
9970 * When there is an active resilver or rebuild print its status.
9971 * Otherwise print the status of the last resilver or rebuild.
9972 */
9973 if (active_resilver || (!active_rebuild && have_resilver &&
9974 resilver_end_time && resilver_end_time > rebuild_end_time)) {
9975 print_scan_scrub_resilver_status(ps);
9976 } else if (active_rebuild || (!active_resilver && have_rebuild &&
9977 rebuild_end_time && rebuild_end_time > resilver_end_time)) {
9978 print_rebuild_status(zhp, nvroot);
9979 }
9980
9981 (void) nvlist_lookup_uint64_array(nvroot,
9982 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
9983 print_checkpoint_scan_warning(ps, pcs);
9984 }
9985
9986 /*
9987 * Print out detailed removal status.
9988 */
9989 static void
9990 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
9991 {
9992 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
9993 time_t start, end;
9994 nvlist_t *config, *nvroot;
9995 nvlist_t **child;
9996 uint_t children;
9997 char *vdev_name;
9998
9999 if (prs == NULL || prs->prs_state == DSS_NONE)
10000 return;
10001
10002 /*
10003 * Determine name of vdev.
10004 */
10005 config = zpool_get_config(zhp, NULL);
10006 nvroot = fnvlist_lookup_nvlist(config,
10007 ZPOOL_CONFIG_VDEV_TREE);
10008 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
10009 &child, &children) == 0);
10010 assert(prs->prs_removing_vdev < children);
10011 vdev_name = zpool_vdev_name(g_zfs, zhp,
10012 child[prs->prs_removing_vdev], B_TRUE);
10013
10014 printf_color(ANSI_BOLD, gettext("remove: "));
10015
10016 start = prs->prs_start_time;
10017 end = prs->prs_end_time;
10018 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
10019
10020 /*
10021 * Removal is finished or canceled.
10022 */
10023 if (prs->prs_state == DSS_FINISHED) {
10024 uint64_t minutes_taken = (end - start) / 60;
10025
10026 (void) printf(gettext("Removal of vdev %llu copied %s "
10027 "in %lluh%um, completed on %s"),
10028 		    (u_longlong_t)prs->prs_removing_vdev,
10029 copied_buf,
10030 (u_longlong_t)(minutes_taken / 60),
10031 (uint_t)(minutes_taken % 60),
10032 ctime((time_t *)&end));
10033 } else if (prs->prs_state == DSS_CANCELED) {
10034 (void) printf(gettext("Removal of %s canceled on %s"),
10035 vdev_name, ctime(&end));
10036 } else {
10037 uint64_t copied, total, elapsed, mins_left, hours_left;
10038 double fraction_done;
10039 uint_t rate;
10040
10041 assert(prs->prs_state == DSS_SCANNING);
10042
10043 /*
10044 * Removal is in progress.
10045 */
10046 (void) printf(gettext(
10047 "Evacuation of %s in progress since %s"),
10048 vdev_name, ctime(&start));
10049
10050 copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
10051 total = prs->prs_to_copy;
10052 fraction_done = (double)copied / total;
10053
10054 /* elapsed time for this pass */
10055 elapsed = time(NULL) - prs->prs_start_time;
10056 elapsed = elapsed > 0 ? elapsed : 1;
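		/* clamp the rate so the time-remaining math cannot divide by zero */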
10057 rate = copied / elapsed;
10058 rate = rate > 0 ? rate : 1;
10059 mins_left = ((total - copied) / rate) / 60;
10060 hours_left = mins_left / 60;
10061
10062 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
10063 zfs_nicenum(total, total_buf, sizeof (total_buf));
10064 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
10065
10066 /*
10067 * do not print estimated time if hours_left is more than
10068 * 30 days
10069 */
10070 (void) printf(gettext(
10071 "\t%s copied out of %s at %s/s, %.2f%% done"),
10072 examined_buf, total_buf, rate_buf, 100 * fraction_done);
10073 if (hours_left < (30 * 24)) {
10074 (void) printf(gettext(", %lluh%um to go\n"),
10075 (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
10076 } else {
10077 (void) printf(gettext(
10078 ", (copy is slow, no estimated time)\n"));
10079 }
10080 }
10081 free(vdev_name);
10082
10083 if (prs->prs_mapping_memory > 0) {
10084 char mem_buf[7];
10085 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
10086 (void) printf(gettext(
10087 "\t%s memory used for removed device mappings\n"),
10088 mem_buf);
10089 }
10090 }
10091
10092 /*
10093 * Print out detailed raidz expansion status.
10094 */
10095 static void
10096 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
10097 {
10098 char copied_buf[7];
10099
10100 if (pres == NULL || pres->pres_state == DSS_NONE)
10101 return;
10102
10103 /*
10104 * Determine name of vdev.
10105 */
10106 nvlist_t *config = zpool_get_config(zhp, NULL);
10107 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
10108 ZPOOL_CONFIG_VDEV_TREE);
10109 nvlist_t **child;
10110 uint_t children;
10111 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
10112 &child, &children) == 0);
10113 assert(pres->pres_expanding_vdev < children);
10114
10115 printf_color(ANSI_BOLD, gettext("expand: "));
10116
10117 time_t start = pres->pres_start_time;
10118 time_t end = pres->pres_end_time;
10119 char *vname =
10120 zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
10121 zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));
10122
10123 /*
10124 * Expansion is finished or canceled.
10125 */
10126 if (pres->pres_state == DSS_FINISHED) {
10127 char time_buf[32];
10128 secs_to_dhms(end - start, time_buf);
10129
10130 (void) printf(gettext("expanded %s-%u copied %s in %s, "
10131 "on %s"), vname, (int)pres->pres_expanding_vdev,
10132 copied_buf, time_buf, ctime((time_t *)&end));
10133 } else {
10134 char examined_buf[7], total_buf[7], rate_buf[7];
10135 uint64_t copied, total, elapsed, secs_left;
10136 double fraction_done;
10137 uint_t rate;
10138
10139 assert(pres->pres_state == DSS_SCANNING);
10140
10141 /*
10142 * Expansion is in progress.
10143 */
10144 (void) printf(gettext(
10145 "expansion of %s-%u in progress since %s"),
10146 vname, (int)pres->pres_expanding_vdev, ctime(&start));
10147
10148 copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
10149 total = pres->pres_to_reflow;
10150 fraction_done = (double)copied / total;
10151
10152 /* elapsed time for this pass */
10153 elapsed = time(NULL) - pres->pres_start_time;
10154 elapsed = elapsed > 0 ? elapsed : 1;
10155 rate = copied / elapsed;
10156 rate = rate > 0 ? rate : 1;
10157 secs_left = (total - copied) / rate;
10158
10159 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
10160 zfs_nicenum(total, total_buf, sizeof (total_buf));
10161 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
10162
10163 		/*
10164 		 * do not print estimated time if secs_left is more than
10165 		 * 30 days
10166 		 */
10167 (void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
10168 examined_buf, total_buf, rate_buf, 100 * fraction_done);
10169 if (pres->pres_waiting_for_resilver) {
10170 (void) printf(gettext(", paused for resilver or "
10171 "clear\n"));
10172 } else if (secs_left < (30 * 24 * 3600)) {
10173 char time_buf[32];
10174 secs_to_dhms(secs_left, time_buf);
10175 (void) printf(gettext(", %s to go\n"), time_buf);
10176 } else {
10177 (void) printf(gettext(
10178 ", (copy is slow, no estimated time)\n"));
10179 }
10180 }
10181 free(vname);
10182 }
10183 static void
10184 print_checkpoint_status(pool_checkpoint_stat_t *pcs)
10185 {
10186 time_t start;
10187 char space_buf[7];
10188
10189 if (pcs == NULL || pcs->pcs_state == CS_NONE)
10190 return;
10191
10192 (void) printf(gettext("checkpoint: "));
10193
10194 start = pcs->pcs_start_time;
10195 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
10196
10197 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
10198 char *date = ctime(&start);
10199
10200 /*
10201 * ctime() adds a newline at the end of the generated
10202 * string, thus the weird format specifier and the
10203 * strlen() call used to chop it off from the output.
10204 */
10205 (void) printf(gettext("created %.*s, consumes %s\n"),
10206 (int)(strlen(date) - 1), date, space_buf);
10207 return;
10208 }
10209
10210 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
10211
10212 (void) printf(gettext("discarding, %s remaining.\n"),
10213 space_buf);
10214 }
10215
10216 static void
10217 print_error_log(zpool_handle_t *zhp)
10218 {
10219 nvlist_t *nverrlist = NULL;
10220 nvpair_t *elem;
10221 char *pathname;
10222 size_t len = MAXPATHLEN * 2;
10223
10224 if (zpool_get_errlog(zhp, &nverrlist) != 0)
10225 return;
10226
10227 (void) printf("errors: Permanent errors have been "
10228 "detected in the following files:\n\n");
10229
10230 pathname = safe_malloc(len);
10231 elem = NULL;
10232 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
10233 nvlist_t *nv;
10234 uint64_t dsobj, obj;
10235
10236 verify(nvpair_value_nvlist(elem, &nv) == 0);
10237 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
10238 &dsobj) == 0);
10239 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
10240 &obj) == 0);
10241 zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
10242 (void) printf("%7s %s\n", "", pathname);
10243 }
10244 free(pathname);
10245 nvlist_free(nverrlist);
10246 }
10247
10248 static void
10249 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
10250 uint_t nspares)
10251 {
10252 uint_t i;
10253 char *name;
10254
10255 if (nspares == 0)
10256 return;
10257
10258 (void) printf(gettext("\tspares\n"));
10259
10260 for (i = 0; i < nspares; i++) {
10261 name = zpool_vdev_name(g_zfs, zhp, spares[i],
10262 cb->cb_name_flags);
10263 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
10264 free(name);
10265 }
10266 }
10267
10268 static void
10269 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
10270 uint_t nl2cache)
10271 {
10272 uint_t i;
10273 char *name;
10274
10275 if (nl2cache == 0)
10276 return;
10277
10278 (void) printf(gettext("\tcache\n"));
10279
10280 for (i = 0; i < nl2cache; i++) {
10281 name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
10282 cb->cb_name_flags);
10283 print_status_config(zhp, cb, name, l2cache[i], 2,
10284 B_FALSE, NULL);
10285 free(name);
10286 }
10287 }
10288
10289 static void
10290 print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal)
10291 {
10292 ddt_histogram_t *ddh;
10293 ddt_stat_t *dds;
10294 ddt_object_t *ddo;
10295 uint_t c;
10296 /* Extra space provided for literal display */
10297 char dspace[32], mspace[32], cspace[32];
10298 uint64_t cspace_prop;
10299 enum zfs_nicenum_format format;
10300 zprop_source_t src;
10301
10302 /*
10303 * If the pool was faulted then we may not have been able to
10304 * obtain the config. Otherwise, if we have anything in the dedup
10305 * table continue processing the stats.
10306 */
10307 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
10308 (uint64_t **)&ddo, &c) != 0)
10309 return;
10310
10311 (void) printf("\n");
10312 (void) printf(gettext(" dedup: "));
10313 if (ddo->ddo_count == 0) {
10314 (void) printf(gettext("no DDT entries\n"));
10315 return;
10316 }
10317
10318 /*
10319 * Squash cached size into in-core size to handle race.
10320 * Only include cached size if it is available.
10321 */
10322 cspace_prop = zpool_get_prop_int(zhp, ZPOOL_PROP_DEDUPCACHED, &src);
10323 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
10324 format = literal ? ZFS_NICENUM_RAW : ZFS_NICENUM_1024;
10325 zfs_nicenum_format(cspace_prop, cspace, sizeof (cspace), format);
10326 zfs_nicenum_format(ddo->ddo_dspace, dspace, sizeof (dspace), format);
10327 zfs_nicenum_format(ddo->ddo_mspace, mspace, sizeof (mspace), format);
10328 (void) printf("DDT entries %llu, size %s on disk, %s in core",
10329 (u_longlong_t)ddo->ddo_count,
10330 dspace,
10331 mspace);
10332 if (src != ZPROP_SRC_DEFAULT) {
10333 (void) printf(", %s cached (%.02f%%)",
10334 cspace,
10335 (double)cspace_prop / (double)ddo->ddo_mspace * 100.0);
10336 }
10337 (void) printf("\n");
10338
10339 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
10340 (uint64_t **)&dds, &c) == 0);
10341 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
10342 (uint64_t **)&ddh, &c) == 0);
10343 zpool_dump_ddt(dds, ddh);
10344 }
10345
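/* sizes of the buffers that hold the "status:" and "action:" text below */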
10346 #define ST_SIZE 4096
10347 #define AC_SIZE 2048
10348
10349 static void
10350 print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
10351 zpool_status_t reason, zpool_errata_t errata, nvlist_t *item)
10352 {
10353 char status[ST_SIZE];
10354 char action[AC_SIZE];
10355 memset(status, 0, ST_SIZE);
10356 memset(action, 0, AC_SIZE);
10357
10358 switch (reason) {
10359 case ZPOOL_STATUS_MISSING_DEV_R:
10360 snprintf(status, ST_SIZE, gettext("One or more devices could "
10361 "not be opened. Sufficient replicas exist for\n\tthe pool "
10362 "to continue functioning in a degraded state.\n"));
10363 snprintf(action, AC_SIZE, gettext("Attach the missing device "
10364 "and online it using 'zpool online'.\n"));
10365 break;
10366
10367 case ZPOOL_STATUS_MISSING_DEV_NR:
10368 snprintf(status, ST_SIZE, gettext("One or more devices could "
10369 "not be opened. There are insufficient\n\treplicas for the"
10370 " pool to continue functioning.\n"));
10371 snprintf(action, AC_SIZE, gettext("Attach the missing device "
10372 "and online it using 'zpool online'.\n"));
10373 break;
10374
10375 case ZPOOL_STATUS_CORRUPT_LABEL_R:
10376 snprintf(status, ST_SIZE, gettext("One or more devices could "
10377 "not be used because the label is missing or\n\tinvalid. "
10378 "Sufficient replicas exist for the pool to continue\n\t"
10379 "functioning in a degraded state.\n"));
10380 snprintf(action, AC_SIZE, gettext("Replace the device using "
10381 "'zpool replace'.\n"));
10382 break;
10383
10384 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
10385 snprintf(status, ST_SIZE, gettext("One or more devices could "
10386 "not be used because the label is missing \n\tor invalid. "
10387 "There are insufficient replicas for the pool to "
10388 "continue\n\tfunctioning.\n"));
10389 zpool_explain_recover(zpool_get_handle(zhp),
10390 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
10391 action, AC_SIZE);
10392 break;
10393
10394 case ZPOOL_STATUS_FAILING_DEV:
10395 snprintf(status, ST_SIZE, gettext("One or more devices has "
10396 "experienced an unrecoverable error. An\n\tattempt was "
10397 "made to correct the error. Applications are "
10398 "unaffected.\n"));
10399 snprintf(action, AC_SIZE, gettext("Determine if the "
10400 "device needs to be replaced, and clear the errors\n\tusing"
10401 " 'zpool clear' or replace the device with 'zpool "
10402 "replace'.\n"));
10403 break;
10404
10405 case ZPOOL_STATUS_OFFLINE_DEV:
10406 snprintf(status, ST_SIZE, gettext("One or more devices has "
10407 "been taken offline by the administrator.\n\tSufficient "
10408 "replicas exist for the pool to continue functioning in "
10409 "a\n\tdegraded state.\n"));
10410 snprintf(action, AC_SIZE, gettext("Online the device "
10411 "using 'zpool online' or replace the device with\n\t'zpool "
10412 "replace'.\n"));
10413 break;
10414
10415 case ZPOOL_STATUS_REMOVED_DEV:
10416 snprintf(status, ST_SIZE, gettext("One or more devices has "
10417 "been removed by the administrator.\n\tSufficient "
10418 "replicas exist for the pool to continue functioning in "
10419 "a\n\tdegraded state.\n"));
10420 snprintf(action, AC_SIZE, gettext("Online the device "
10421 		    "using 'zpool online' or replace the device with\n\t'zpool "
10422 "replace'.\n"));
10423 break;
10424
10425 case ZPOOL_STATUS_RESILVERING:
10426 case ZPOOL_STATUS_REBUILDING:
10427 snprintf(status, ST_SIZE, gettext("One or more devices is "
10428 "currently being resilvered. The pool will\n\tcontinue "
10429 "to function, possibly in a degraded state.\n"));
10430 snprintf(action, AC_SIZE, gettext("Wait for the resilver to "
10431 "complete.\n"));
10432 break;
10433
10434 case ZPOOL_STATUS_REBUILD_SCRUB:
10435 snprintf(status, ST_SIZE, gettext("One or more devices have "
10436 "been sequentially resilvered, scrubbing\n\tthe pool "
10437 "is recommended.\n"));
10438 snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
10439 "verify all data checksums.\n"));
10440 break;
10441
10442 case ZPOOL_STATUS_CORRUPT_DATA:
10443 snprintf(status, ST_SIZE, gettext("One or more devices has "
10444 "experienced an error resulting in data\n\tcorruption. "
10445 "Applications may be affected.\n"));
10446 snprintf(action, AC_SIZE, gettext("Restore the file in question"
10447 " if possible. Otherwise restore the\n\tentire pool from "
10448 "backup.\n"));
10449 break;
10450
10451 case ZPOOL_STATUS_CORRUPT_POOL:
10452 snprintf(status, ST_SIZE, gettext("The pool metadata is "
10453 "corrupted and the pool cannot be opened.\n"));
10454 zpool_explain_recover(zpool_get_handle(zhp),
10455 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
10456 action, AC_SIZE);
10457 break;
10458
10459 case ZPOOL_STATUS_VERSION_OLDER:
10460 snprintf(status, ST_SIZE, gettext("The pool is formatted using "
10461 "a legacy on-disk format. The pool can\n\tstill be used, "
10462 "but some features are unavailable.\n"));
10463 snprintf(action, AC_SIZE, gettext("Upgrade the pool using "
10464 "'zpool upgrade'. Once this is done, the\n\tpool will no "
10465 "longer be accessible on software that does not support\n\t"
10466 "feature flags.\n"));
10467 break;
10468
10469 case ZPOOL_STATUS_VERSION_NEWER:
10470 snprintf(status, ST_SIZE, gettext("The pool has been upgraded "
10471 "to a newer, incompatible on-disk version.\n\tThe pool "
10472 "cannot be accessed on this system.\n"));
10473 snprintf(action, AC_SIZE, gettext("Access the pool from a "
10474 "system running more recent software, or\n\trestore the "
10475 "pool from backup.\n"));
10476 break;
10477
10478 case ZPOOL_STATUS_FEAT_DISABLED:
10479 snprintf(status, ST_SIZE, gettext("Some supported and "
10480 "requested features are not enabled on the pool.\n\t"
10481 "The pool can still be used, but some features are "
10482 "unavailable.\n"));
10483 snprintf(action, AC_SIZE, gettext("Enable all features using "
10484 "'zpool upgrade'. Once this is done,\n\tthe pool may no "
10485 "longer be accessible by software that does not support\n\t"
10486 "the features. See zpool-features(7) for details.\n"));
10487 break;
10488
10489 case ZPOOL_STATUS_COMPATIBILITY_ERR:
10490 snprintf(status, ST_SIZE, gettext("This pool has a "
10491 "compatibility list specified, but it could not be\n\t"
10492 "read/parsed at this time. The pool can still be used, "
10493 "but this\n\tshould be investigated.\n"));
10494 snprintf(action, AC_SIZE, gettext("Check the value of the "
10495 "'compatibility' property against the\n\t"
10496 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
10497 ZPOOL_DATA_COMPAT_D ".\n"));
10498 break;
10499
10500 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
10501 snprintf(status, ST_SIZE, gettext("One or more features "
10502 "are enabled on the pool despite not being\n\t"
10503 "requested by the 'compatibility' property.\n"));
10504 snprintf(action, AC_SIZE, gettext("Consider setting "
10505 "'compatibility' to an appropriate value, or\n\t"
10506 "adding needed features to the relevant file in\n\t"
10507 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
10508 break;
10509
10510 case ZPOOL_STATUS_UNSUP_FEAT_READ:
10511 snprintf(status, ST_SIZE, gettext("The pool cannot be accessed "
10512 "on this system because it uses the\n\tfollowing feature(s)"
10513 " not supported on this system:\n"));
10514 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
10515 1024);
10516 snprintf(action, AC_SIZE, gettext("Access the pool from a "
10517 "system that supports the required feature(s),\n\tor "
10518 "restore the pool from backup.\n"));
10519 break;
10520
10521 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
10522 snprintf(status, ST_SIZE, gettext("The pool can only be "
10523 "accessed in read-only mode on this system. It\n\tcannot be"
10524 " accessed in read-write mode because it uses the "
10525 "following\n\tfeature(s) not supported on this system:\n"));
10526 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
10527 1024);
10528 snprintf(action, AC_SIZE, gettext("The pool cannot be accessed "
10529 "in read-write mode. Import the pool with\n"
10530 "\t\"-o readonly=on\", access the pool from a system that "
10531 "supports the\n\trequired feature(s), or restore the "
10532 "pool from backup.\n"));
10533 break;
10534
10535 case ZPOOL_STATUS_FAULTED_DEV_R:
10536 snprintf(status, ST_SIZE, gettext("One or more devices are "
10537 "faulted in response to persistent errors.\n\tSufficient "
10538 "replicas exist for the pool to continue functioning "
10539 "in a\n\tdegraded state.\n"));
10540 snprintf(action, AC_SIZE, gettext("Replace the faulted device, "
10541 "or use 'zpool clear' to mark the device\n\trepaired.\n"));
10542 break;
10543
10544 case ZPOOL_STATUS_FAULTED_DEV_NR:
10545 snprintf(status, ST_SIZE, gettext("One or more devices are "
10546 "faulted in response to persistent errors. There are "
10547 "insufficient replicas for the pool to\n\tcontinue "
10548 "functioning.\n"));
10549 snprintf(action, AC_SIZE, gettext("Destroy and re-create the "
10550 "pool from a backup source. Manually marking the device\n"
10551 "\trepaired using 'zpool clear' may allow some data "
10552 "to be recovered.\n"));
10553 break;
10554
10555 case ZPOOL_STATUS_IO_FAILURE_MMP:
10556 snprintf(status, ST_SIZE, gettext("The pool is suspended "
10557 "because multihost writes failed or were delayed;\n\t"
10558 "another system could import the pool undetected.\n"));
10559 snprintf(action, AC_SIZE, gettext("Make sure the pool's devices"
10560 " are connected, then reboot your system and\n\timport the "
10561 "pool or run 'zpool clear' to resume the pool.\n"));
10562 break;
10563
10564 case ZPOOL_STATUS_IO_FAILURE_WAIT:
10565 case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
10566 snprintf(status, ST_SIZE, gettext("One or more devices are "
10567 "faulted in response to IO failures.\n"));
10568 snprintf(action, AC_SIZE, gettext("Make sure the affected "
10569 "devices are connected, then run 'zpool clear'.\n"));
10570 break;
10571
10572 case ZPOOL_STATUS_BAD_LOG:
10573 snprintf(status, ST_SIZE, gettext("An intent log record "
10574 "could not be read.\n"
10575 "\tWaiting for administrator intervention to fix the "
10576 "faulted pool.\n"));
10577 snprintf(action, AC_SIZE, gettext("Either restore the affected "
10578 "device(s) and run 'zpool online',\n"
10579 "\tor ignore the intent log records by running "
10580 "'zpool clear'.\n"));
10581 break;
10582
10583 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
10584 snprintf(status, ST_SIZE, gettext("One or more devices are "
10585 "configured to use a non-native block size.\n"
10586 "\tExpect reduced performance.\n"));
10587 snprintf(action, AC_SIZE, gettext("Replace affected devices "
10588 "with devices that support the\n\tconfigured block size, "
10589 "or migrate data to a properly configured\n\tpool.\n"));
10590 break;
10591
10592 case ZPOOL_STATUS_HOSTID_MISMATCH:
10593 snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid"
10594 " and system hostid on imported pool.\n\tThis pool was "
10595 "previously imported into a system with a different "
10596 "hostid,\n\tand then was verbatim imported into this "
10597 "system.\n"));
10598 snprintf(action, AC_SIZE, gettext("Export this pool on all "
10599 "systems on which it is imported.\n"
10600 "\tThen import it to correct the mismatch.\n"));
10601 break;
10602
10603 case ZPOOL_STATUS_ERRATA:
10604 snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"),
10605 errata);
10606 switch (errata) {
10607 case ZPOOL_ERRATA_NONE:
10608 break;
10609
10610 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
10611 snprintf(action, AC_SIZE, gettext("To correct the issue"
10612 " run 'zpool scrub'.\n"));
10613 break;
10614
10615 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
10616 (void) strlcat(status, gettext("\tExisting encrypted "
10617 "datasets contain an on-disk incompatibility\n\t "
10618 "which needs to be corrected.\n"), ST_SIZE);
10619 snprintf(action, AC_SIZE, gettext("To correct the issue"
10620 " backup existing encrypted datasets to new\n\t"
10621 "encrypted datasets and destroy the old ones. "
10622 "'zfs mount -o ro' can\n\tbe used to temporarily "
10623 "mount existing encrypted datasets readonly.\n"));
10624 break;
10625
10626 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
10627 (void) strlcat(status, gettext("\tExisting encrypted "
10628 "snapshots and bookmarks contain an on-disk\n\t"
10629 "incompatibility. This may cause on-disk "
10630 "corruption if they are used\n\twith "
10631 "'zfs recv'.\n"), ST_SIZE);
10632 			snprintf(action, AC_SIZE, gettext("To correct the "
10633 "issue, enable the bookmark_v2 feature. No "
10634 "additional\n\taction is needed if there are no "
10635 			    "encrypted snapshots or bookmarks.\n\tIf preserving "
10636 "the encrypted snapshots and bookmarks is required,"
10637 " use\n\ta non-raw send to backup and restore them."
10638 " Alternately, they may be\n\tremoved to resolve "
10639 "the incompatibility.\n"));
10640 break;
10641
10642 default:
10643 /*
10644 * All errata which allow the pool to be imported
10645 * must contain an action message.
10646 */
10647 assert(0);
10648 }
10649 break;
10650
10651 default:
10652 /*
10653 * The remaining errors can't actually be generated, yet.
10654 */
10655 assert(reason == ZPOOL_STATUS_OK);
10656 }
10657
10658 if (status[0] != 0) {
10659 if (cbp->cb_json)
10660 fnvlist_add_string(item, "status", status);
10661 else {
10662 printf_color(ANSI_BOLD, gettext("status: "));
10663 printf_color(ANSI_YELLOW, status);
10664 }
10665 }
10666
10667 if (action[0] != 0) {
10668 if (cbp->cb_json)
10669 fnvlist_add_string(item, "action", action);
10670 else {
10671 printf_color(ANSI_BOLD, gettext("action: "));
10672 printf_color(ANSI_YELLOW, action);
10673 }
10674 }
10675 }
10676
10677 static int
10678 status_callback_json(zpool_handle_t *zhp, void *data)
10679 {
10680 status_cbdata_t *cbp = data;
10681 nvlist_t *config, *nvroot;
10682 const char *msgid;
10683 char pool_guid[256];
10684 char msgbuf[256];
10685 uint64_t guid;
10686 zpool_status_t reason;
10687 zpool_errata_t errata;
10688 uint_t c;
10689 vdev_stat_t *vs;
10690 nvlist_t *item, *d, *load_info, *vds;
10691 item = d = NULL;
10692
10693 /* If dedup stats were requested, also fetch dedupcached. */
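	/* (cb_dedup_stats is bumped once per -D, so a value > 1 means -DD) */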
10694 if (cbp->cb_dedup_stats > 1)
10695 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
10696 reason = zpool_get_status(zhp, &msgid, &errata);
10697 /*
10698 * If we were given 'zpool status -x', only report those pools with
10699 * problems.
10700 */
10701 if (cbp->cb_explain &&
10702 (reason == ZPOOL_STATUS_OK ||
10703 reason == ZPOOL_STATUS_VERSION_OLDER ||
10704 reason == ZPOOL_STATUS_FEAT_DISABLED ||
10705 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
10706 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
10707 return (0);
10708 }
10709
10710 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
10711 item = fnvlist_alloc();
10712 vds = fnvlist_alloc();
10713 fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int);
10714 config = zpool_get_config(zhp, NULL);
10715
10716 if (config != NULL) {
10717 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
10718 verify(nvlist_lookup_uint64_array(nvroot,
10719 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);
10720 if (cbp->cb_json_pool_key_guid) {
10721 guid = fnvlist_lookup_uint64(config,
10722 ZPOOL_CONFIG_POOL_GUID);
10723 snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid);
10724 }
10725 cbp->cb_count++;
10726
10727 print_status_reason(zhp, cbp, reason, errata, item);
10728 if (msgid != NULL) {
10729 snprintf(msgbuf, 256,
10730 "https://openzfs.github.io/openzfs-docs/msg/%s",
10731 msgid);
10732 fnvlist_add_string(item, "msgid", msgid);
10733 fnvlist_add_string(item, "moreinfo", msgbuf);
10734 }
10735
10736 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
10737 &load_info) == 0) {
10738 fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO,
10739 load_info);
10740 }
10741
10742 scan_status_nvlist(zhp, cbp, nvroot, item);
10743 removal_status_nvlist(zhp, cbp, nvroot, item);
10744 checkpoint_status_nvlist(nvroot, cbp, item);
10745 raidz_expand_status_nvlist(zhp, cbp, nvroot, item);
10746 vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds);
10747 if (cbp->cb_flat_vdevs) {
10748 class_vdevs_nvlist(zhp, cbp, nvroot,
10749 VDEV_ALLOC_BIAS_DEDUP, vds);
10750 class_vdevs_nvlist(zhp, cbp, nvroot,
10751 VDEV_ALLOC_BIAS_SPECIAL, vds);
10752 class_vdevs_nvlist(zhp, cbp, nvroot,
10753 VDEV_ALLOC_CLASS_LOGS, vds);
10754 l2cache_nvlist(zhp, cbp, nvroot, vds);
10755 spares_nvlist(zhp, cbp, nvroot, vds);
10756
10757 fnvlist_add_nvlist(item, "vdevs", vds);
10758 fnvlist_free(vds);
10759 } else {
10760 fnvlist_add_nvlist(item, "vdevs", vds);
10761 fnvlist_free(vds);
10762
10763 class_vdevs_nvlist(zhp, cbp, nvroot,
10764 VDEV_ALLOC_BIAS_DEDUP, item);
10765 class_vdevs_nvlist(zhp, cbp, nvroot,
10766 VDEV_ALLOC_BIAS_SPECIAL, item);
10767 class_vdevs_nvlist(zhp, cbp, nvroot,
10768 VDEV_ALLOC_CLASS_LOGS, item);
10769 l2cache_nvlist(zhp, cbp, nvroot, item);
10770 spares_nvlist(zhp, cbp, nvroot, item);
10771 }
10772 dedup_stats_nvlist(zhp, cbp, item);
10773 errors_nvlist(zhp, cbp, item);
10774 }
10775 if (cbp->cb_json_pool_key_guid) {
10776 fnvlist_add_nvlist(d, pool_guid, item);
10777 } else {
10778 fnvlist_add_nvlist(d, zpool_get_name(zhp),
10779 item);
10780 }
10781 fnvlist_free(item);
10782 return (0);
10783 }
10784
10785 /*
10786  * Display a summary of pool status, such as:
10787 *
10788 * pool: tank
10789 * status: DEGRADED
10790 * reason: One or more devices ...
10791 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
10792 * config:
10793 * mirror DEGRADED
10794 * c1t0d0 OK
10795 * c2t0d0 UNAVAIL
10796 *
10797  * When given the '-v' option, we print out the complete error log. If the
10798  * '-e' option is specified, only unhealthy vdevs are displayed.
10799 */
10800 static int
10801 status_callback(zpool_handle_t *zhp, void *data)
10802 {
10803 status_cbdata_t *cbp = data;
10804 nvlist_t *config, *nvroot;
10805 const char *msgid;
10806 zpool_status_t reason;
10807 zpool_errata_t errata;
10808 const char *health;
10809 uint_t c;
10810 vdev_stat_t *vs;
10811
10812 /* If dedup stats were requested, also fetch dedupcached. */
10813 if (cbp->cb_dedup_stats > 1)
10814 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
10815
10816 config = zpool_get_config(zhp, NULL);
10817 reason = zpool_get_status(zhp, &msgid, &errata);
10818
10819 cbp->cb_count++;
10820
10821 /*
10822 * If we were given 'zpool status -x', only report those pools with
10823 * problems.
10824 */
10825 if (cbp->cb_explain &&
10826 (reason == ZPOOL_STATUS_OK ||
10827 reason == ZPOOL_STATUS_VERSION_OLDER ||
10828 reason == ZPOOL_STATUS_FEAT_DISABLED ||
10829 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
10830 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
10831 if (!cbp->cb_allpools) {
10832 (void) printf(gettext("pool '%s' is healthy\n"),
10833 zpool_get_name(zhp));
10834 if (cbp->cb_first)
10835 cbp->cb_first = B_FALSE;
10836 }
10837 return (0);
10838 }
10839
10840 if (cbp->cb_first)
10841 cbp->cb_first = B_FALSE;
10842 else
10843 (void) printf("\n");
10844
10845 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
10846 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
10847 (uint64_t **)&vs, &c) == 0);
10848
10849 health = zpool_get_state_str(zhp);
10850
10851 printf(" ");
10852 printf_color(ANSI_BOLD, gettext("pool:"));
10853 printf(" %s\n", zpool_get_name(zhp));
10854 fputc(' ', stdout);
10855 printf_color(ANSI_BOLD, gettext("state: "));
10856
10857 printf_color(health_str_to_color(health), "%s", health);
10858
10859 fputc('\n', stdout);
10860 print_status_reason(zhp, cbp, reason, errata, NULL);
10861
10862 if (msgid != NULL) {
10863 printf(" ");
10864 printf_color(ANSI_BOLD, gettext("see:"));
10865 printf(gettext(
10866 " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
10867 msgid);
10868 }
10869
10870 if (config != NULL) {
10871 uint64_t nerr;
10872 nvlist_t **spares, **l2cache;
10873 uint_t nspares, nl2cache;
10874
10875 print_scan_status(zhp, nvroot);
10876
10877 pool_removal_stat_t *prs = NULL;
10878 (void) nvlist_lookup_uint64_array(nvroot,
10879 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
10880 print_removal_status(zhp, prs);
10881
10882 pool_checkpoint_stat_t *pcs = NULL;
10883 (void) nvlist_lookup_uint64_array(nvroot,
10884 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
10885 print_checkpoint_status(pcs);
10886
10887 pool_raidz_expand_stat_t *pres = NULL;
10888 (void) nvlist_lookup_uint64_array(nvroot,
10889 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
10890 print_raidz_expand_status(zhp, pres);
10891
10892 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
10893 cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
10894 if (cbp->cb_namewidth < 10)
10895 cbp->cb_namewidth = 10;
10896
10897 color_start(ANSI_BOLD);
10898 (void) printf(gettext("config:\n\n"));
10899 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
10900 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
10901 "CKSUM");
10902 color_end();
10903
10904 if (cbp->cb_print_slow_ios) {
10905 printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
10906 }
10907
10908 if (cbp->cb_print_power) {
10909 printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
10910 }
10911
10912 if (cbp->cb_print_dio_verify) {
10913 printf_color(ANSI_BOLD, " %5s", gettext("DIO"));
10914 }
10915
10916 if (cbp->vcdl != NULL)
10917 print_cmd_columns(cbp->vcdl, 0);
10918
10919 printf("\n");
10920
10921 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
10922 B_FALSE, NULL);
10923
10924 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
10925 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
10926 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
10927
10928 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
10929 &l2cache, &nl2cache) == 0)
10930 print_l2cache(zhp, cbp, l2cache, nl2cache);
10931
10932 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
10933 &spares, &nspares) == 0)
10934 print_spares(zhp, cbp, spares, nspares);
10935
10936 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
10937 &nerr) == 0) {
10938 (void) printf("\n");
10939 if (nerr == 0) {
10940 (void) printf(gettext(
10941 "errors: No known data errors\n"));
10942 } else if (!cbp->cb_verbose) {
10943 color_start(ANSI_RED);
10944 (void) printf(gettext("errors: %llu data "
10945 "errors, use '-v' for a list\n"),
10946 (u_longlong_t)nerr);
10947 color_end();
10948 } else {
10949 print_error_log(zhp);
10950 }
10951 }
10952
10953 if (cbp->cb_dedup_stats)
10954 print_dedup_stats(zhp, config, cbp->cb_literal);
10955 } else {
10956 (void) printf(gettext("config: The configuration cannot be "
10957 "determined.\n"));
10958 }
10959
10960 return (0);
10961 }
10962
10963 /*
10964  * zpool status [-c [script1,script2,...]] [-jdDegiLpPstvx] [--power] ...
10965 * [-T d|u] [pool] [interval [count]]
10966 *
10967 * -c CMD For each vdev, run command CMD
10968 * -d Display Direct I/O write verify errors
10969 * -D Display dedup status (undocumented)
10970 * -e Display only unhealthy vdevs
10971 * -g Display guid for individual vdev name.
10972 * -i Display vdev initialization status.
10973 * -L Follow links when resolving vdev path name.
10974 * -p Display values in parsable (exact) format.
10975 * -P Display full path for vdev name.
10976 * -s Display slow IOs column.
10977 * -t Display vdev TRIM status.
10978 * -T Display a timestamp in date(1) or Unix format
10979 * -v Display complete error logs
10980 * -x Display only pools with potential problems
10981 * -j Display output in JSON format
10982 * --power Display vdev enclosure slot power status
10983  * --json-int Display numbers in integer format instead of strings
10984 * --json-flat-vdevs Display vdevs in flat hierarchy
10985 * --json-pool-key-guid Use pool GUID as key for pool objects
10986 *
10987 * Describes the health status of all pools or some subset.
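 *
 * Example invocations (illustrative):
 *
 *	zpool status -x			# only pools with potential problems
 *	zpool status -v tank		# include the complete error log
 *	zpool status -j --json-int	# JSON output with numeric values
 *	zpool status tank 5 10		# redisplay every 5 seconds, 10 times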
10988 */
10989 int
10990 zpool_do_status(int argc, char **argv)
10991 {
10992 int c;
10993 int ret;
10994 float interval = 0;
10995 unsigned long count = 0;
10996 status_cbdata_t cb = { 0 };
10997 nvlist_t *data;
10998 char *cmd = NULL;
10999
11000 struct option long_options[] = {
11001 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
11002 {"json", no_argument, NULL, 'j'},
11003 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
11004 {"json-flat-vdevs", no_argument, NULL,
11005 ZPOOL_OPTION_JSON_FLAT_VDEVS},
11006 {"json-pool-key-guid", no_argument, NULL,
11007 ZPOOL_OPTION_POOL_KEY_GUID},
11008 {0, 0, 0, 0}
11009 };
11010
11011 /* check options */
11012 while ((c = getopt_long(argc, argv, "c:jdDegiLpPstT:vx", long_options,
11013 NULL)) != -1) {
11014 switch (c) {
11015 case 'c':
11016 if (cmd != NULL) {
11017 fprintf(stderr,
11018 gettext("Can't set -c flag twice\n"));
11019 exit(1);
11020 }
11021
11022 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
11023 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
11024 fprintf(stderr, gettext(
11025 "Can't run -c, disabled by "
11026 "ZPOOL_SCRIPTS_ENABLED.\n"));
11027 exit(1);
11028 }
11029
11030 if ((getuid() <= 0 || geteuid() <= 0) &&
11031 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
11032 fprintf(stderr, gettext(
11033 "Can't run -c with root privileges "
11034 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
11035 exit(1);
11036 }
11037 cmd = optarg;
11038 break;
11039 case 'd':
11040 cb.cb_print_dio_verify = B_TRUE;
11041 break;
11042 case 'D':
11043 if (++cb.cb_dedup_stats > 2)
11044 cb.cb_dedup_stats = 2;
11045 break;
11046 case 'e':
11047 cb.cb_print_unhealthy = B_TRUE;
11048 break;
11049 case 'g':
11050 cb.cb_name_flags |= VDEV_NAME_GUID;
11051 break;
11052 case 'i':
11053 cb.cb_print_vdev_init = B_TRUE;
11054 break;
11055 case 'L':
11056 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
11057 break;
11058 case 'p':
11059 cb.cb_literal = B_TRUE;
11060 break;
11061 case 'P':
11062 cb.cb_name_flags |= VDEV_NAME_PATH;
11063 break;
11064 case 's':
11065 cb.cb_print_slow_ios = B_TRUE;
11066 break;
11067 case 't':
11068 cb.cb_print_vdev_trim = B_TRUE;
11069 break;
11070 case 'T':
11071 get_timestamp_arg(*optarg);
11072 break;
11073 case 'v':
11074 cb.cb_verbose = B_TRUE;
11075 break;
11076 case 'j':
11077 cb.cb_json = B_TRUE;
11078 break;
11079 case 'x':
11080 cb.cb_explain = B_TRUE;
11081 break;
11082 case ZPOOL_OPTION_POWER:
11083 cb.cb_print_power = B_TRUE;
11084 break;
11085 case ZPOOL_OPTION_JSON_FLAT_VDEVS:
11086 cb.cb_flat_vdevs = B_TRUE;
11087 break;
11088 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
11089 cb.cb_json_as_int = B_TRUE;
11090 cb.cb_literal = B_TRUE;
11091 break;
11092 case ZPOOL_OPTION_POOL_KEY_GUID:
11093 cb.cb_json_pool_key_guid = B_TRUE;
11094 break;
11095 case '?':
11096 if (optopt == 'c') {
11097 print_zpool_script_list("status");
11098 exit(0);
11099 } else {
11100 fprintf(stderr,
11101 gettext("invalid option '%c'\n"), optopt);
11102 }
11103 usage(B_FALSE);
11104 }
11105 }
11106
11107 argc -= optind;
11108 argv += optind;
11109
11110 get_interval_count(&argc, argv, &interval, &count);
11111
11112 if (argc == 0)
11113 cb.cb_allpools = B_TRUE;
11114
11115 cb.cb_first = B_TRUE;
11116 cb.cb_print_status = B_TRUE;
11117
11118 if (cb.cb_flat_vdevs && !cb.cb_json) {
11119 fprintf(stderr, gettext("'--json-flat-vdevs' only works with"
11120 " '-j' option\n"));
11121 usage(B_FALSE);
11122 }
11123
11124 if (cb.cb_json_as_int && !cb.cb_json) {
11125 (void) fprintf(stderr, gettext("'--json-int' only works with"
11126 " '-j' option\n"));
11127 usage(B_FALSE);
11128 }
11129
11130 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
11131 (void) fprintf(stderr, gettext("'json-pool-key-guid' only"
11132 " works with '-j' option\n"));
11133 usage(B_FALSE);
11134 }
11135
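	/*
	 * Display the status once, or keep redisplaying it at each
	 * requested interval until 'count' iterations have completed.
	 */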
11136 for (;;) {
11137 if (cb.cb_json) {
11138 cb.cb_jsobj = zpool_json_schema(0, 1);
11139 data = fnvlist_alloc();
11140 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
11141 fnvlist_free(data);
11142 }
11143
11144 if (timestamp_fmt != NODATE) {
11145 if (cb.cb_json) {
11146 if (cb.cb_json_as_int) {
11147 fnvlist_add_uint64(cb.cb_jsobj, "time",
11148 time(NULL));
11149 } else {
11150 char ts[128];
11151 get_timestamp(timestamp_fmt, ts, 128);
11152 fnvlist_add_string(cb.cb_jsobj, "time",
11153 ts);
11154 }
11155 } else
11156 print_timestamp(timestamp_fmt);
11157 }
11158
11159 if (cmd != NULL)
11160 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
11161 NULL, NULL, 0, 0);
11162
11163 if (cb.cb_json) {
11164 ret = for_each_pool(argc, argv, B_TRUE, NULL,
11165 ZFS_TYPE_POOL, cb.cb_literal,
11166 status_callback_json, &cb);
11167 } else {
11168 ret = for_each_pool(argc, argv, B_TRUE, NULL,
11169 ZFS_TYPE_POOL, cb.cb_literal,
11170 status_callback, &cb);
11171 }
11172
11173 if (cb.vcdl != NULL)
11174 free_vdev_cmd_data_list(cb.vcdl);
11175
11176 if (cb.cb_json) {
11177 if (ret == 0)
11178 zcmd_print_json(cb.cb_jsobj);
11179 else
11180 nvlist_free(cb.cb_jsobj);
11181 } else {
11182 if (argc == 0 && cb.cb_count == 0) {
11183 (void) fprintf(stderr, "%s",
11184 gettext("no pools available\n"));
11185 } else if (cb.cb_explain && cb.cb_first &&
11186 cb.cb_allpools) {
11187 (void) printf("%s",
11188 gettext("all pools are healthy\n"));
11189 }
11190 }
11191
11192 if (ret != 0)
11193 return (ret);
11194
11195 if (interval == 0)
11196 break;
11197
11198 if (count != 0 && --count == 0)
11199 break;
11200
11201 (void) fflush(stdout);
11202 (void) fsleep(interval);
11203 }
11204
11205 return (0);
11206 }
11207
11208 typedef struct upgrade_cbdata {
11209 int cb_first;
11210 int cb_argc;
11211 uint64_t cb_version;
11212 char **cb_argv;
11213 } upgrade_cbdata_t;
11214
11215 static int
11216 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
11217 {
11218 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
11219 int *count = (int *)unsupp_fs;
11220
11221 if (zfs_version > ZPL_VERSION) {
11222 (void) printf(gettext("%s (v%d) is not supported by this "
11223 "implementation of ZFS.\n"),
11224 zfs_get_name(zhp), zfs_version);
11225 (*count)++;
11226 }
11227
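	/* recurse into child filesystems; each callback closes its own handle */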
11228 zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
11229
11230 zfs_close(zhp);
11231
11232 return (0);
11233 }
11234
11235 static int
11236 upgrade_version(zpool_handle_t *zhp, uint64_t version)
11237 {
11238 int ret;
11239 nvlist_t *config;
11240 uint64_t oldversion;
11241 int unsupp_fs = 0;
11242
11243 config = zpool_get_config(zhp, NULL);
11244 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11245 &oldversion) == 0);
11246
11247 char compat[ZFS_MAXPROPLEN];
11248 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
11249 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
11250 compat[0] = '\0';
11251
11252 assert(SPA_VERSION_IS_SUPPORTED(oldversion));
11253 assert(oldversion < version);
11254
11255 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
11256 if (ret != 0)
11257 return (ret);
11258
11259 if (unsupp_fs) {
11260 (void) fprintf(stderr, gettext("Upgrade not performed due "
11261 "to %d unsupported filesystems (max v%d).\n"),
11262 unsupp_fs, (int)ZPL_VERSION);
11263 return (1);
11264 }
11265
11266 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
11267 (void) fprintf(stderr, gettext("Upgrade not performed because "
11268 "'compatibility' property set to '"
11269 ZPOOL_COMPAT_LEGACY "'.\n"));
11270 return (1);
11271 }
11272
11273 ret = zpool_upgrade(zhp, version);
11274 if (ret != 0)
11275 return (ret);
11276
11277 if (version >= SPA_VERSION_FEATURES) {
11278 (void) printf(gettext("Successfully upgraded "
11279 "'%s' from version %llu to feature flags.\n"),
11280 zpool_get_name(zhp), (u_longlong_t)oldversion);
11281 } else {
11282 (void) printf(gettext("Successfully upgraded "
11283 "'%s' from version %llu to version %llu.\n"),
11284 zpool_get_name(zhp), (u_longlong_t)oldversion,
11285 (u_longlong_t)version);
11286 }
11287
11288 return (0);
11289 }
11290
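/*
 * Enable every supported feature that is currently disabled on the pool,
 * subject to the pool's 'compatibility' property. On success, *countp
 * (if non-NULL) is set to the number of features that were enabled.
 */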
11291 static int
11292 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
11293 {
11294 int i, ret, count;
11295 boolean_t firstff = B_TRUE;
11296 nvlist_t *enabled = zpool_get_features(zhp);
11297
11298 char compat[ZFS_MAXPROPLEN];
11299 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
11300 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
11301 compat[0] = '\0';
11302
11303 boolean_t requested_features[SPA_FEATURES];
11304 if (zpool_do_load_compat(compat, requested_features) !=
11305 ZPOOL_COMPATIBILITY_OK)
11306 return (-1);
11307
11308 count = 0;
11309 for (i = 0; i < SPA_FEATURES; i++) {
11310 const char *fname = spa_feature_table[i].fi_uname;
11311 const char *fguid = spa_feature_table[i].fi_guid;
11312
11313 if (!spa_feature_table[i].fi_zfs_mod_supported)
11314 continue;
11315
11316 if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
11317 char *propname;
11318 verify(-1 != asprintf(&propname, "feature@%s", fname));
11319 ret = zpool_set_prop(zhp, propname,
11320 ZFS_FEATURE_ENABLED);
11321 if (ret != 0) {
11322 free(propname);
11323 return (ret);
11324 }
11325 count++;
11326
11327 if (firstff) {
11328 (void) printf(gettext("Enabled the "
11329 "following features on '%s':\n"),
11330 zpool_get_name(zhp));
11331 firstff = B_FALSE;
11332 }
11333 (void) printf(gettext(" %s\n"), fname);
11334 free(propname);
11335 }
11336 }
11337
11338 if (countp != NULL)
11339 *countp = count;
11340 return (0);
11341 }
11342
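/*
 * zpool_iter() callback for 'zpool upgrade -a': bring the pool up to
 * cb_version, then enable any supported features that are still disabled.
 */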
11343 static int
11344 upgrade_cb(zpool_handle_t *zhp, void *arg)
11345 {
11346 upgrade_cbdata_t *cbp = arg;
11347 nvlist_t *config;
11348 uint64_t version;
11349 boolean_t modified_pool = B_FALSE;
11350 int ret;
11351
11352 config = zpool_get_config(zhp, NULL);
11353 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11354 &version) == 0);
11355
11356 assert(SPA_VERSION_IS_SUPPORTED(version));
11357
11358 if (version < cbp->cb_version) {
11359 cbp->cb_first = B_FALSE;
11360 ret = upgrade_version(zhp, cbp->cb_version);
11361 if (ret != 0)
11362 return (ret);
11363 modified_pool = B_TRUE;
11364
11365 /*
11366 * If they did "zpool upgrade -a", then we could
11367 * be doing ioctls to different pools. We need
11368 * to log this history once to each pool, and bypass
11369 * the normal history logging that happens in main().
11370 */
11371 (void) zpool_log_history(g_zfs, history_str);
11372 log_history = B_FALSE;
11373 }
11374
11375 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
11376 int count;
11377 ret = upgrade_enable_all(zhp, &count);
11378 if (ret != 0)
11379 return (ret);
11380
11381 if (count > 0) {
11382 cbp->cb_first = B_FALSE;
11383 modified_pool = B_TRUE;
11384 }
11385 }
11386
11387 if (modified_pool) {
11388 (void) printf("\n");
11389 (void) after_zpool_upgrade(zhp);
11390 }
11391
11392 return (0);
11393 }
11394
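/*
 * zpool_iter() callback for plain 'zpool upgrade': list pools still
 * formatted with a legacy (pre-feature-flags) version number.
 */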
11395 static int
11396 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
11397 {
11398 upgrade_cbdata_t *cbp = arg;
11399 nvlist_t *config;
11400 uint64_t version;
11401
11402 config = zpool_get_config(zhp, NULL);
11403 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11404 &version) == 0);
11405
11406 assert(SPA_VERSION_IS_SUPPORTED(version));
11407
11408 if (version < SPA_VERSION_FEATURES) {
11409 if (cbp->cb_first) {
11410 (void) printf(gettext("The following pools are "
11411 "formatted with legacy version numbers and can\n"
11412 "be upgraded to use feature flags. After "
11413 "being upgraded, these pools\nwill no "
11414 "longer be accessible by software that does not "
11415 "support feature\nflags.\n\n"
11416 "Note that setting a pool's 'compatibility' "
11417 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
11418 "inhibit upgrades.\n\n"));
11419 (void) printf(gettext("VER POOL\n"));
11420 (void) printf(gettext("--- ------------\n"));
11421 cbp->cb_first = B_FALSE;
11422 }
11423
11424 (void) printf("%2llu %s\n", (u_longlong_t)version,
11425 zpool_get_name(zhp));
11426 }
11427
11428 return (0);
11429 }
11430
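/*
 * zpool_iter() callback: for feature-flags pools, list supported features
 * that have not yet been enabled.
 */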
11431 static int
11432 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
11433 {
11434 upgrade_cbdata_t *cbp = arg;
11435 nvlist_t *config;
11436 uint64_t version;
11437
11438 config = zpool_get_config(zhp, NULL);
11439 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11440 &version) == 0);
11441
11442 if (version >= SPA_VERSION_FEATURES) {
11443 int i;
11444 boolean_t poolfirst = B_TRUE;
11445 nvlist_t *enabled = zpool_get_features(zhp);
11446
11447 for (i = 0; i < SPA_FEATURES; i++) {
11448 const char *fguid = spa_feature_table[i].fi_guid;
11449 const char *fname = spa_feature_table[i].fi_uname;
11450
11451 if (!spa_feature_table[i].fi_zfs_mod_supported)
11452 continue;
11453
11454 if (!nvlist_exists(enabled, fguid)) {
11455 if (cbp->cb_first) {
11456 (void) printf(gettext("\nSome "
11457 "supported features are not "
11458 "enabled on the following pools. "
11459 "Once a\nfeature is enabled the "
11460 "pool may become incompatible with "
11461 "software\nthat does not support "
11462 "the feature. See "
11463 "zpool-features(7) for "
11464 "details.\n\n"
11465 "Note that the pool "
11466 "'compatibility' feature can be "
11467 "used to inhibit\nfeature "
11468 "upgrades.\n\n"));
11469 (void) printf(gettext("POOL "
11470 "FEATURE\n"));
11471 (void) printf(gettext("------"
11472 "---------\n"));
11473 cbp->cb_first = B_FALSE;
11474 }
11475
11476 if (poolfirst) {
11477 (void) printf(gettext("%s\n"),
11478 zpool_get_name(zhp));
11479 poolfirst = B_FALSE;
11480 }
11481
11482 (void) printf(gettext(" %s\n"), fname);
11483 }
11484 /*
11485 * If they did "zpool upgrade -a", then we could
11486 * be doing ioctls to different pools. We need
11487 * to log this history once to each pool, and bypass
11488 * the normal history logging that happens in main().
11489 */
11490 (void) zpool_log_history(g_zfs, history_str);
11491 log_history = B_FALSE;
11492 }
11493 }
11494
11495 return (0);
11496 }
11497
11498 static int
11499 upgrade_one(zpool_handle_t *zhp, void *data)
11500 {
11501 boolean_t modified_pool = B_FALSE;
11502 upgrade_cbdata_t *cbp = data;
11503 uint64_t cur_version;
11504 int ret;
11505
11506 if (strcmp("log", zpool_get_name(zhp)) == 0) {
11507 (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
11508 "Pool 'log' must be renamed using export and import"
11509 " to upgrade.\n"));
11510 return (1);
11511 }
11512
11513 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
11514 if (cur_version > cbp->cb_version) {
11515 (void) printf(gettext("Pool '%s' is already formatted "
11516 "using more current version '%llu'.\n\n"),
11517 zpool_get_name(zhp), (u_longlong_t)cur_version);
11518 return (0);
11519 }
11520
11521 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
11522 (void) printf(gettext("Pool '%s' is already formatted "
11523 "using version %llu.\n\n"), zpool_get_name(zhp),
11524 (u_longlong_t)cbp->cb_version);
11525 return (0);
11526 }
11527
11528 if (cur_version != cbp->cb_version) {
11529 modified_pool = B_TRUE;
11530 ret = upgrade_version(zhp, cbp->cb_version);
11531 if (ret != 0)
11532 return (ret);
11533 }
11534
11535 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
11536 int count = 0;
11537 ret = upgrade_enable_all(zhp, &count);
11538 if (ret != 0)
11539 return (ret);
11540
11541 if (count != 0) {
11542 modified_pool = B_TRUE;
11543 } else if (cur_version == SPA_VERSION) {
11544 (void) printf(gettext("Pool '%s' already has all "
11545 "supported and requested features enabled.\n"),
11546 zpool_get_name(zhp));
11547 }
11548 }
11549
11550 if (modified_pool) {
11551 (void) printf("\n");
11552 (void) after_zpool_upgrade(zhp);
11553 }
11554
11555 return (0);
11556 }
11557
11558 /*
11559 * zpool upgrade
11560 * zpool upgrade -v
11561 * zpool upgrade [-V version] <-a | pool ...>
11562 *
11563 * With no arguments, display downrev'd ZFS pools available for upgrade.
11564 * Individual pools can be upgraded by specifying the pool, and '-a' will
11565 * upgrade all pools.
11566 */
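/*
 * Example session (output is illustrative only):
 *
 *	# zpool upgrade tank
 *	This system supports ZFS pool feature flags.
 *
 *	Successfully upgraded 'tank' from version 28 to feature flags.
 */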
11567 int
11568 zpool_do_upgrade(int argc, char **argv)
11569 {
11570 int c;
11571 upgrade_cbdata_t cb = { 0 };
11572 int ret = 0;
11573 boolean_t showversions = B_FALSE;
11574 boolean_t upgradeall = B_FALSE;
11575 char *end;
11576
11578 /* check options */
11579 while ((c = getopt(argc, argv, ":avV:")) != -1) {
11580 switch (c) {
11581 case 'a':
11582 upgradeall = B_TRUE;
11583 break;
11584 case 'v':
11585 showversions = B_TRUE;
11586 break;
11587 case 'V':
11588 cb.cb_version = strtoll(optarg, &end, 10);
11589 if (*end != '\0' ||
11590 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
11591 (void) fprintf(stderr,
11592 gettext("invalid version '%s'\n"), optarg);
11593 usage(B_FALSE);
11594 }
11595 break;
11596 case ':':
11597 (void) fprintf(stderr, gettext("missing argument for "
11598 "'%c' option\n"), optopt);
11599 usage(B_FALSE);
11600 break;
11601 case '?':
11602 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
11603 optopt);
11604 usage(B_FALSE);
11605 }
11606 }
11607
11608 cb.cb_argc = argc;
11609 cb.cb_argv = argv;
11610 argc -= optind;
11611 argv += optind;
11612
11613 if (cb.cb_version == 0) {
11614 cb.cb_version = SPA_VERSION;
11615 } else if (!upgradeall && argc == 0) {
11616 (void) fprintf(stderr, gettext("-V option requires "
11617 "'-a' or a pool name\n"));
11618 usage(B_FALSE);
11619 }
11620
11621 if (showversions) {
11622 if (upgradeall || argc != 0) {
11623 (void) fprintf(stderr, gettext("-v option is "
11624 "incompatible with other arguments\n"));
11625 usage(B_FALSE);
11626 }
11627 } else if (upgradeall) {
11628 if (argc != 0) {
11629 (void) fprintf(stderr, gettext("-a option should not "
11630 "be used along with a pool name\n"));
11631 usage(B_FALSE);
11632 }
11633 }
11634
11635 (void) printf("%s", gettext("This system supports ZFS pool feature "
11636 "flags.\n\n"));
11637 if (showversions) {
11638 int i;
11639
11640 (void) printf(gettext("The following features are "
11641 "supported:\n\n"));
11642 (void) printf(gettext("FEAT DESCRIPTION\n"));
11643 (void) printf("----------------------------------------------"
11644 "---------------\n");
11645 for (i = 0; i < SPA_FEATURES; i++) {
11646 zfeature_info_t *fi = &spa_feature_table[i];
11647 if (!fi->fi_zfs_mod_supported)
11648 continue;
11649 const char *ro =
11650 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
11651 " (read-only compatible)" : "";
11652
11653 (void) printf("%-37s%s\n", fi->fi_uname, ro);
11654 (void) printf(" %s\n", fi->fi_desc);
11655 }
11656 (void) printf("\n");
11657
11658 (void) printf(gettext("The following legacy versions are also "
11659 "supported:\n\n"));
11660 (void) printf(gettext("VER DESCRIPTION\n"));
11661 (void) printf("--- -----------------------------------------"
11662 "---------------\n");
11663 (void) printf(gettext(" 1 Initial ZFS version\n"));
11664 (void) printf(gettext(" 2 Ditto blocks "
11665 "(replicated metadata)\n"));
11666 (void) printf(gettext(" 3 Hot spares and double parity "
11667 "RAID-Z\n"));
11668 (void) printf(gettext(" 4 zpool history\n"));
11669 (void) printf(gettext(" 5 Compression using the gzip "
11670 "algorithm\n"));
11671 (void) printf(gettext(" 6 bootfs pool property\n"));
11672 (void) printf(gettext(" 7 Separate intent log devices\n"));
11673 (void) printf(gettext(" 8 Delegated administration\n"));
11674 (void) printf(gettext(" 9 refquota and refreservation "
11675 "properties\n"));
11676 (void) printf(gettext(" 10 Cache devices\n"));
11677 (void) printf(gettext(" 11 Improved scrub performance\n"));
11678 (void) printf(gettext(" 12 Snapshot properties\n"));
11679 (void) printf(gettext(" 13 snapused property\n"));
11680 (void) printf(gettext(" 14 passthrough-x aclinherit\n"));
11681 (void) printf(gettext(" 15 user/group space accounting\n"));
11682 (void) printf(gettext(" 16 stmf property support\n"));
11683 (void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
11684 (void) printf(gettext(" 18 Snapshot user holds\n"));
11685 (void) printf(gettext(" 19 Log device removal\n"));
11686 (void) printf(gettext(" 20 Compression using zle "
11687 "(zero-length encoding)\n"));
11688 (void) printf(gettext(" 21 Deduplication\n"));
11689 (void) printf(gettext(" 22 Received properties\n"));
11690 (void) printf(gettext(" 23 Slim ZIL\n"));
11691 (void) printf(gettext(" 24 System attributes\n"));
11692 (void) printf(gettext(" 25 Improved scrub stats\n"));
11693 (void) printf(gettext(" 26 Improved snapshot deletion "
11694 "performance\n"));
11695 (void) printf(gettext(" 27 Improved snapshot creation "
11696 "performance\n"));
11697 (void) printf(gettext(" 28 Multiple vdev replacements\n"));
11698 (void) printf(gettext("\nFor more information on a particular "
11699 "version, including supported releases,\n"));
11700 (void) printf(gettext("see the ZFS Administration Guide.\n\n"));
11701 } else if (argc == 0 && upgradeall) {
11702 cb.cb_first = B_TRUE;
11703 ret = zpool_iter(g_zfs, upgrade_cb, &cb);
11704 if (ret == 0 && cb.cb_first) {
11705 if (cb.cb_version == SPA_VERSION) {
11706 (void) printf(gettext("All pools are already "
11707 "formatted using feature flags.\n\n"));
11708 (void) printf(gettext("Every feature flags "
11709 "pool already has all supported and "
11710 "requested features enabled.\n"));
11711 } else {
11712 (void) printf(gettext("All pools are already "
11713 "formatted with version %llu or higher.\n"),
11714 (u_longlong_t)cb.cb_version);
11715 }
11716 }
11717 } else if (argc == 0) {
11718 cb.cb_first = B_TRUE;
11719 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
11720 assert(ret == 0);
11721
11722 if (cb.cb_first) {
11723 (void) printf(gettext("All pools are formatted "
11724 "using feature flags.\n\n"));
11725 } else {
11726 (void) printf(gettext("\nUse 'zpool upgrade -v' "
11727 "for a list of available legacy versions.\n"));
11728 }
11729
11730 cb.cb_first = B_TRUE;
11731 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
11732 assert(ret == 0);
11733
11734 if (cb.cb_first) {
11735 (void) printf(gettext("Every feature flags pool has "
11736 "all supported and requested features enabled.\n"));
11737 } else {
11738 (void) printf(gettext("\n"));
11739 }
11740 } else {
11741 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
11742 B_FALSE, upgrade_one, &cb);
11743 }
11744
11745 return (ret);
11746 }
11747
11748 typedef struct hist_cbdata {
11749 boolean_t first;
11750 boolean_t longfmt;
11751 boolean_t internal;
11752 } hist_cbdata_t;
11753
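/*
 * Print the records of a single ZPOOL_HIST_RECORD nvlist array. Command
 * records are always shown; internal events, ioctls, and unrecognized
 * records require cb->internal ('zpool history -i'), and cb->longfmt
 * ('-l') appends the bracketed user/host/zone suffix to each record.
 */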
11754 static void
11755 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
11756 {
11757 nvlist_t **records;
11758 uint_t numrecords;
11759 int i;
11760
11761 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
11762 &records, &numrecords) == 0);
11763 for (i = 0; i < numrecords; i++) {
11764 nvlist_t *rec = records[i];
11765 char tbuf[64] = "";
11766
11767 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
11768 time_t tsec;
11769 struct tm t;
11770
11771 tsec = fnvlist_lookup_uint64(records[i],
11772 ZPOOL_HIST_TIME);
11773 (void) localtime_r(&tsec, &t);
11774 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
11775 }
11776
11777 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
11778 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
11779 ZPOOL_HIST_ELAPSED_NS);
11780 (void) snprintf(tbuf + strlen(tbuf),
11781 sizeof (tbuf) - strlen(tbuf),
11782 " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
11783 }
11784
11785 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
11786 (void) printf("%s %s", tbuf,
11787 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
11788 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
11789 int ievent =
11790 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
11791 if (!cb->internal)
11792 continue;
11793 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
11794 (void) printf("%s unrecognized record:\n",
11795 tbuf);
11796 dump_nvlist(rec, 4);
11797 continue;
11798 }
11799 (void) printf("%s [internal %s txg:%lld] %s", tbuf,
11800 zfs_history_event_names[ievent],
11801 (longlong_t)fnvlist_lookup_uint64(
11802 rec, ZPOOL_HIST_TXG),
11803 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
11804 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
11805 if (!cb->internal)
11806 continue;
11807 (void) printf("%s [txg:%lld] %s", tbuf,
11808 (longlong_t)fnvlist_lookup_uint64(
11809 rec, ZPOOL_HIST_TXG),
11810 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
11811 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
11812 (void) printf(" %s (%llu)",
11813 fnvlist_lookup_string(rec,
11814 ZPOOL_HIST_DSNAME),
11815 (u_longlong_t)fnvlist_lookup_uint64(rec,
11816 ZPOOL_HIST_DSID));
11817 }
11818 (void) printf(" %s", fnvlist_lookup_string(rec,
11819 ZPOOL_HIST_INT_STR));
11820 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
11821 if (!cb->internal)
11822 continue;
11823 (void) printf("%s ioctl %s\n", tbuf,
11824 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
11825 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
11826 (void) printf(" input:\n");
11827 dump_nvlist(fnvlist_lookup_nvlist(rec,
11828 ZPOOL_HIST_INPUT_NVL), 8);
11829 }
11830 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
11831 (void) printf(" output:\n");
11832 dump_nvlist(fnvlist_lookup_nvlist(rec,
11833 ZPOOL_HIST_OUTPUT_NVL), 8);
11834 }
11835 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
11836 (void) printf(" output nvlist omitted; "
11837 "original size: %lldKB\n",
11838 (longlong_t)fnvlist_lookup_int64(rec,
11839 ZPOOL_HIST_OUTPUT_SIZE) / 1024);
11840 }
11841 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
11842 (void) printf(" errno: %lld\n",
11843 (longlong_t)fnvlist_lookup_int64(rec,
11844 ZPOOL_HIST_ERRNO));
11845 }
11846 } else {
11847 if (!cb->internal)
11848 continue;
11849 (void) printf("%s unrecognized record:\n", tbuf);
11850 dump_nvlist(rec, 4);
11851 }
11852
11853 if (!cb->longfmt) {
11854 (void) printf("\n");
11855 continue;
11856 }
11857 (void) printf(" [");
11858 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
11859 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
11860 struct passwd *pwd = getpwuid(who);
11861 (void) printf("user %d ", (int)who);
11862 if (pwd != NULL)
11863 (void) printf("(%s) ", pwd->pw_name);
11864 }
11865 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
11866 (void) printf("on %s",
11867 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
11868 }
11869 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
11870 (void) printf(":%s",
11871 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
11872 }
11873
11874 (void) printf("]");
11875 (void) printf("\n");
11876 }
11877 }
11878
11879 /*
11880 * Print out the command history for a specific pool.
11881 */
11882 static int
11883 get_history_one(zpool_handle_t *zhp, void *data)
11884 {
11885 nvlist_t *nvhis;
11886 int ret;
11887 hist_cbdata_t *cb = (hist_cbdata_t *)data;
11888 uint64_t off = 0;
11889 boolean_t eof = B_FALSE;
11890
11891 cb->first = B_FALSE;
11892
11893 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
11894
11895 while (!eof) {
11896 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
11897 return (ret);
11898
11899 print_history_records(nvhis, cb);
11900 nvlist_free(nvhis);
11901 }
11902 (void) printf("\n");
11903
11904 return (ret);
11905 }
11906
11907 /*
11908 * zpool history <pool>
11909 *
11910 * Displays the history of commands that modified pools.
11911 */
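/*
 * Example session (output is illustrative only):
 *
 *	# zpool history -l tank
 *	History for 'tank':
 *	2024-01-01.12:00:00 zpool create tank mirror sda sdb [user 0 (root) on host1]
 */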
11912 int
11913 zpool_do_history(int argc, char **argv)
11914 {
11915 hist_cbdata_t cbdata = { 0 };
11916 int ret;
11917 int c;
11918
11919 cbdata.first = B_TRUE;
11920 /* check options */
11921 while ((c = getopt(argc, argv, "li")) != -1) {
11922 switch (c) {
11923 case 'l':
11924 cbdata.longfmt = B_TRUE;
11925 break;
11926 case 'i':
11927 cbdata.internal = B_TRUE;
11928 break;
11929 case '?':
11930 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
11931 optopt);
11932 usage(B_FALSE);
11933 }
11934 }
11935 argc -= optind;
11936 argv += optind;
11937
11938 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
11939 B_FALSE, get_history_one, &cbdata);
11940
11941 if (argc == 0 && cbdata.first == B_TRUE) {
11942 (void) fprintf(stderr, gettext("no pools available\n"));
11943 return (0);
11944 }
11945
11946 return (ret);
11947 }
11948
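/* Options for 'zpool events', parsed from the command line */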
11949 typedef struct ev_opts {
11950 int verbose;
11951 int scripted;
11952 int follow;
11953 int clear;
11954 char poolname[ZFS_MAX_DATASET_NAME_LEN];
11955 } ev_opts_t;
11956
11957 static void
11958 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
11959 {
11960 char ctime_str[26], str[32];
11961 const char *ptr;
11962 int64_t *tv;
11963 uint_t n;
11964
11965 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
11966 memset(str, ' ', 32);
11967 (void) ctime_r((const time_t *)&tv[0], ctime_str);
11968 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
11969 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
11970 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
11971 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
11972 if (opts->scripted)
11973 (void) printf(gettext("%s\t"), str);
11974 else
11975 (void) printf(gettext("%s "), str);
11976
11977 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
11978 (void) printf(gettext("%s\n"), ptr);
11979 }
11980
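/*
 * Recursively pretty-print every nvpair in an event nvlist, indented by
 * 'depth' spaces. Selected numeric payloads (zio stage, pipeline,
 * priority, and flags; vdev states) are also decoded into symbolic
 * strings.
 */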
11981 static void
11982 zpool_do_events_nvprint(nvlist_t *nvl, int depth)
11983 {
11984 nvpair_t *nvp;
11985 static char flagstr[256];
11986
11987 for (nvp = nvlist_next_nvpair(nvl, NULL);
11988 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
11989
11990 data_type_t type = nvpair_type(nvp);
11991 const char *name = nvpair_name(nvp);
11992
11993 boolean_t b;
11994 uint8_t i8;
11995 uint16_t i16;
11996 uint32_t i32;
11997 uint64_t i64;
11998 const char *str;
11999 nvlist_t *cnv;
12000
12001 printf(gettext("%*s%s = "), depth, "", name);
12002
12003 switch (type) {
12004 case DATA_TYPE_BOOLEAN:
12005 printf(gettext("%s"), "1");
12006 break;
12007
12008 case DATA_TYPE_BOOLEAN_VALUE:
12009 (void) nvpair_value_boolean_value(nvp, &b);
12010 printf(gettext("%s"), b ? "1" : "0");
12011 break;
12012
12013 case DATA_TYPE_BYTE:
12014 (void) nvpair_value_byte(nvp, &i8);
12015 printf(gettext("0x%x"), i8);
12016 break;
12017
12018 case DATA_TYPE_INT8:
12019 (void) nvpair_value_int8(nvp, (void *)&i8);
12020 printf(gettext("0x%x"), i8);
12021 break;
12022
12023 case DATA_TYPE_UINT8:
12024 (void) nvpair_value_uint8(nvp, &i8);
12025 printf(gettext("0x%x"), i8);
12026 break;
12027
12028 case DATA_TYPE_INT16:
12029 (void) nvpair_value_int16(nvp, (void *)&i16);
12030 printf(gettext("0x%x"), i16);
12031 break;
12032
12033 case DATA_TYPE_UINT16:
12034 (void) nvpair_value_uint16(nvp, &i16);
12035 printf(gettext("0x%x"), i16);
12036 break;
12037
12038 case DATA_TYPE_INT32:
12039 (void) nvpair_value_int32(nvp, (void *)&i32);
12040 printf(gettext("0x%x"), i32);
12041 break;
12042
12043 case DATA_TYPE_UINT32:
12044 (void) nvpair_value_uint32(nvp, &i32);
12045 if (strcmp(name,
12046 FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 ||
12047 strcmp(name,
12048 FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) {
12049 zfs_valstr_zio_stage(i32, flagstr,
12050 sizeof (flagstr));
12051 printf(gettext("0x%x [%s]"), i32, flagstr);
12052 } else if (strcmp(name,
12053 FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
12054 zfs_valstr_zio_priority(i32, flagstr,
12055 sizeof (flagstr));
12056 printf(gettext("0x%x [%s]"), i32, flagstr);
12057 } else {
12058 printf(gettext("0x%x"), i32);
12059 }
12060 break;
12061
12062 case DATA_TYPE_INT64:
12063 (void) nvpair_value_int64(nvp, (void *)&i64);
12064 printf(gettext("0x%llx"), (u_longlong_t)i64);
12065 break;
12066
12067 case DATA_TYPE_UINT64:
12068 (void) nvpair_value_uint64(nvp, &i64);
12069 /*
12070 * translate vdev state values into readable
12071 * strings to aid 'zpool events' consumers
12072 */
12073 if (strcmp(name,
12074 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
12075 strcmp(name,
12076 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
12077 printf(gettext("\"%s\" (0x%llx)"),
12078 zpool_state_to_name(i64, VDEV_AUX_NONE),
12079 (u_longlong_t)i64);
12080 } else if (strcmp(name,
12081 FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) {
12082 zfs_valstr_zio_flag(i64, flagstr,
12083 sizeof (flagstr));
12084 printf(gettext("0x%llx [%s]"),
12085 (u_longlong_t)i64, flagstr);
12086 } else {
12087 printf(gettext("0x%llx"), (u_longlong_t)i64);
12088 }
12089 break;
12090
12091 case DATA_TYPE_HRTIME:
12092 (void) nvpair_value_hrtime(nvp, (void *)&i64);
12093 printf(gettext("0x%llx"), (u_longlong_t)i64);
12094 break;
12095
12096 case DATA_TYPE_STRING:
12097 (void) nvpair_value_string(nvp, &str);
12098 printf(gettext("\"%s\""), str ? str : "<NULL>");
12099 break;
12100
12101 case DATA_TYPE_NVLIST:
12102 printf(gettext("(embedded nvlist)\n"));
12103 (void) nvpair_value_nvlist(nvp, &cnv);
12104 zpool_do_events_nvprint(cnv, depth + 8);
12105 printf(gettext("%*s(end %s)"), depth, "", name);
12106 break;
12107
12108 case DATA_TYPE_NVLIST_ARRAY: {
12109 nvlist_t **val;
12110 uint_t i, nelem;
12111
12112 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
12113 printf(gettext("(%d embedded nvlists)\n"), nelem);
12114 for (i = 0; i < nelem; i++) {
12115 printf(gettext("%*s%s[%d] = %s\n"),
12116 depth, "", name, i, "(embedded nvlist)");
12117 zpool_do_events_nvprint(val[i], depth + 8);
12118 printf(gettext("%*s(end %s[%i])\n"),
12119 depth, "", name, i);
12120 }
12121 printf(gettext("%*s(end %s)\n"), depth, "", name);
12122 }
12123 break;
12124
12125 case DATA_TYPE_INT8_ARRAY: {
12126 int8_t *val;
12127 uint_t i, nelem;
12128
12129 (void) nvpair_value_int8_array(nvp, &val, &nelem);
12130 for (i = 0; i < nelem; i++)
12131 printf(gettext("0x%x "), val[i]);
12132
12133 break;
12134 }
12135
12136 case DATA_TYPE_UINT8_ARRAY: {
12137 uint8_t *val;
12138 uint_t i, nelem;
12139
12140 (void) nvpair_value_uint8_array(nvp, &val, &nelem);
12141 for (i = 0; i < nelem; i++)
12142 printf(gettext("0x%x "), val[i]);
12143
12144 break;
12145 }
12146
12147 case DATA_TYPE_INT16_ARRAY: {
12148 int16_t *val;
12149 uint_t i, nelem;
12150
12151 (void) nvpair_value_int16_array(nvp, &val, &nelem);
12152 for (i = 0; i < nelem; i++)
12153 printf(gettext("0x%x "), val[i]);
12154
12155 break;
12156 }
12157
12158 case DATA_TYPE_UINT16_ARRAY: {
12159 uint16_t *val;
12160 uint_t i, nelem;
12161
12162 (void) nvpair_value_uint16_array(nvp, &val, &nelem);
12163 for (i = 0; i < nelem; i++)
12164 printf(gettext("0x%x "), val[i]);
12165
12166 break;
12167 }
12168
12169 case DATA_TYPE_INT32_ARRAY: {
12170 int32_t *val;
12171 uint_t i, nelem;
12172
12173 (void) nvpair_value_int32_array(nvp, &val, &nelem);
12174 for (i = 0; i < nelem; i++)
12175 printf(gettext("0x%x "), val[i]);
12176
12177 break;
12178 }
12179
12180 case DATA_TYPE_UINT32_ARRAY: {
12181 uint32_t *val;
12182 uint_t i, nelem;
12183
12184 (void) nvpair_value_uint32_array(nvp, &val, &nelem);
12185 for (i = 0; i < nelem; i++)
12186 printf(gettext("0x%x "), val[i]);
12187
12188 break;
12189 }
12190
12191 case DATA_TYPE_INT64_ARRAY: {
12192 int64_t *val;
12193 uint_t i, nelem;
12194
12195 (void) nvpair_value_int64_array(nvp, &val, &nelem);
12196 for (i = 0; i < nelem; i++)
12197 printf(gettext("0x%llx "),
12198 (u_longlong_t)val[i]);
12199
12200 break;
12201 }
12202
12203 case DATA_TYPE_UINT64_ARRAY: {
12204 uint64_t *val;
12205 uint_t i, nelem;
12206
12207 (void) nvpair_value_uint64_array(nvp, &val, &nelem);
12208 for (i = 0; i < nelem; i++)
12209 printf(gettext("0x%llx "),
12210 (u_longlong_t)val[i]);
12211
12212 break;
12213 }
12214
12215 case DATA_TYPE_STRING_ARRAY: {
12216 const char **str;
12217 uint_t i, nelem;
12218
12219 (void) nvpair_value_string_array(nvp, &str, &nelem);
12220 for (i = 0; i < nelem; i++)
12221 printf(gettext("\"%s\" "),
12222 str[i] ? str[i] : "<NULL>");
12223
12224 break;
12225 }
12226
12227 case DATA_TYPE_BOOLEAN_ARRAY:
12228 case DATA_TYPE_BYTE_ARRAY:
12229 case DATA_TYPE_DOUBLE:
12230 case DATA_TYPE_DONTCARE:
12231 case DATA_TYPE_UNKNOWN:
12232 printf(gettext("<unknown>"));
12233 break;
12234 }
12235
12236 printf(gettext("\n"));
12237 }
12238 }
12239
12240 static int
12241 zpool_do_events_next(ev_opts_t *opts)
12242 {
12243 nvlist_t *nvl;
12244 int zevent_fd, ret, dropped;
12245 const char *pool;
12246
12247 zevent_fd = open(ZFS_DEV, O_RDWR);
12248 VERIFY(zevent_fd >= 0);
12249
12250 if (!opts->scripted)
12251 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
12252
12253 while (1) {
12254 ret = zpool_events_next(g_zfs, &nvl, &dropped,
12255 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
12256 if (ret || nvl == NULL)
12257 break;
12258
12259 if (dropped > 0)
12260 (void) printf(gettext("dropped %d events\n"), dropped);
12261
12262 if (strlen(opts->poolname) > 0 &&
12263 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
12264 strcmp(opts->poolname, pool) != 0)
12265 continue;
12266
12267 zpool_do_events_short(nvl, opts);
12268
12269 if (opts->verbose) {
12270 zpool_do_events_nvprint(nvl, 8);
12271 printf(gettext("\n"));
12272 }
12273 (void) fflush(stdout);
12274
12275 nvlist_free(nvl);
12276 }
12277
12278 VERIFY(0 == close(zevent_fd));
12279
12280 return (ret);
12281 }
12282
12283 static int
12284 zpool_do_events_clear(void)
12285 {
12286 int count, ret;
12287
12288 ret = zpool_events_clear(g_zfs, &count);
12289 if (!ret)
12290 (void) printf(gettext("cleared %d events\n"), count);
12291
12292 return (ret);
12293 }
12294
12295 /*
12296 * zpool events [-vHf [pool] | -c]
12297 *
12298 * Displays events logged by ZFS.
12299 */
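/*
 * Example session (output is illustrative only):
 *
 *	# zpool events -v tank
 *	TIME                           CLASS
 *	Jun 30 2024 21:49:08.123456789 ereport.fs.zfs.checksum
 *	        class = "ereport.fs.zfs.checksum"
 *	        pool = "tank"
 *	        ...
 */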
12300 int
12301 zpool_do_events(int argc, char **argv)
12302 {
12303 ev_opts_t opts = { 0 };
12304 int ret;
12305 int c;
12306
12307 /* check options */
12308 while ((c = getopt(argc, argv, "vHfc")) != -1) {
12309 switch (c) {
12310 case 'v':
12311 opts.verbose = 1;
12312 break;
12313 case 'H':
12314 opts.scripted = 1;
12315 break;
12316 case 'f':
12317 opts.follow = 1;
12318 break;
12319 case 'c':
12320 opts.clear = 1;
12321 break;
12322 case '?':
12323 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12324 optopt);
12325 usage(B_FALSE);
12326 }
12327 }
12328 argc -= optind;
12329 argv += optind;
12330
12331 if (argc > 1) {
12332 (void) fprintf(stderr, gettext("too many arguments\n"));
12333 usage(B_FALSE);
12334 } else if (argc == 1) {
12335 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
12336 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
12337 (void) fprintf(stderr,
12338 gettext("invalid pool name '%s'\n"), opts.poolname);
12339 usage(B_FALSE);
12340 }
12341 }
12342
12343 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
12344 opts.clear) {
12345 (void) fprintf(stderr,
12346 gettext("invalid options combined with -c\n"));
12347 usage(B_FALSE);
12348 }
12349
12350 if (opts.clear)
12351 ret = zpool_do_events_clear();
12352 else
12353 ret = zpool_do_events_next(&opts);
12354
12355 return (ret);
12356 }
12357
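/*
 * Collect the requested properties for a single named vdev of the given
 * pool, either printing them or (in JSON mode) accumulating them into
 * the "vdevs" object.
 */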
12358 static int
12359 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
12360 {
12361 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12362 char value[ZFS_MAXPROPLEN];
12363 zprop_source_t srctype;
12364 nvlist_t *props, *item, *d;
12365 props = item = d = NULL;
12366
12367 if (cbp->cb_json) {
12368 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs");
12369 if (d == NULL) {
12370 (void) fprintf(stderr, "vdevs obj not found.\n");
12371 exit(1);
12372 }
12373 props = fnvlist_alloc();
12374 }
12375
12376 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
12377 pl = pl->pl_next) {
12378 char *prop_name;
12379 /*
12380 * If the first property is pool name, it is a special
12381 * placeholder that we can skip. This will also skip
12382 * over the name property when 'all' is specified.
12383 */
12384 if (pl->pl_prop == ZPOOL_PROP_NAME &&
12385 pl == cbp->cb_proplist)
12386 continue;
12387
12388 if (pl->pl_prop == ZPROP_INVAL) {
12389 prop_name = pl->pl_user_prop;
12390 } else {
12391 prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
12392 }
12393 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
12394 prop_name, value, sizeof (value), &srctype,
12395 cbp->cb_literal) == 0) {
12396 zprop_collect_property(vdevname, cbp, prop_name,
12397 value, srctype, NULL, NULL, props);
12398 }
12399 }
12400
12401 if (cbp->cb_json) {
12402 if (!nvlist_empty(props)) {
12403 item = fnvlist_alloc();
12404 fill_vdev_info(item, zhp, vdevname, B_TRUE,
12405 cbp->cb_json_as_int);
12406 fnvlist_add_nvlist(item, "properties", props);
12407 fnvlist_add_nvlist(d, vdevname, item);
12408 fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d);
12409 fnvlist_free(item);
12410 }
12411 fnvlist_free(props);
12412 }
12413
12414 return (0);
12415 }
12416
12417 static int
12418 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
12419 {
12420 zpool_handle_t *zhp = zhp_data;
12421 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12422 char *vdevname;
12423 const char *type;
12424 int ret;
12425
12426 /*
12427 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
12428 * pool name for display purposes, which is not desired here. Fall back to
12429 * zpool_vdev_name() when not dealing with the root vdev.
12430 */
12431 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
12432 if (zhp != NULL && strcmp(type, "root") == 0)
12433 vdevname = strdup("root-0");
12434 else
12435 vdevname = zpool_vdev_name(g_zfs, zhp, nv,
12436 cbp->cb_vdevs.cb_name_flags);
12437
12438 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
12439
12440 ret = get_callback_vdev(zhp, vdevname, data);
12441
12442 free(vdevname);
12443
12444 return (ret);
12445 }
12446
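/*
 * for_each_pool() callback for 'zpool get': dispatches to vdev or pool
 * property collection depending on cb_type.
 */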
12447 static int
12448 get_callback(zpool_handle_t *zhp, void *data)
12449 {
12450 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12451 char value[ZFS_MAXPROPLEN];
12452 zprop_source_t srctype;
12453 zprop_list_t *pl;
12454 int vid;
12455 int err = 0;
12456 nvlist_t *props, *item, *d;
12457 props = item = d = NULL;
12458
12459 if (cbp->cb_type == ZFS_TYPE_VDEV) {
12460 if (cbp->cb_json) {
12461 nvlist_t *pool = fnvlist_alloc();
12462 fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int);
12463 fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool);
12464 fnvlist_free(pool);
12465 }
12466
12467 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
12468 for_each_vdev(zhp, get_callback_vdev_cb, data);
12469 } else {
12470 /* Adjust column widths for vdev properties */
12471 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
12472 vid++) {
12473 vdev_expand_proplist(zhp,
12474 cbp->cb_vdevs.cb_names[vid],
12475 &cbp->cb_proplist);
12476 }
12477 /* Display the properties */
12478 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
12479 vid++) {
12480 get_callback_vdev(zhp,
12481 cbp->cb_vdevs.cb_names[vid], data);
12482 }
12483 }
12484 } else {
12485 assert(cbp->cb_type == ZFS_TYPE_POOL);
12486 if (cbp->cb_json) {
12487 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
12488 if (d == NULL) {
12489 (void) fprintf(stderr, "pools obj not found.\n");
12490 exit(1);
12491 }
12492 props = fnvlist_alloc();
12493 }
12494 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
12495 /*
12496 * Skip the special fake placeholder. This will also
12497 * skip over the name property when 'all' is specified.
12498 */
12499 if (pl->pl_prop == ZPOOL_PROP_NAME &&
12500 pl == cbp->cb_proplist)
12501 continue;
12502
12503 if (pl->pl_prop == ZPROP_INVAL &&
12504 zfs_prop_user(pl->pl_user_prop)) {
12505 srctype = ZPROP_SRC_LOCAL;
12506
12507 if (zpool_get_userprop(zhp, pl->pl_user_prop,
12508 value, sizeof (value), &srctype) != 0)
12509 continue;
12510
12511 err = zprop_collect_property(
12512 zpool_get_name(zhp), cbp, pl->pl_user_prop,
12513 value, srctype, NULL, NULL, props);
12514 } else if (pl->pl_prop == ZPROP_INVAL &&
12515 (zpool_prop_feature(pl->pl_user_prop) ||
12516 zpool_prop_unsupported(pl->pl_user_prop))) {
12517 srctype = ZPROP_SRC_LOCAL;
12518
12519 if (zpool_prop_get_feature(zhp,
12520 pl->pl_user_prop, value,
12521 sizeof (value)) == 0) {
12522 err = zprop_collect_property(
12523 zpool_get_name(zhp), cbp,
12524 pl->pl_user_prop, value, srctype,
12525 NULL, NULL, props);
12526 }
12527 } else {
12528 if (zpool_get_prop(zhp, pl->pl_prop, value,
12529 sizeof (value), &srctype,
12530 cbp->cb_literal) != 0)
12531 continue;
12532
12533 err = zprop_collect_property(
12534 zpool_get_name(zhp), cbp,
12535 zpool_prop_to_name(pl->pl_prop),
12536 value, srctype, NULL, NULL, props);
12537 }
12538 if (err != 0)
12539 return (err);
12540 }
12541
12542 if (cbp->cb_json) {
12543 if (!nvlist_empty(props)) {
12544 item = fnvlist_alloc();
12545 fill_pool_info(item, zhp, B_TRUE,
12546 cbp->cb_json_as_int);
12547 fnvlist_add_nvlist(item, "properties", props);
12548 if (cbp->cb_json_pool_key_guid) {
12549 char buf[256];
12550 uint64_t guid = fnvlist_lookup_uint64(
12551 zpool_get_config(zhp, NULL),
12552 ZPOOL_CONFIG_POOL_GUID);
12553 snprintf(buf, 256, "%llu",
12554 (u_longlong_t)guid);
12555 fnvlist_add_nvlist(d, buf, item);
12556 } else {
12557 const char *name = zpool_get_name(zhp);
12558 fnvlist_add_nvlist(d, name, item);
12559 }
12560 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
12561 fnvlist_free(item);
12562 }
12563 fnvlist_free(props);
12564 }
12565 }
12566
12567 return (0);
12568 }
12569
12570 /*
12571 * zpool get [-jHp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
12572 *
12573 * -H Scripted mode. Don't display headers, and separate properties
12574 * by a single tab.
12575 * -o List of columns to display. Defaults to
12576 * "name,property,value,source".
12577 * -p Display values in parsable (exact) format.
12578 * -j Display output in JSON format.
12579 * --json-int Display numbers as integers instead of strings.
12580 * --json-pool-key-guid Set pool GUID as key for pool objects.
12581 *
12582 * Get properties of pools in the system. Output space statistics
12583 * for each one as well as other attributes.
12584 */
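/*
 * Example session (output is illustrative only):
 *
 *	# zpool get size,health tank
 *	NAME  PROPERTY  VALUE   SOURCE
 *	tank  size      9.94G   -
 *	tank  health    ONLINE  -
 */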
12585 int
12586 zpool_do_get(int argc, char **argv)
12587 {
12588 zprop_get_cbdata_t cb = { 0 };
12589 zprop_list_t fake_name = { 0 };
12590 int ret;
12591 int c, i;
12592 char *propstr = NULL;
12593 char *vdev = NULL;
12594 nvlist_t *data = NULL;
12595
12596 cb.cb_first = B_TRUE;
12597
12598 /*
12599 * Set up default columns and sources.
12600 */
12601 cb.cb_sources = ZPROP_SRC_ALL;
12602 cb.cb_columns[0] = GET_COL_NAME;
12603 cb.cb_columns[1] = GET_COL_PROPERTY;
12604 cb.cb_columns[2] = GET_COL_VALUE;
12605 cb.cb_columns[3] = GET_COL_SOURCE;
12606 cb.cb_type = ZFS_TYPE_POOL;
12607 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
12608 current_prop_type = cb.cb_type;
12609
12610 struct option long_options[] = {
12611 {"json", no_argument, NULL, 'j'},
12612 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
12613 {"json-pool-key-guid", no_argument, NULL,
12614 ZPOOL_OPTION_POOL_KEY_GUID},
12615 {0, 0, 0, 0}
12616 };
12617
12618 /* check options */
12619 while ((c = getopt_long(argc, argv, ":jHpo:", long_options,
12620 NULL)) != -1) {
12621 switch (c) {
12622 case 'p':
12623 cb.cb_literal = B_TRUE;
12624 break;
12625 case 'H':
12626 cb.cb_scripted = B_TRUE;
12627 break;
12628 case 'j':
12629 cb.cb_json = B_TRUE;
12630 cb.cb_jsobj = zpool_json_schema(0, 1);
12631 data = fnvlist_alloc();
12632 break;
12633 case ZPOOL_OPTION_POOL_KEY_GUID:
12634 cb.cb_json_pool_key_guid = B_TRUE;
12635 break;
12636 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
12637 cb.cb_json_as_int = B_TRUE;
12638 cb.cb_literal = B_TRUE;
12639 break;
12640 case 'o':
12641 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
12642 i = 0;
12643
12644 for (char *tok; (tok = strsep(&optarg, ",")); ) {
12645 static const char *const col_opts[] =
12646 { "name", "property", "value", "source",
12647 "all" };
12648 static const zfs_get_column_t col_cols[] =
12649 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
12650 GET_COL_SOURCE };
12651
12652 if (i == ZFS_GET_NCOLS - 1) {
12653 (void) fprintf(stderr, gettext("too "
12654 "many fields given to -o "
12655 "option\n"));
12656 usage(B_FALSE);
12657 }
12658
12659 for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
12660 if (strcmp(tok, col_opts[c]) == 0)
12661 goto found;
12662
12663 (void) fprintf(stderr,
12664 gettext("invalid column name '%s'\n"), tok);
12665 usage(B_FALSE);
12666
12667 found:
12668 if (c >= 4) {
12669 if (i > 0) {
12670 (void) fprintf(stderr,
12671 gettext("\"all\" conflicts "
12672 "with specific fields "
12673 "given to -o option\n"));
12674 usage(B_FALSE);
12675 }
12676
12677 memcpy(cb.cb_columns, col_cols,
12678 sizeof (col_cols));
12679 i = ZFS_GET_NCOLS - 1;
12680 } else
12681 cb.cb_columns[i++] = col_cols[c];
12682 }
12683 break;
12684 case '?':
12685 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12686 optopt);
12687 usage(B_FALSE);
12688 }
12689 }
12690
12691 argc -= optind;
12692 argv += optind;
12693
12694 if (!cb.cb_json && cb.cb_json_as_int) {
12695 (void) fprintf(stderr, gettext("'--json-int' only works with"
12696 " '-j' option\n"));
12697 usage(B_FALSE);
12698 }
12699
12700 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
12701 (void) fprintf(stderr, gettext("'--json-pool-key-guid' only"
12702 " works with '-j' option\n"));
12703 usage(B_FALSE);
12704 }
12705
12706 if (argc < 1) {
12707 (void) fprintf(stderr, gettext("missing property "
12708 "argument\n"));
12709 usage(B_FALSE);
12710 }
12711
12712 /* Properties list is needed later by zprop_get_list() */
12713 propstr = argv[0];
12714
12715 argc--;
12716 argv++;
12717
12718 if (argc == 0) {
12719 /* No args, so just print the defaults. */
12720 } else if (are_all_pools(argc, argv)) {
12721 /* All the args are pool names */
12722 } else if (are_all_pools(1, argv)) {
12723 /* The first arg is a pool name */
12724 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
12725 (argc == 2 && strcmp(argv[1], "root") == 0) ||
12726 are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
12727 &cb.cb_vdevs)) {
12728
12729 if (strcmp(argv[1], "root") == 0)
12730 vdev = strdup("root-0");
12731 else
12732 vdev = strdup(argv[1]);
12733
12734 /* ... and the rest are vdev names */
12735 cb.cb_vdevs.cb_names = &vdev;
12736 cb.cb_vdevs.cb_names_count = argc - 1;
12737 cb.cb_type = ZFS_TYPE_VDEV;
12738 argc = 1; /* One pool to process */
12739 } else {
12740 if (cb.cb_json) {
12741 nvlist_free(cb.cb_jsobj);
12742 nvlist_free(data);
12743 }
12744 fprintf(stderr, gettext("Expected a list of vdevs in"
12745 " \"%s\", but got:\n"), argv[0]);
12746 error_list_unresolved_vdevs(argc - 1, argv + 1,
12747 argv[0], &cb.cb_vdevs);
12748 fprintf(stderr, "\n");
12749 usage(B_FALSE);
12750 return (1);
12751 }
12752 } else {
12753 if (cb.cb_json) {
12754 nvlist_free(cb.cb_jsobj);
12755 nvlist_free(data);
12756 }
12757 /*
12758 * The first arg isn't the name of a valid pool.
12759 */
12760 fprintf(stderr, gettext("Cannot get properties of %s: "
12761 "no such pool available.\n"), argv[0]);
12762 return (1);
12763 }
12764
12765 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
12766 cb.cb_type) != 0) {
12767 /* Use correct list of valid properties (pool or vdev) */
12768 current_prop_type = cb.cb_type;
12769 usage(B_FALSE);
12770 }
12771
12772 if (cb.cb_proplist != NULL) {
12773 fake_name.pl_prop = ZPOOL_PROP_NAME;
12774 fake_name.pl_width = strlen(gettext("NAME"));
12775 fake_name.pl_next = cb.cb_proplist;
12776 cb.cb_proplist = &fake_name;
12777 }
12778
12779 if (cb.cb_json) {
12780 if (cb.cb_type == ZFS_TYPE_VDEV)
12781 fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data);
12782 else
12783 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
12784 fnvlist_free(data);
12785 }
12786
12787 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
12788 cb.cb_literal, get_callback, &cb);
12789
12790 if (ret == 0 && cb.cb_json)
12791 zcmd_print_json(cb.cb_jsobj);
12792 else if (ret != 0 && cb.cb_json)
12793 nvlist_free(cb.cb_jsobj);
12794
12795 if (cb.cb_proplist == &fake_name)
12796 zprop_free_list(fake_name.pl_next);
12797 else
12798 zprop_free_list(cb.cb_proplist);
12799
12800 if (vdev != NULL)
12801 free(vdev);
12802
12803 return (ret);
12804 }
12805
12806 typedef struct set_cbdata {
12807 char *cb_propname;
12808 char *cb_value;
12809 zfs_type_t cb_type;
12810 vdev_cbdata_t cb_vdevs;
12811 boolean_t cb_any_successful;
12812 } set_cbdata_t;
12813
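/*
 * Set a single property on a pool, with extra checks when the property is
 * 'compatibility' or a feature@ property: enabling a feature that is not
 * in the pool's current compatibility set is refused.
 */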
12814 static int
12815 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
12816 {
12817 int error;
12818
12819 /* Check if we have out-of-bounds features */
12820 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
12821 boolean_t features[SPA_FEATURES];
12822 if (zpool_do_load_compat(cb->cb_value, features) !=
12823 ZPOOL_COMPATIBILITY_OK)
12824 return (-1);
12825
12826 nvlist_t *enabled = zpool_get_features(zhp);
12827 spa_feature_t i;
12828 for (i = 0; i < SPA_FEATURES; i++) {
12829 const char *fguid = spa_feature_table[i].fi_guid;
12830 if (nvlist_exists(enabled, fguid) && !features[i])
12831 break;
12832 }
12833 if (i < SPA_FEATURES)
12834 (void) fprintf(stderr, gettext("Warning: one or "
12835 "more features already enabled on pool '%s'\n"
12836 "are not present in this compatibility set.\n"),
12837 zpool_get_name(zhp));
12838 }
12839
12840 /* if we're setting a feature, check it's in compatibility set */
12841 if (zpool_prop_feature(cb->cb_propname) &&
12842 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
12843 char *fname = strchr(cb->cb_propname, '@') + 1;
12844 spa_feature_t f;
12845
12846 if (zfeature_lookup_name(fname, &f) == 0) {
12847 char compat[ZFS_MAXPROPLEN];
12848 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
12849 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
12850 compat[0] = '\0';
12851
12852 boolean_t features[SPA_FEATURES];
12853 if (zpool_do_load_compat(compat, features) !=
12854 ZPOOL_COMPATIBILITY_OK) {
12855 (void) fprintf(stderr, gettext("Error: "
12856 "cannot enable feature '%s' on pool '%s'\n"
12857 "because the pool's 'compatibility' "
12858 "property cannot be parsed.\n"),
12859 fname, zpool_get_name(zhp));
12860 return (-1);
12861 }
12862
12863 if (!features[f]) {
12864 (void) fprintf(stderr, gettext("Error: "
12865 "cannot enable feature '%s' on pool '%s'\n"
12866 "as it is not specified in this pool's "
12867 "current compatibility set.\n"
12868 "Consider setting 'compatibility' to a "
12869 "less restrictive set, or to 'off'.\n"),
12870 fname, zpool_get_name(zhp));
12871 return (-1);
12872 }
12873 }
12874 }
12875
12876 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
12877
12878 return (error);
12879 }
12880
12881 static int
12882 set_callback(zpool_handle_t *zhp, void *data)
12883 {
12884 int error;
12885 set_cbdata_t *cb = (set_cbdata_t *)data;
12886
12887 if (cb->cb_type == ZFS_TYPE_VDEV) {
12888 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
12889 cb->cb_propname, cb->cb_value);
12890 } else {
12891 assert(cb->cb_type == ZFS_TYPE_POOL);
12892 error = set_pool_callback(zhp, cb);
12893 }
12894
12895 cb->cb_any_successful = !error;
12896 return (error);
12897 }
12898
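/*
 * zpool set <property=value> <pool> [<vdev>]
 *
 * Sets the given property on the specified pool, or on the specified vdev
 * when a vdev name is also supplied.
 */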
12899 int
12900 zpool_do_set(int argc, char **argv)
12901 {
12902 set_cbdata_t cb = { 0 };
12903 int error;
12904 char *vdev = NULL;
12905
12906 current_prop_type = ZFS_TYPE_POOL;
12907 if (argc > 1 && argv[1][0] == '-') {
12908 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12909 argv[1][1]);
12910 usage(B_FALSE);
12911 }
12912
12913 if (argc < 2) {
12914 (void) fprintf(stderr, gettext("missing property=value "
12915 "argument\n"));
12916 usage(B_FALSE);
12917 }
12918
12919 if (argc < 3) {
12920 (void) fprintf(stderr, gettext("missing pool name\n"));
12921 usage(B_FALSE);
12922 }
12923
12924 if (argc > 4) {
12925 (void) fprintf(stderr, gettext("too many pool names\n"));
12926 usage(B_FALSE);
12927 }
12928
12929 cb.cb_propname = argv[1];
12930 cb.cb_type = ZFS_TYPE_POOL;
12931 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
12932 cb.cb_value = strchr(cb.cb_propname, '=');
12933 if (cb.cb_value == NULL) {
12934 (void) fprintf(stderr, gettext("missing value in "
12935 "property=value argument\n"));
12936 usage(B_FALSE);
12937 }
12938
12939 *(cb.cb_value) = '\0';
12940 cb.cb_value++;
12941 argc -= 2;
12942 argv += 2;
12943
12944 /* argv[0] is pool name */
12945 if (!is_pool(argv[0])) {
12946 (void) fprintf(stderr,
12947 gettext("cannot open '%s': is not a pool\n"), argv[0]);
12948 return (EINVAL);
12949 }
12950
12951 /* argv[1], when supplied, is vdev name */
12952 if (argc == 2) {
12953
12954 if (strcmp(argv[1], "root") == 0)
12955 vdev = strdup("root-0");
12956 else
12957 vdev = strdup(argv[1]);
12958
12959 if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
12960 (void) fprintf(stderr, gettext(
12961 "cannot find '%s' in '%s': device not in pool\n"),
12962 vdev, argv[0]);
12963 free(vdev);
12964 return (EINVAL);
12965 }
12966 cb.cb_vdevs.cb_names = &vdev;
12967 cb.cb_vdevs.cb_names_count = 1;
12968 cb.cb_type = ZFS_TYPE_VDEV;
12969 }
12970
12971 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
12972 B_FALSE, set_callback, &cb);
12973
12974 if (vdev != NULL)
12975 free(vdev);
12976
12977 return (error);
12978 }
12979
12980 /* Add up the total number of bytes left to initialize/trim across all vdevs */
12981 static uint64_t
12982 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
12983 {
12984 uint64_t bytes_remaining;
12985 nvlist_t **child;
12986 uint_t c, children;
12987 vdev_stat_t *vs;
12988
12989 assert(activity == ZPOOL_WAIT_INITIALIZE ||
12990 activity == ZPOOL_WAIT_TRIM);
12991
12992 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
12993 (uint64_t **)&vs, &c) == 0);
12994
12995 if (activity == ZPOOL_WAIT_INITIALIZE &&
12996 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
12997 bytes_remaining = vs->vs_initialize_bytes_est -
12998 vs->vs_initialize_bytes_done;
12999 else if (activity == ZPOOL_WAIT_TRIM &&
13000 vs->vs_trim_state == VDEV_TRIM_ACTIVE)
13001 bytes_remaining = vs->vs_trim_bytes_est -
13002 vs->vs_trim_bytes_done;
13003 else
13004 bytes_remaining = 0;
13005
13006 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13007 &child, &children) != 0)
13008 children = 0;
13009
13010 for (c = 0; c < children; c++)
13011 bytes_remaining += vdev_activity_remaining(child[c], activity);
13012
13013 return (bytes_remaining);
13014 }
13015
13016 /* Add up the total number of bytes left to rebuild across top-level vdevs */
13017 static uint64_t
13018 vdev_activity_top_remaining(nvlist_t *nv)
13019 {
13020 uint64_t bytes_remaining = 0;
13021 nvlist_t **child;
13022 uint_t children;
13023 int error;
13024
13025 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13026 &child, &children) != 0)
13027 children = 0;
13028
13029 for (uint_t c = 0; c < children; c++) {
13030 vdev_rebuild_stat_t *vrs;
13031 uint_t i;
13032
13033 error = nvlist_lookup_uint64_array(child[c],
13034 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
13035 if (error == 0) {
13036 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
13037 bytes_remaining += (vrs->vrs_bytes_est -
13038 vrs->vrs_bytes_rebuilt);
13039 }
13040 }
13041 }
13042
13043 return (bytes_remaining);
13044 }
13045
13046 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
13047 static boolean_t
13048 vdev_any_spare_replacing(nvlist_t *nv)
13049 {
13050 nvlist_t **child;
13051 uint_t c, children;
13052 const char *vdev_type;
13053
13054 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
13055
13056 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
13057 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
13058 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
13059 return (B_TRUE);
13060 }
13061
13062 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13063 &child, &children) != 0)
13064 children = 0;
13065
13066 for (c = 0; c < children; c++) {
13067 if (vdev_any_spare_replacing(child[c]))
13068 return (B_TRUE);
13069 }
13070
13071 return (B_FALSE);
13072 }
13073
13074 typedef struct wait_data {
13075 char *wd_poolname;
13076 boolean_t wd_scripted;
13077 boolean_t wd_exact;
13078 boolean_t wd_headers_once;
13079 boolean_t wd_should_exit;
13080 /* Which activities to wait for */
13081 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
13082 float wd_interval;
13083 pthread_cond_t wd_cv;
13084 pthread_mutex_t wd_mutex;
13085 } wait_data_t;
13086
13087 /*
13088 * Print to stdout a single line containing one column for each activity we
13089 * are waiting for, specifying how many bytes of work are left for that
13090 * activity.
13091 */
13092 static void
13093 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
13094 {
13095 nvlist_t *config, *nvroot;
13096 uint_t c;
13097 int i;
13098 pool_checkpoint_stat_t *pcs = NULL;
13099 pool_scan_stat_t *pss = NULL;
13100 pool_removal_stat_t *prs = NULL;
13101 pool_raidz_expand_stat_t *pres = NULL;
13102 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
13103 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
13104 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
13105
13106 /* Calculate the width of each column */
13107 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13108 /*
13109 * Make sure we have enough space in the col for pretty-printed
13110 * numbers and for the column header, and then leave a couple
13111 * spaces between cols for readability.
13112 */
13113 col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
13114 }
13115
13116 if (timestamp_fmt != NODATE)
13117 print_timestamp(timestamp_fmt);
13118
13119 /* Print header if appropriate */
13120 int term_height = terminal_height();
13121 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
13122 row % (term_height-1) == 0);
13123 if (!wd->wd_scripted && (row == 0 || reprint_header)) {
13124 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13125 if (wd->wd_enabled[i])
13126 (void) printf("%*s", col_widths[i], headers[i]);
13127 }
13128 (void) fputc('\n', stdout);
13129 }
13130
13131 /* Bytes of work remaining in each activity */
13132 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
13133
13134 bytes_rem[ZPOOL_WAIT_FREE] =
13135 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
13136
13137 config = zpool_get_config(zhp, NULL);
13138 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
13139
	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
	if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
		bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
	if (prs != NULL && prs->prs_state == DSS_SCANNING)
		bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
		    prs->prs_copied;

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
	if (pss != NULL && pss->pss_state == DSS_SCANNING &&
	    pss->pss_pass_scrub_pause == 0) {
		int64_t rem = pss->pss_to_examine - pss->pss_issued;
		if (pss->pss_func == POOL_SCAN_SCRUB)
			bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
		else
			bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
	} else if (check_rebuilding(nvroot, NULL)) {
		bytes_rem[ZPOOL_WAIT_RESILVER] =
		    vdev_activity_top_remaining(nvroot);
	}

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
	if (pres != NULL && pres->pres_state == DSS_SCANNING) {
		int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
		bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
	}

	bytes_rem[ZPOOL_WAIT_INITIALIZE] =
	    vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
	bytes_rem[ZPOOL_WAIT_TRIM] =
	    vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);

	/*
	 * A replace finishes after resilvering finishes, so the amount of work
	 * left for a replace is the same as for resilvering.
	 *
	 * It isn't quite correct to say that if we have any 'spare' or
	 * 'replacing' vdevs and a resilver is happening, then a replace is in
	 * progress, like we do here. When a hot spare is used, the faulted
	 * vdev is not removed after the hot spare is resilvered, so the
	 * parent 'spare' vdev is not removed either. So we could have a
	 * 'spare' vdev, but be resilvering for a different reason. However,
	 * we use it as a heuristic because we don't have access to the DTLs,
	 * which could tell us whether or not we have really finished
	 * resilvering a hot spare.
	 */
	if (vdev_any_spare_replacing(nvroot))
		bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];

	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
		char buf[64];
		if (!wd->wd_enabled[i])
			continue;

		if (wd->wd_exact) {
			(void) snprintf(buf, sizeof (buf), "%" PRIi64,
			    bytes_rem[i]);
		} else {
			zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
		}

		if (wd->wd_scripted)
			(void) printf(i == 0 ? "%s" : "\t%s", buf);
		else
			(void) printf(" %*s", col_widths[i] - 1, buf);
	}
	(void) printf("\n");
	(void) fflush(stdout);
}

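/*
 * Thread run by 'zpool wait' to print a status row every wd_interval seconds
 * until the main thread sets wd_should_exit and signals wd_cv.
 */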
static void *
wait_status_thread(void *arg)
{
	wait_data_t *wd = (wait_data_t *)arg;
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
		return ((void *)(uintptr_t)1);

	for (int row = 0; ; row++) {
		boolean_t missing;
		struct timespec timeout;
		int ret = 0;
		(void) clock_gettime(CLOCK_REALTIME, &timeout);

		if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
		    zpool_props_refresh(zhp) != 0) {
			zpool_close(zhp);
			return ((void *)(uintptr_t)(missing ? 0 : 1));
		}

		print_wait_status_row(wd, zhp, row);

		timeout.tv_sec += floor(wd->wd_interval);
		long nanos = timeout.tv_nsec +
		    (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
		if (nanos >= NANOSEC) {
			timeout.tv_sec++;
			timeout.tv_nsec = nanos - NANOSEC;
		} else {
			timeout.tv_nsec = nanos;
		}
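		/*
		 * Sleep until the next update is due, or until the main
		 * thread signals wd_cv to tell us to exit.
		 */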
		pthread_mutex_lock(&wd->wd_mutex);
		if (!wd->wd_should_exit)
			ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
			    &timeout);
		pthread_mutex_unlock(&wd->wd_mutex);
		if (ret == 0) {
			break; /* signaled by main thread */
		} else if (ret != ETIMEDOUT) {
			(void) fprintf(stderr, gettext("pthread_cond_timedwait "
			    "failed: %s\n"), strerror(ret));
			zpool_close(zhp);
			return ((void *)(uintptr_t)1);
		}
	}

	zpool_close(zhp);
	return ((void *)(uintptr_t)0);
}

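/*
 * zpool wait [-Hnp] [-T d|u] [-t <activity list>] <pool> [interval]
 *
 * Wait until all of the given activities (by default, all of them) are no
 * longer in progress on the pool, printing the remaining work for each
 * activity at the given interval if one was supplied.
 */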
int
zpool_do_wait(int argc, char **argv)
{
	boolean_t verbose = B_FALSE;
	int c, i;
	unsigned long count;
	pthread_t status_thr;
	int error = 0;
	zpool_handle_t *zhp;

	wait_data_t wd;
	wd.wd_scripted = B_FALSE;
	wd.wd_exact = B_FALSE;
	wd.wd_headers_once = B_FALSE;
	wd.wd_should_exit = B_FALSE;

	pthread_mutex_init(&wd.wd_mutex, NULL);
	pthread_cond_init(&wd.wd_cv, NULL);

	/* By default, wait for all types of activity. */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
		wd.wd_enabled[i] = B_TRUE;

	while ((c = getopt(argc, argv, "HnpT:t:")) != -1) {
		switch (c) {
		case 'H':
			wd.wd_scripted = B_TRUE;
			break;
		case 'n':
			wd.wd_headers_once = B_TRUE;
			break;
		case 'p':
			wd.wd_exact = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 't':
			/* Reset activities array */
			memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));

			for (char *tok; (tok = strsep(&optarg, ",")); ) {
				static const char *const col_opts[] = {
				    "discard", "free", "initialize", "replace",
				    "remove", "resilver", "scrub", "trim",
				    "raidz_expand" };

				for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
					if (strcmp(tok, col_opts[i]) == 0) {
						wd.wd_enabled[i] = B_TRUE;
						goto found;
					}

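				/* The token matched no known activity name. */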
				(void) fprintf(stderr,
				    gettext("invalid activity '%s'\n"), tok);
				usage(B_FALSE);
found:;
			}
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	get_interval_count(&argc, argv, &wd.wd_interval, &count);
	if (count != 0) {
		/* This subcmd only accepts an interval, not a count */
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	if (wd.wd_interval != 0)
		verbose = B_TRUE;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	wd.wd_poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
		return (1);

	if (verbose) {
		/*
		 * We use a separate thread for printing status updates because
		 * the main thread will call lzc_wait(), which blocks as long
		 * as an activity is in progress, which can be a long time.
		 */
		if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
		    != 0) {
			(void) fprintf(stderr, gettext("failed to create "
			    "status thread: %s\n"), strerror(errno));
			zpool_close(zhp);
			return (1);
		}
	}

	/*
	 * Loop over all activities that we are supposed to wait for until
	 * none of them are in progress. Note that this means we can end up
	 * waiting for more activities to complete than just those that were
	 * in progress when we began waiting; if an activity we are interested
	 * in begins while we are waiting for another activity, we will wait
	 * for both to complete before exiting.
	 */
	for (;;) {
		boolean_t missing = B_FALSE;
		boolean_t any_waited = B_FALSE;

		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			boolean_t waited;

			if (!wd.wd_enabled[i])
				continue;

			error = zpool_wait_status(zhp, i, &missing, &waited);
			if (error != 0 || missing)
				break;

			any_waited = (any_waited || waited);
		}

		if (error != 0 || missing || !any_waited)
			break;
	}

	zpool_close(zhp);

	if (verbose) {
		uintptr_t status;
		pthread_mutex_lock(&wd.wd_mutex);
		wd.wd_should_exit = B_TRUE;
		pthread_cond_signal(&wd.wd_cv);
		pthread_mutex_unlock(&wd.wd_mutex);
		(void) pthread_join(status_thr, (void *)&status);
		if (status != 0)
			error = status;
	}

	pthread_mutex_destroy(&wd.wd_mutex);
	pthread_cond_destroy(&wd.wd_cv);
	return (error);
}

/*
 * zpool ddtprune -d|-p <amount> <pool>
 *
 *  -d <days>	  Prune entries <days> old and older
 *  -p <percent>  Prune <percent> amount of entries
 *
 * Prune single reference entries from DDT to satisfy the amount specified.
 */
int
zpool_do_ddt_prune(int argc, char **argv)
{
	zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE;
	uint64_t amount = 0;
	zpool_handle_t *zhp;
	char *endptr;
	int c;

	while ((c = getopt(argc, argv, "d:p:")) != -1) {
		switch (c) {
		case 'd':
			if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) {
				(void) fprintf(stderr, gettext("-d cannot be "
				    "combined with -p option\n"));
				usage(B_FALSE);
			}
			errno = 0;
			amount = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0' || amount == 0) {
				(void) fprintf(stderr,
				    gettext("invalid days value\n"));
				usage(B_FALSE);
			}
			amount *= 86400;	/* convert days to seconds */
			unit = ZPOOL_DDT_PRUNE_AGE;
			break;
		case 'p':
			if (unit == ZPOOL_DDT_PRUNE_AGE) {
				(void) fprintf(stderr, gettext("-p cannot be "
				    "combined with -d option\n"));
				usage(B_FALSE);
			}
			errno = 0;
			amount = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0' ||
			    amount == 0 || amount > 100) {
				(void) fprintf(stderr,
				    gettext("invalid percentage value\n"));
				usage(B_FALSE);
			}
			unit = ZPOOL_DDT_PRUNE_PERCENTAGE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}
	argc -= optind;
	argv += optind;

	if (unit == ZPOOL_DDT_PRUNE_NONE) {
		(void) fprintf(stderr,
		    gettext("missing amount option (-d|-p <value>)\n"));
		usage(B_FALSE);
	} else if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	} else if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}
	zhp = zpool_open(g_zfs, argv[0]);
	if (zhp == NULL)
		return (-1);

	int error = zpool_ddt_prune(zhp, unit, amount);

	zpool_close(zhp);

	return (error);
}

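/*
 * Look up 'command' in command_table. On success, store its index in *idx
 * and return 0; return 1 if the command is not found.
 */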
static int
find_command_idx(const char *command, int *idx)
{
	for (int i = 0; i < NCOMMAND; ++i) {
		if (command_table[i].name == NULL)
			continue;

		if (strcmp(command, command_table[i].name) == 0) {
			*idx = i;
			return (0);
		}
	}
	return (1);
}

/*
 * Display version message
 */
static int
zpool_do_version(int argc, char **argv)
{
	int c;
	nvlist_t *jsobj = NULL, *zfs_ver = NULL;
	boolean_t json = B_FALSE;

	struct option long_options[] = {
		{"json", no_argument, NULL, 'j'},
		{0, 0, 0, 0}
	};

	while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
		switch (c) {
		case 'j':
			json = B_TRUE;
			jsobj = zpool_json_schema(0, 1);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	if (argc != 0) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	if (json) {
		zfs_ver = zfs_version_nvlist();
		if (zfs_ver) {
			fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);
			zcmd_print_json(jsobj);
			fnvlist_free(zfs_ver);
			return (0);
		} else
			return (-1);
	} else
		return (zfs_version_print() != 0);
}

/* Display documentation */
static int
zpool_do_help(int argc, char **argv)
{
	char page[MAXNAMELEN];
	if (argc < 3 || strcmp(argv[2], "zpool") == 0)
		strcpy(page, "zpool");
	else if (strcmp(argv[2], "concepts") == 0 ||
	    strcmp(argv[2], "props") == 0)
		snprintf(page, sizeof (page), "zpool%s", argv[2]);
	else
		snprintf(page, sizeof (page), "zpool-%s", argv[2]);

	execlp("man", "man", page, (char *)NULL);

	fprintf(stderr, "couldn't run man program: %s\n", strerror(errno));
	return (-1);
}

/*
 * Do zpool_load_compat() and print error message on failure
 */
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
	char report[1024];

	zpool_compat_status_t ret;

	ret = zpool_load_compat(compat, list, report, sizeof (report));
	switch (ret) {

	case ZPOOL_COMPATIBILITY_OK:
		break;

	case ZPOOL_COMPATIBILITY_NOFILES:
	case ZPOOL_COMPATIBILITY_BADFILE:
	case ZPOOL_COMPATIBILITY_BADTOKEN:
		(void) fprintf(stderr, "Error: %s\n", report);
		break;

	case ZPOOL_COMPATIBILITY_WARNTOKEN:
		(void) fprintf(stderr, "Warning: %s\n", report);
		ret = ZPOOL_COMPATIBILITY_OK;
		break;
	}
	return (ret);
}

int
main(int argc, char **argv)
{
	int ret = 0;
	int i = 0;
	char *cmdname;
	char **newargv;

	(void) setlocale(LC_ALL, "");
	(void) setlocale(LC_NUMERIC, "C");
	(void) textdomain(TEXT_DOMAIN);
	srand(time(NULL));

	opterr = 0;

	/*
	 * Make sure the user has specified some command.
	 */
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing command\n"));
		usage(B_FALSE);
	}

	cmdname = argv[1];

	/*
	 * Special case '-?'
	 */
	if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
		usage(B_TRUE);

	/*
	 * Special case '-V|--version'
	 */
	if ((strcmp(cmdname, "-V") == 0) ||
	    (strcmp(cmdname, "--version") == 0))
		return (zfs_version_print() != 0);

	/*
	 * Special case 'help'
	 */
	if (strcmp(cmdname, "help") == 0)
		return (zpool_do_help(argc, argv));

	if ((g_zfs = libzfs_init()) == NULL) {
		(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
		return (1);
	}

	libzfs_print_on_error(g_zfs, B_TRUE);

	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));

	/*
	 * Many commands modify input strings for string parsing reasons.
	 * We create a copy to protect the original argv.
	 */
	newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
	for (i = 0; i < argc; i++)
		newargv[i] = strdup(argv[i]);
	newargv[argc] = NULL;

	/*
	 * Run the appropriate command.
	 */
	if (find_command_idx(cmdname, &i) == 0) {
		current_command = &command_table[i];
		ret = command_table[i].func(argc - 1, newargv + 1);
	} else if (strchr(cmdname, '=')) {
		verify(find_command_idx("set", &i) == 0);
		current_command = &command_table[i];
		ret = command_table[i].func(argc, newargv);
	} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
		/*
		 * 'freeze' is a vile debugging abomination, so we treat
		 * it as such.
		 */
		zfs_cmd_t zc = {"\0"};

		(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
		ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
		if (ret != 0) {
			(void) fprintf(stderr,
			    gettext("failed to freeze pool: %d\n"), errno);
			ret = 1;
		}

		log_history = 0;
	} else {
		(void) fprintf(stderr, gettext("unrecognized "
		    "command '%s'\n"), cmdname);
		usage(B_FALSE);
		ret = 1;
	}

	for (i = 0; i < argc; i++)
		free(newargv[i]);
	free(newargv);

	if (ret == 0 && log_history)
		(void) zpool_log_history(g_zfs, history_str);

	libzfs_fini(g_zfs);

	/*
	 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
	 * for the purposes of running ::findleaks.
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	return (ret);
}
