1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22
23 /*
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
27 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
28 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
29 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
30 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
31 * Copyright (c) 2017 Datto Inc.
32 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
33 * Copyright (c) 2017, Intel Corporation.
34 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
35 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
36 * Copyright (c) 2021, 2023, 2025, Klara, Inc.
37 * Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.
38 */
39
40 #include <assert.h>
41 #include <ctype.h>
42 #include <dirent.h>
43 #include <errno.h>
44 #include <fcntl.h>
45 #include <getopt.h>
46 #include <inttypes.h>
47 #include <libgen.h>
48 #include <libintl.h>
49 #include <libuutil.h>
50 #include <locale.h>
51 #include <pthread.h>
52 #include <stdio.h>
53 #include <stdlib.h>
54 #include <string.h>
55 #include <termios.h>
56 #include <thread_pool.h>
57 #include <time.h>
58 #include <unistd.h>
59 #include <pwd.h>
60 #include <zone.h>
61 #include <sys/wait.h>
62 #include <zfs_prop.h>
63 #include <sys/fs/zfs.h>
64 #include <sys/stat.h>
65 #include <sys/systeminfo.h>
66 #include <sys/fm/fs/zfs.h>
67 #include <sys/fm/util.h>
68 #include <sys/fm/protocol.h>
69 #include <sys/zfs_ioctl.h>
70 #include <sys/mount.h>
71 #include <sys/sysmacros.h>
72 #include <string.h>
73 #include <math.h>
74
75 #include <libzfs.h>
76 #include <libzutil.h>
77
78 #include "zpool_util.h"
79 #include "zfs_comutil.h"
80 #include "zfeature_common.h"
81 #include "zfs_valstr.h"
82
83 #include "statcommon.h"
84
/* Global libzfs handle, initialized in main() and shared by all commands. */
libzfs_handle_t *g_zfs;

static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */

/*
 * Forward declarations for the per-subcommand entry points; the bodies
 * appear later in this file and are wired up through command_table[].
 */
static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);
static int zpool_do_prefetch(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static int zpool_do_ddt_prune(int, char **);

static int zpool_do_help(int argc, char **argv);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

/*
 * Values for long-only command-line options.  They start at 1024 so they
 * can never collide with any single-character (char-valued) short option
 * returned by getopt_long().
 */
enum zpool_options {
	ZPOOL_OPTION_POWER = 1024,
	ZPOOL_OPTION_ALLOW_INUSE,
	ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
	ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
	ZPOOL_OPTION_POOL_KEY_GUID,
	ZPOOL_OPTION_JSON_NUMS_AS_INT,
	ZPOOL_OPTION_JSON_FLAT_VDEVS
};
153
/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.  libumem calls them at startup (when present) to pick
 * up the equivalent of the $UMEM_DEBUG / $UMEM_LOGGING environment settings.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
172
/*
 * One value per subcommand usage message; used as the index passed to
 * get_usage() from command_table[].
 */
typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DDT_PRUNE,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_PREFETCH,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;
209
210
/*
 * Flags for stats to display with "zpool iostats"
 */
enum iostat_type {
	IOS_DEFAULT	= 0,
	IOS_LATENCY	= 1,
	IOS_QUEUES	= 2,
	IOS_L_HISTO	= 3,
	IOS_RQ_HISTO	= 4,
	IOS_COUNT,	/* always last element */
};

/* iostat_type entries as bitmasks */
#define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
#define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
#define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
#define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
#define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define	IOS_ANYHISTO_M	(IOS_L_HISTO_M | IOS_RQ_HISTO_M)

/*
 * Lookup table for iostat flags to nvlist names.  Basically a list
 * of all the nvlists a flag requires.  Also specifies the order in
 * which data gets printed in zpool iostat.
 *
 * Each row is NULL-terminated; the array is sized for the longest row
 * (IOS_RQ_HISTO: 14 names + terminator).
 *
 * NOTE(review): IOS_LATENCY lists *_LAT_HISTO nvlist names even though it
 * is not a histogram display mode — presumably the average latencies are
 * derived from those histograms.  Verify against the iostat print code.
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
	[IOS_L_HISTO] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_LATENCY] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_QUEUES] = {
	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    NULL},
	[IOS_RQ_HISTO] = {
	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    NULL},
};
286
/*
 * Human-readable names for the various kernel state enums, used mostly by
 * the JSON output paths.
 *
 * NOTE(review): each table is indexed directly by the corresponding enum
 * value (e.g. pool_scan_func_t, vdev_state_t), so entry order must stay in
 * sync with the enum definitions in sys/fs/zfs.h — verify when adding
 * entries.
 */
static const char *pool_scan_func_str[] = {
	"NONE",
	"SCRUB",
	"RESILVER",
	"ERRORSCRUB"
};

static const char *pool_scan_state_str[] = {
	"NONE",
	"SCANNING",
	"FINISHED",
	"CANCELED",
	"ERRORSCRUBBING"
};

static const char *vdev_rebuild_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"COMPLETE"
};

static const char *checkpoint_state_str[] = {
	"NONE",
	"EXISTS",
	"DISCARDING"
};

static const char *vdev_state_str[] = {
	"UNKNOWN",
	"CLOSED",
	"OFFLINE",
	"REMOVED",
	"CANT_OPEN",
	"FAULTED",
	"DEGRADED",
	"ONLINE"
};

static const char *vdev_aux_str[] = {
	"NONE",
	"OPEN_FAILED",
	"CORRUPT_DATA",
	"NO_REPLICAS",
	"BAD_GUID_SUM",
	"TOO_SMALL",
	"BAD_LABEL",
	"VERSION_NEWER",
	"VERSION_OLDER",
	"UNSUP_FEAT",
	"SPARED",
	"ERR_EXCEEDED",
	"IO_FAILURE",
	"BAD_LOG",
	"EXTERNAL",
	"SPLIT_POOL",
	"BAD_ASHIFT",
	"EXTERNAL_PERSIST",
	"ACTIVE",
	"CHILDREN_OFFLINE",
	"ASHIFT_TOO_BIG"
};

static const char *vdev_init_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

static const char *vdev_trim_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};
365
/* Pseudo zfs_nicenum() format used for raw-timestamp values. */
#define	ZFS_NICE_TIMESTAMP	100

/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at one time, so we can
 * just do a highbit64(a)
 *
 * The macro argument is parenthesized so that expressions built with
 * operators of lower precedence than '&' (e.g. IOS_HISTO_IDX(x | y))
 * expand correctly.
 */
#define	IOS_HISTO_IDX(a)	(highbit64((a) & IOS_ANYHISTO_M) - 1)
374
/*
 * One entry in command_table[]: the subcommand name, its entry point, and
 * the index of its usage message (see get_usage()).
 */
typedef struct zpool_command {
	const char	*name;	/* subcommand name, NULL for a usage spacer */
	int		(*func)(int, char **);	/* entry point (argc, argv) */
	zpool_help_t	usage;	/* index into get_usage() */
} zpool_command_t;
380
/*
 * Master command table.  Each ZFS command has a name, associated function,
 * and usage message.  The usage messages need to be internationalized, so we
 * have to have a function to return the usage message based on a command
 * index.
 *
 * These commands are organized according to how they are displayed in the
 * usage message.  An empty command (one with a NULL name) indicates an empty
 * line in the generic usage message; it carries no function or usage index.
 */
static zpool_command_t command_table[] = {
	{ "version",	zpool_do_version,	HELP_VERSION },
	{ NULL },
	{ "create",	zpool_do_create,	HELP_CREATE },
	{ "destroy",	zpool_do_destroy,	HELP_DESTROY },
	{ NULL },
	{ "add",	zpool_do_add,		HELP_ADD },
	{ "remove",	zpool_do_remove,	HELP_REMOVE },
	{ NULL },
	{ "labelclear",	zpool_do_labelclear,	HELP_LABELCLEAR },
	{ NULL },
	{ "checkpoint",	zpool_do_checkpoint,	HELP_CHECKPOINT },
	{ "prefetch",	zpool_do_prefetch,	HELP_PREFETCH },
	{ NULL },
	{ "list",	zpool_do_list,		HELP_LIST },
	{ "iostat",	zpool_do_iostat,	HELP_IOSTAT },
	{ "status",	zpool_do_status,	HELP_STATUS },
	{ NULL },
	{ "online",	zpool_do_online,	HELP_ONLINE },
	{ "offline",	zpool_do_offline,	HELP_OFFLINE },
	{ "clear",	zpool_do_clear,		HELP_CLEAR },
	{ "reopen",	zpool_do_reopen,	HELP_REOPEN },
	{ NULL },
	{ "attach",	zpool_do_attach,	HELP_ATTACH },
	{ "detach",	zpool_do_detach,	HELP_DETACH },
	{ "replace",	zpool_do_replace,	HELP_REPLACE },
	{ "split",	zpool_do_split,		HELP_SPLIT },
	{ NULL },
	{ "initialize",	zpool_do_initialize,	HELP_INITIALIZE },
	{ "resilver",	zpool_do_resilver,	HELP_RESILVER },
	{ "scrub",	zpool_do_scrub,		HELP_SCRUB },
	{ "trim",	zpool_do_trim,		HELP_TRIM },
	{ NULL },
	{ "import",	zpool_do_import,	HELP_IMPORT },
	{ "export",	zpool_do_export,	HELP_EXPORT },
	{ "upgrade",	zpool_do_upgrade,	HELP_UPGRADE },
	{ "reguid",	zpool_do_reguid,	HELP_REGUID },
	{ NULL },
	{ "history",	zpool_do_history,	HELP_HISTORY },
	{ "events",	zpool_do_events,	HELP_EVENTS },
	{ NULL },
	{ "get",	zpool_do_get,		HELP_GET },
	{ "set",	zpool_do_set,		HELP_SET },
	{ "sync",	zpool_do_sync,		HELP_SYNC },
	{ NULL },
	{ "wait",	zpool_do_wait,		HELP_WAIT },
	{ NULL },
	{ "ddtprune",	zpool_do_ddt_prune,	HELP_DDT_PRUNE },
};
439
/* Number of entries (including NULL spacers) in command_table[]. */
#define	NCOMMAND	(ARRAY_SIZE(command_table))

/* Allocation-class name used on the command line for log devices. */
#define	VDEV_ALLOC_CLASS_LOGS	"logs"

/* Size of the "zpool <subcommand>" buffer built for JSON output. */
#define	MAX_CMD_LEN	256

/* command_table[] entry for the subcommand currently executing, or NULL. */
static zpool_command_t *current_command;
/* Property namespace(s) the current command operates on; narrows usage(). */
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
/* Buffer for the pool-history record of this invocation. */
static char history_str[HIS_MAX_RECORD_LEN];
/* Whether this invocation should be logged to the pool history. */
static boolean_t log_history = B_TRUE;
/* -T timestamp format for interval-repeating commands (NODATE = none). */
static uint_t timestamp_fmt = NODATE;
451
/*
 * Return the (internationalized) usage synopsis for the given command index.
 * The returned string is static text owned by gettext(); callers must not
 * free or modify it.
 */
static const char *
get_usage(zpool_help_t idx)
{
	switch (idx) {
	case HELP_ADD:
		return (gettext("\tadd [-afgLnP] [-o property=value] "
		    "<pool> <vdev> ...\n"));
	case HELP_ATTACH:
		return (gettext("\tattach [-fsw] [-o property=value] "
		    "<pool> <vdev> <new-device>\n"));
	case HELP_CLEAR:
		return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
	case HELP_CREATE:
		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
		    "\t    [-O file-system-property=value] ... \n"
		    "\t    [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
	case HELP_CHECKPOINT:
		return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
	case HELP_DESTROY:
		return (gettext("\tdestroy [-f] <pool>\n"));
	case HELP_DETACH:
		return (gettext("\tdetach <pool> <device>\n"));
	case HELP_EXPORT:
		return (gettext("\texport [-af] <pool> ...\n"));
	case HELP_HISTORY:
		return (gettext("\thistory [-il] [<pool>] ...\n"));
	case HELP_IMPORT:
		return (gettext("\timport [-d dir] [-D]\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]] -a\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]]\n"
		    "\t    [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
	case HELP_IOSTAT:
		return (gettext("\tiostat [[[-c [script1,script2,...]"
		    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
		    "\t    [[pool ...]|[pool vdev ...]|[vdev ...]]"
		    " [[-n] interval [count]]\n"));
	case HELP_LABELCLEAR:
		return (gettext("\tlabelclear [-f] <vdev>\n"));
	case HELP_LIST:
		return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "
		    "[--json-int, --json-pool-key-guid]] ...\n"
		    "\t    [-T d|u] [pool] [interval [count]]\n"));
	case HELP_PREFETCH:
		return (gettext("\tprefetch [-t <type>] <pool>\n"));
	case HELP_OFFLINE:
		return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
		    "<device> ...\n"));
	case HELP_ONLINE:
		return (gettext("\tonline [--power][-e] <pool> <device> "
		    "...\n"));
	case HELP_REPLACE:
		return (gettext("\treplace [-fsw] [-o property=value] "
		    "<pool> <device> [new-device]\n"));
	case HELP_REMOVE:
		return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
	case HELP_REOPEN:
		return (gettext("\treopen [-n] <pool>\n"));
	case HELP_INITIALIZE:
		return (gettext("\tinitialize [-c | -s | -u] [-w] <-a | <pool> "
		    "[<device> ...]>\n"));
	case HELP_SCRUB:
		return (gettext("\tscrub [-e | -s | -p | -C | -E | -S] [-w] "
		    "<-a | <pool> [<pool> ...]>\n"));
	case HELP_RESILVER:
		return (gettext("\tresilver <pool> ...\n"));
	case HELP_TRIM:
		return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] "
		    "<-a | <pool> [<device> ...]>\n"));
	case HELP_STATUS:
		return (gettext("\tstatus [-DdegiLPpstvx] "
		    "[-c script1[,script2,...]] ...\n"
		    "\t    [-j|--json [--json-flat-vdevs] [--json-int] "
		    "[--json-pool-key-guid]] ...\n"
		    "\t    [-T d|u] [--power] [pool] [interval [count]]\n"));
	case HELP_UPGRADE:
		return (gettext("\tupgrade\n"
		    "\tupgrade -v\n"
		    "\tupgrade [-V version] <-a | pool ...>\n"));
	case HELP_EVENTS:
		return (gettext("\tevents [-vHf [pool] | -c]\n"));
	case HELP_GET:
		return (gettext("\tget [-Hp] [-j [--json-int, "
		    "--json-pool-key-guid]] ...\n"
		    "\t    [-o \"all\" | field[,...]] "
		    "<\"all\" | property[,...]> <pool> ...\n"));
	case HELP_SET:
		return (gettext("\tset <property=value> <pool>\n"
		    "\tset <vdev_property=value> <pool> <vdev>\n"));
	case HELP_SPLIT:
		return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
		    "\t    [-o property=value] <pool> <newpool> "
		    "[<device> ...]\n"));
	case HELP_REGUID:
		return (gettext("\treguid [-g guid] <pool>\n"));
	case HELP_SYNC:
		return (gettext("\tsync [pool] ...\n"));
	case HELP_VERSION:
		return (gettext("\tversion [-j]\n"));
	case HELP_WAIT:
		return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
		    "<pool> [interval]\n"));
	case HELP_DDT_PRUNE:
		return (gettext("\tddtprune -d|-p <amount> <pool>\n"));
	default:
		/* Every zpool_help_t value is handled above. */
		__builtin_unreachable();
	}
}
563
564 /*
565 * Callback routine that will print out a pool property value.
566 */
567 static int
print_pool_prop_cb(int prop,void * cb)568 print_pool_prop_cb(int prop, void *cb)
569 {
570 FILE *fp = cb;
571
572 (void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
573
574 if (zpool_prop_readonly(prop))
575 (void) fprintf(fp, " NO ");
576 else
577 (void) fprintf(fp, " YES ");
578
579 if (zpool_prop_values(prop) == NULL)
580 (void) fprintf(fp, "-\n");
581 else
582 (void) fprintf(fp, "%s\n", zpool_prop_values(prop));
583
584 return (ZPROP_CONT);
585 }
586
587 /*
588 * Callback routine that will print out a vdev property value.
589 */
590 static int
print_vdev_prop_cb(int prop,void * cb)591 print_vdev_prop_cb(int prop, void *cb)
592 {
593 FILE *fp = cb;
594
595 (void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
596
597 if (vdev_prop_readonly(prop))
598 (void) fprintf(fp, " NO ");
599 else
600 (void) fprintf(fp, " YES ");
601
602 if (vdev_prop_values(prop) == NULL)
603 (void) fprintf(fp, "-\n");
604 else
605 (void) fprintf(fp, "%s\n", vdev_prop_values(prop));
606
607 return (ZPROP_CONT);
608 }
609
610 /*
611 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
612 * '/dev/disk/by-vdev/L5'.
613 */
614 static const char *
vdev_name_to_path(zpool_handle_t * zhp,char * vdev)615 vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
616 {
617 nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
618 if (vdev_nv == NULL) {
619 return (NULL);
620 }
621 return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
622 }
623
/*
 * Turn power on for the named vdev's slot.  Thin wrapper over zpool_power()
 * with on = B_TRUE; returns its result.
 */
static int
zpool_power_on(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_TRUE));
}
629
630 static int
zpool_power_on_and_disk_wait(zpool_handle_t * zhp,char * vdev)631 zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
632 {
633 int rc;
634
635 rc = zpool_power_on(zhp, vdev);
636 if (rc != 0)
637 return (rc);
638
639 (void) zpool_disk_wait(vdev_name_to_path(zhp, vdev));
640
641 return (0);
642 }
643
/*
 * Power on every real leaf vdev in the pool, then wait for all of their
 * block devices to appear.  Powering everything on before waiting lets the
 * devices come up in parallel rather than one at a time.
 *
 * Returns 0 on success, or the first non-zero zpool_power_on() result
 * (in which case remaining vdevs are not powered on).
 */
static int
zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
{
	nvlist_t *nv;
	const char *path = NULL;
	int rc;

	/* Power up all the devices first */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		if (path != NULL) {
			rc = zpool_power_on(zhp, (char *)path);
			if (rc != 0) {
				return (rc);
			}
		}
	}

	/*
	 * Wait for their devices to show up. Since we powered them on
	 * at roughly the same time, they should all come online around
	 * the same time.
	 */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		(void) zpool_disk_wait(path);
	}

	return (0);
}
674
/*
 * Turn power off for the named vdev's slot.  Thin wrapper over zpool_power()
 * with on = B_FALSE; returns its result.
 */
static int
zpool_power_off(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_FALSE));
}
680
/*
 * Display usage message.  If we're inside a command, display only the usage
 * for that command.  Otherwise, iterate over the entire command table and
 * display a complete usage message.
 *
 * Never returns: exits 0 when usage was explicitly requested (e.g. -?/help),
 * 2 otherwise.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	/* Requested help goes to stdout; error-triggered usage to stderr. */
	FILE *fp = requested ? stdout : stderr;

	if (current_command == NULL) {
		int i;

		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
		(void) fprintf(fp,
		    gettext("where 'command' is one of the following:\n\n"));

		/* NULL table entries render as blank separator lines. */
		for (i = 0; i < NCOMMAND; i++) {
			if (command_table[i].name == NULL)
				(void) fprintf(fp, "\n");
			else
				(void) fprintf(fp, "%s",
				    get_usage(command_table[i].usage));
		}

		(void) fprintf(fp,
		    gettext("\nFor further help on a command or topic, "
		    "run: %s\n"), "zpool help [<topic>]");
	} else {
		(void) fprintf(fp, gettext("usage:\n"));
		(void) fprintf(fp, "%s", get_usage(current_command->usage));
	}

	/*
	 * For the property-oriented commands, also print the table of
	 * supported properties (pool or vdev, depending on context).
	 */
	if (current_command != NULL &&
	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
	    ((strcmp(current_command->name, "set") == 0) ||
	    (strcmp(current_command->name, "get") == 0) ||
	    (strcmp(current_command->name, "list") == 0))) {

		(void) fprintf(fp, "%s",
		    gettext("\nthe following properties are supported:\n"));

		(void) fprintf(fp, "\n\t%-19s  %s   %s\n\n",
		    "PROPERTY", "EDIT", "VALUES");

		/* Iterate over all properties */
		if (current_prop_type == ZFS_TYPE_POOL) {
			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);

			(void) fprintf(fp, "\t%-19s  ", "feature@...");
			(void) fprintf(fp, "YES   "
			    "disabled | enabled | active\n");

			(void) fprintf(fp, gettext("\nThe feature@ properties "
			    "must be appended with a feature name.\n"
			    "See zpool-features(7).\n"));
		} else if (current_prop_type == ZFS_TYPE_VDEV) {
			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);
		}
	}

	/*
	 * See comments at end of main().
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	exit(requested ? 0 : 2);
}
754
755 /*
756 * zpool initialize [-c | -s | -u] [-w] <-a | pool> [<vdev> ...]
757 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
758 * if none specified.
759 *
760 * -a Use all pools.
761 * -c Cancel. Ends active initializing.
762 * -s Suspend. Initializing can then be restarted with no flags.
763 * -u Uninitialize. Clears initialization state.
764 * -w Wait. Blocks until initializing has completed.
765 */
766 int
zpool_do_initialize(int argc,char ** argv)767 zpool_do_initialize(int argc, char **argv)
768 {
769 int c;
770 char *poolname;
771 zpool_handle_t *zhp;
772 int err = 0;
773 boolean_t wait = B_FALSE;
774 boolean_t initialize_all = B_FALSE;
775
776 struct option long_options[] = {
777 {"cancel", no_argument, NULL, 'c'},
778 {"suspend", no_argument, NULL, 's'},
779 {"uninit", no_argument, NULL, 'u'},
780 {"wait", no_argument, NULL, 'w'},
781 {"all", no_argument, NULL, 'a'},
782 {0, 0, 0, 0}
783 };
784
785 pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
786 while ((c = getopt_long(argc, argv, "acsuw", long_options,
787 NULL)) != -1) {
788 switch (c) {
789 case 'a':
790 initialize_all = B_TRUE;
791 break;
792 case 'c':
793 if (cmd_type != POOL_INITIALIZE_START &&
794 cmd_type != POOL_INITIALIZE_CANCEL) {
795 (void) fprintf(stderr, gettext("-c cannot be "
796 "combined with other options\n"));
797 usage(B_FALSE);
798 }
799 cmd_type = POOL_INITIALIZE_CANCEL;
800 break;
801 case 's':
802 if (cmd_type != POOL_INITIALIZE_START &&
803 cmd_type != POOL_INITIALIZE_SUSPEND) {
804 (void) fprintf(stderr, gettext("-s cannot be "
805 "combined with other options\n"));
806 usage(B_FALSE);
807 }
808 cmd_type = POOL_INITIALIZE_SUSPEND;
809 break;
810 case 'u':
811 if (cmd_type != POOL_INITIALIZE_START &&
812 cmd_type != POOL_INITIALIZE_UNINIT) {
813 (void) fprintf(stderr, gettext("-u cannot be "
814 "combined with other options\n"));
815 usage(B_FALSE);
816 }
817 cmd_type = POOL_INITIALIZE_UNINIT;
818 break;
819 case 'w':
820 wait = B_TRUE;
821 break;
822 case '?':
823 if (optopt != 0) {
824 (void) fprintf(stderr,
825 gettext("invalid option '%c'\n"), optopt);
826 } else {
827 (void) fprintf(stderr,
828 gettext("invalid option '%s'\n"),
829 argv[optind - 1]);
830 }
831 usage(B_FALSE);
832 }
833 }
834
835 argc -= optind;
836 argv += optind;
837
838 initialize_cbdata_t cbdata = {
839 .wait = wait,
840 .cmd_type = cmd_type
841 };
842
843 if (initialize_all && argc > 0) {
844 (void) fprintf(stderr, gettext("-a cannot be combined with "
845 "individual pools or vdevs\n"));
846 usage(B_FALSE);
847 }
848
849 if (argc < 1 && !initialize_all) {
850 (void) fprintf(stderr, gettext("missing pool name argument\n"));
851 usage(B_FALSE);
852 }
853
854 if (wait && (cmd_type != POOL_INITIALIZE_START)) {
855 (void) fprintf(stderr, gettext("-w cannot be used with -c, -s"
856 "or -u\n"));
857 usage(B_FALSE);
858 }
859
860 if (argc == 0 && initialize_all) {
861 /* Initilize each pool recursively */
862 err = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
863 B_FALSE, zpool_initialize_one, &cbdata);
864 return (err);
865 } else if (argc == 1) {
866 /* no individual leaf vdevs specified, initialize the pool */
867 poolname = argv[0];
868 zhp = zpool_open(g_zfs, poolname);
869 if (zhp == NULL)
870 return (-1);
871 err = zpool_initialize_one(zhp, &cbdata);
872 } else {
873 /* individual leaf vdevs specified, initialize them */
874 poolname = argv[0];
875 zhp = zpool_open(g_zfs, poolname);
876 if (zhp == NULL)
877 return (-1);
878 nvlist_t *vdevs = fnvlist_alloc();
879 for (int i = 1; i < argc; i++) {
880 fnvlist_add_boolean(vdevs, argv[i]);
881 }
882 if (wait)
883 err = zpool_initialize_wait(zhp, cmd_type, vdevs);
884 else
885 err = zpool_initialize(zhp, cmd_type, vdevs);
886 fnvlist_free(vdevs);
887 }
888
889 zpool_close(zhp);
890
891 return (err);
892 }
893
894 /*
895 * print a pool vdev config for dry runs
896 */
897 static void
print_vdev_tree(zpool_handle_t * zhp,const char * name,nvlist_t * nv,int indent,const char * match,int name_flags)898 print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
899 const char *match, int name_flags)
900 {
901 nvlist_t **child;
902 uint_t c, children;
903 char *vname;
904 boolean_t printed = B_FALSE;
905
906 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
907 &child, &children) != 0) {
908 if (name != NULL)
909 (void) printf("\t%*s%s\n", indent, "", name);
910 return;
911 }
912
913 for (c = 0; c < children; c++) {
914 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
915 const char *class = "";
916
917 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
918 &is_hole);
919
920 if (is_hole == B_TRUE) {
921 continue;
922 }
923
924 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
925 &is_log);
926 if (is_log)
927 class = VDEV_ALLOC_BIAS_LOG;
928 (void) nvlist_lookup_string(child[c],
929 ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
930 if (strcmp(match, class) != 0)
931 continue;
932
933 if (!printed && name != NULL) {
934 (void) printf("\t%*s%s\n", indent, "", name);
935 printed = B_TRUE;
936 }
937 vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
938 print_vdev_tree(zhp, vname, child[c], indent + 2, "",
939 name_flags);
940 free(vname);
941 }
942 }
943
944 /*
945 * Print the list of l2cache devices for dry runs.
946 */
947 static void
print_cache_list(nvlist_t * nv,int indent)948 print_cache_list(nvlist_t *nv, int indent)
949 {
950 nvlist_t **child;
951 uint_t c, children;
952
953 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
954 &child, &children) == 0 && children > 0) {
955 (void) printf("\t%*s%s\n", indent, "", "cache");
956 } else {
957 return;
958 }
959 for (c = 0; c < children; c++) {
960 char *vname;
961
962 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
963 (void) printf("\t%*s%s\n", indent + 2, "", vname);
964 free(vname);
965 }
966 }
967
968 /*
969 * Print the list of spares for dry runs.
970 */
971 static void
print_spare_list(nvlist_t * nv,int indent)972 print_spare_list(nvlist_t *nv, int indent)
973 {
974 nvlist_t **child;
975 uint_t c, children;
976
977 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
978 &child, &children) == 0 && children > 0) {
979 (void) printf("\t%*s%s\n", indent, "", "spares");
980 } else {
981 return;
982 }
983 for (c = 0; c < children; c++) {
984 char *vname;
985
986 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
987 (void) printf("\t%*s%s\n", indent + 2, "", vname);
988 free(vname);
989 }
990 }
991
/*
 * State shared with find_spare(): cb_guid is the vdev GUID to look for;
 * cb_zhp receives the still-open handle of the pool that contains it.
 */
typedef struct spare_cbdata {
	uint64_t	cb_guid;	/* GUID of the vdev being searched for */
	zpool_handle_t	*cb_zhp;	/* out: pool that contains the vdev */
} spare_cbdata_t;
996
997 static boolean_t
find_vdev(nvlist_t * nv,uint64_t search)998 find_vdev(nvlist_t *nv, uint64_t search)
999 {
1000 uint64_t guid;
1001 nvlist_t **child;
1002 uint_t c, children;
1003
1004 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
1005 search == guid)
1006 return (B_TRUE);
1007
1008 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1009 &child, &children) == 0) {
1010 for (c = 0; c < children; c++)
1011 if (find_vdev(child[c], search))
1012 return (B_TRUE);
1013 }
1014
1015 return (B_FALSE);
1016 }
1017
1018 static int
find_spare(zpool_handle_t * zhp,void * data)1019 find_spare(zpool_handle_t *zhp, void *data)
1020 {
1021 spare_cbdata_t *cbp = data;
1022 nvlist_t *config, *nvroot;
1023
1024 config = zpool_get_config(zhp, NULL);
1025 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1026 &nvroot) == 0);
1027
1028 if (find_vdev(nvroot, cbp->cb_guid)) {
1029 cbp->cb_zhp = zhp;
1030 return (1);
1031 }
1032
1033 zpool_close(zhp);
1034 return (0);
1035 }
1036
1037 static void
nice_num_str_nvlist(nvlist_t * item,const char * key,uint64_t value,boolean_t literal,boolean_t as_int,int format)1038 nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
1039 boolean_t literal, boolean_t as_int, int format)
1040 {
1041 char buf[256];
1042
1043 if (literal) {
1044 if (!as_int)
1045 (void) snprintf(buf, 256, "%llu", (u_longlong_t)value);
1046 } else {
1047 switch (format) {
1048 case ZFS_NICENUM_1024:
1049 zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
1050 break;
1051 case ZFS_NICENUM_BYTES:
1052 zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
1053 break;
1054 case ZFS_NICENUM_TIME:
1055 zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
1056 break;
1057 case ZFS_NICE_TIMESTAMP:
1058 format_timestamp(value, buf, 256);
1059 break;
1060 default:
1061 fprintf(stderr, "Invalid number format");
1062 exit(1);
1063 }
1064 }
1065 if (as_int)
1066 fnvlist_add_uint64(item, key, value);
1067 else
1068 fnvlist_add_string(item, key, buf);
1069 }
1070
1071 /*
1072 * Generates an nvlist with output version for every command based on params.
1073 * Purpose of this is to add a version of JSON output, considering the schema
1074 * format might be updated for each command in future.
1075 *
1076 * Schema:
1077 *
1078 * "output_version": {
1079 * "command": string,
1080 * "vers_major": integer,
1081 * "vers_minor": integer,
1082 * }
1083 */
1084 static nvlist_t *
zpool_json_schema(int maj_v,int min_v)1085 zpool_json_schema(int maj_v, int min_v)
1086 {
1087 char cmd[MAX_CMD_LEN];
1088 nvlist_t *sch = fnvlist_alloc();
1089 nvlist_t *ov = fnvlist_alloc();
1090
1091 (void) snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
1092 fnvlist_add_string(ov, "command", cmd);
1093 fnvlist_add_uint32(ov, "vers_major", maj_v);
1094 fnvlist_add_uint32(ov, "vers_minor", min_v);
1095 fnvlist_add_nvlist(sch, "output_version", ov);
1096 fnvlist_free(ov);
1097 return (sch);
1098 }
1099
1100 static void
fill_pool_info(nvlist_t * list,zpool_handle_t * zhp,boolean_t addtype,boolean_t as_int)1101 fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
1102 boolean_t as_int)
1103 {
1104 nvlist_t *config = zpool_get_config(zhp, NULL);
1105 uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
1106 uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);
1107
1108 fnvlist_add_string(list, "name", zpool_get_name(zhp));
1109 if (addtype)
1110 fnvlist_add_string(list, "type", "POOL");
1111 fnvlist_add_string(list, "state", zpool_get_state_str(zhp));
1112 if (as_int) {
1113 if (guid)
1114 fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);
1115 if (txg)
1116 fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);
1117 fnvlist_add_uint64(list, "spa_version", SPA_VERSION);
1118 fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);
1119 } else {
1120 char value[ZFS_MAXPROPLEN];
1121 if (guid) {
1122 (void) snprintf(value, ZFS_MAXPROPLEN, "%llu",
1123 (u_longlong_t)guid);
1124 fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
1125 }
1126 if (txg) {
1127 (void) snprintf(value, ZFS_MAXPROPLEN, "%llu",
1128 (u_longlong_t)txg);
1129 fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
1130 }
1131 fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);
1132 fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);
1133 }
1134 }
1135
1136 static void
used_by_other(zpool_handle_t * zhp,nvlist_t * nvdev,nvlist_t * list)1137 used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
1138 {
1139 spare_cbdata_t spare_cb;
1140 verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
1141 &spare_cb.cb_guid) == 0);
1142 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
1143 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
1144 zpool_get_name(zhp)) != 0) {
1145 fnvlist_add_string(list, "used_by",
1146 zpool_get_name(spare_cb.cb_zhp));
1147 }
1148 zpool_close(spare_cb.cb_zhp);
1149 }
1150 }
1151
/*
 * Fill 'list' with identifying information for the vdev called 'name'
 * within pool 'zhp': vdev type, guid, device paths, devid, allocation
 * class, and current state.  When 'as_int' is set the guid is stored as
 * a uint64, otherwise as a decimal string.  If the vdev cannot be found
 * only "name" (and optionally "type") are added.
 */
static void
fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
    boolean_t addtype, boolean_t as_int)
{
	boolean_t l2c = B_FALSE;
	const char *path, *phys, *devid, *bias = NULL;
	uint64_t hole = 0, log = 0, spare = 0;
	vdev_stat_t *vs;
	uint_t c;
	nvlist_t *nvdev;
	nvlist_t *nvdev_parent = NULL;
	char *_name;

	/* the pool's own name denotes the root vdev, addressed as "root-0" */
	if (strcmp(name, zpool_get_name(zhp)) != 0)
		_name = name;
	else
		_name = (char *)"root-0";

	/* also tells us (via l2c) whether this is an L2ARC device */
	nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);

	fnvlist_add_string(list, "name", name);
	if (addtype)
		fnvlist_add_string(list, "type", "VDEV");
	if (nvdev) {
		const char *type = fnvlist_lookup_string(nvdev,
		    ZPOOL_CONFIG_TYPE);
		if (type)
			fnvlist_add_string(list, "vdev_type", type);
		uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);
		if (guid) {
			if (as_int) {
				fnvlist_add_uint64(list, "guid", guid);
			} else {
				char buf[ZFS_MAXPROPLEN];
				(void) snprintf(buf, ZFS_MAXPROPLEN, "%llu",
				    (u_longlong_t)guid);
				fnvlist_add_string(list, "guid", buf);
			}
		}
		/* path/phys_path/devid are optional per-device attributes */
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
			fnvlist_add_string(list, "path", path);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
		    &phys) == 0)
			fnvlist_add_string(list, "phys_path", phys);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
		    &devid) == 0)
			fnvlist_add_string(list, "devid", devid);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
		    &spare);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
		/*
		 * Resolve the allocation class, in precedence order:
		 * hole, l2cache, spare, log, then allocation bias
		 * (dedup/special), falling back to "normal".
		 */
		if (hole)
			fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
		else if (l2c)
			fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
		else if (spare)
			fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
		else if (log)
			fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
		else {
			(void) nvlist_lookup_string(nvdev,
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			if (bias != NULL)
				fnvlist_add_string(list, "class", bias);
			else {
				nvdev_parent = NULL;
				nvdev_parent = zpool_find_parent_vdev(zhp,
				    _name, NULL, NULL, NULL);

				/*
				 * With a mirrored special device, the parent
				 * "mirror" vdev will have
				 * ZPOOL_CONFIG_ALLOCATION_BIAS set to "special"
				 * not the leaf vdevs. If we're a leaf vdev
				 * in that case we need to look at our parent
				 * to see if they're "special" to know if we
				 * are "special" too.
				 */
				if (nvdev_parent) {
					(void) nvlist_lookup_string(
					    nvdev_parent,
					    ZPOOL_CONFIG_ALLOCATION_BIAS,
					    &bias);
				}
				if (bias != NULL)
					fnvlist_add_string(list, "class", bias);
				else
					fnvlist_add_string(list, "class",
					    "normal");
			}
		}
		if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0) {
			fnvlist_add_string(list, "state",
			    vdev_state_str[vs->vs_state]);
		}
	}
}
1250
1251 static boolean_t
prop_list_contains_feature(nvlist_t * proplist)1252 prop_list_contains_feature(nvlist_t *proplist)
1253 {
1254 nvpair_t *nvp;
1255 for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
1256 nvp = nvlist_next_nvpair(proplist, nvp)) {
1257 if (zpool_prop_feature(nvpair_name(nvp)))
1258 return (B_TRUE);
1259 }
1260 return (B_FALSE);
1261 }
1262
1263 /*
1264 * Add a property pair (name, string-value) into a property nvlist.
1265 */
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
	/*
	 * Validate and normalize (propname, propval) and append it to
	 * *props, allocating the list on first use.  'poolprop' selects
	 * pool/vdev/feature@ validation versus filesystem-property
	 * validation.  Returns 0 on success, 1 on allocation failure,
	 * 2 on an invalid property or conflicting combination.
	 */
	zpool_prop_t prop = ZPOOL_PROP_INVAL;
	nvlist_t *proplist;
	const char *normnm;
	const char *strval;

	/* lazily allocate the caller's property list on first use */
	if (*props == NULL &&
	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		return (1);
	}

	proplist = *props;

	if (poolprop) {
		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
		const char *cname =
		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

		/* must be a known pool property, feature@, or vdev property */
		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
		    (!zpool_prop_feature(propname) &&
		    !zpool_prop_vdev(propname))) {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid pool or vdev property\n"), propname);
			return (2);
		}

		/*
		 * feature@ properties and version should not be specified
		 * at the same time.
		 */
		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    prop_list_contains_feature(proplist))) {
			(void) fprintf(stderr, gettext("'feature@' and "
			    "'version' properties cannot be specified "
			    "together\n"));
			return (2);
		}

		/*
		 * if version is specified, only "legacy" compatibility
		 * may be requested
		 */
		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    nvlist_exists(proplist, cname) &&
		    strcmp(fnvlist_lookup_string(proplist, cname),
		    ZPOOL_COMPAT_LEGACY) != 0)) {
			(void) fprintf(stderr, gettext("when 'version' is "
			    "specified, the 'compatibility' feature may only "
			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
			return (2);
		}

		/* feature@ and vdev properties keep the user-supplied name */
		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
			normnm = propname;
		else
			normnm = zpool_prop_to_name(prop);
	} else {
		zfs_prop_t fsprop = zfs_name_to_prop(propname);

		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
		    B_FALSE)) {
			normnm = zfs_prop_to_name(fsprop);
		} else if (zfs_prop_user(propname) ||
		    zfs_prop_userquota(propname)) {
			/* user and userquota@ properties pass through as-is */
			normnm = propname;
		} else {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid filesystem property\n"), propname);
			return (2);
		}
	}

	/* reject duplicates; 'cachefile' alone may be specified again */
	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
	    prop != ZPOOL_PROP_CACHEFILE) {
		(void) fprintf(stderr, gettext("property '%s' "
		    "specified multiple times\n"), propname);
		return (2);
	}

	if (nvlist_add_string(proplist, normnm, propval) != 0) {
		(void) fprintf(stderr, gettext("internal "
		    "error: out of memory\n"));
		return (1);
	}

	return (0);
}
1363
1364 /*
1365 * Set a default property pair (name, string-value) in a property nvlist
1366 */
1367 static int
add_prop_list_default(const char * propname,const char * propval,nvlist_t ** props)1368 add_prop_list_default(const char *propname, const char *propval,
1369 nvlist_t **props)
1370 {
1371 const char *pval;
1372
1373 if (nvlist_lookup_string(*props, propname, &pval) == 0)
1374 return (0);
1375
1376 return (add_prop_list(propname, propval, props, B_TRUE));
1377 }
1378
1379 /*
1380 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
1381 *
1382 * -a Disable the ashift validation checks
1383 * -f Force addition of devices, even if they appear in use
1384 * -g Display guid for individual vdev name.
1385 * -L Follow links when resolving vdev path name.
1386 * -n Do not add the devices, but display the resulting layout if
1387 * they were to be added.
1388 * -o Set property=value.
1389 * -P Display full path for vdev name.
1390 *
1391 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
1392 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
1393 * libzfs.
1394 */
int
zpool_do_add(int argc, char **argv)
{
	boolean_t check_replication = B_TRUE;
	boolean_t check_inuse = B_TRUE;
	boolean_t dryrun = B_FALSE;
	boolean_t check_ashift = B_TRUE;
	boolean_t force = B_FALSE;
	int name_flags = 0;
	int c;
	nvlist_t *nvroot;
	char *poolname;
	int ret;
	zpool_handle_t *zhp;
	nvlist_t *config;
	nvlist_t *props = NULL;
	char *propval;

	struct option long_options[] = {
		{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
		{"allow-replication-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
		{"allow-ashift-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'g':
			name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			/* 'ashift' is the only property accepted via -o */
			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 'P':
			name_flags |= VDEV_NAME_PATH;
			break;
		case ZPOOL_OPTION_ALLOW_INUSE:
			check_inuse = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
			check_replication = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
			check_ashift = B_FALSE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		usage(B_FALSE);
	}

	/*
	 * -f is shorthand for skipping all three safety checks; it may not
	 * be combined with the individual --allow-* long options.
	 */
	if (force) {
		if (!check_inuse || !check_replication || !check_ashift) {
			(void) fprintf(stderr, gettext("'-f' option is not "
			    "allowed with '--allow-replication-mismatch', "
			    "'--allow-ashift-mismatch', or "
			    "'--allow-in-use'\n"));
			usage(B_FALSE);
		}
		check_inuse = B_FALSE;
		check_replication = B_FALSE;
		check_ashift = B_FALSE;
	}

	poolname = argv[0];

	argc--;
	argv++;

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}

	/* pass off to make_root_vdev for processing */
	nvroot = make_root_vdev(zhp, props, !check_inuse,
	    check_replication, B_FALSE, dryrun, argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		return (1);
	}

	if (dryrun) {
		/*
		 * Print the existing pool layout followed by the vdevs that
		 * would be added, grouped by allocation class, without
		 * modifying the pool.
		 */
		nvlist_t *poolnvroot;
		nvlist_t **l2child, **sparechild;
		uint_t l2children, sparechildren, c;
		char *vname;
		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &poolnvroot) == 0);

		(void) printf(gettext("would update '%s' to the following "
		    "configuration:\n\n"), zpool_get_name(zhp));

		/* print original main pool and new tree */
		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
		    name_flags | VDEV_NAME_TYPE_ID);
		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

		/* print other classes: 'dedup', 'special', and 'log' */
		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		}

		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}
1640
1641 /*
1642 * zpool remove [-npsw] <pool> <vdev> ...
1643 *
1644 * Removes the given vdev from the pool.
1645 */
1646 int
zpool_do_remove(int argc,char ** argv)1647 zpool_do_remove(int argc, char **argv)
1648 {
1649 char *poolname;
1650 int i, ret = 0;
1651 zpool_handle_t *zhp = NULL;
1652 boolean_t stop = B_FALSE;
1653 int c;
1654 boolean_t noop = B_FALSE;
1655 boolean_t parsable = B_FALSE;
1656 boolean_t wait = B_FALSE;
1657
1658 /* check options */
1659 while ((c = getopt(argc, argv, "npsw")) != -1) {
1660 switch (c) {
1661 case 'n':
1662 noop = B_TRUE;
1663 break;
1664 case 'p':
1665 parsable = B_TRUE;
1666 break;
1667 case 's':
1668 stop = B_TRUE;
1669 break;
1670 case 'w':
1671 wait = B_TRUE;
1672 break;
1673 case '?':
1674 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1675 optopt);
1676 usage(B_FALSE);
1677 }
1678 }
1679
1680 argc -= optind;
1681 argv += optind;
1682
1683 /* get pool name and check number of arguments */
1684 if (argc < 1) {
1685 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1686 usage(B_FALSE);
1687 }
1688
1689 poolname = argv[0];
1690
1691 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1692 return (1);
1693
1694 if (stop && noop) {
1695 zpool_close(zhp);
1696 (void) fprintf(stderr, gettext("stop request ignored\n"));
1697 return (0);
1698 }
1699
1700 if (stop) {
1701 if (argc > 1) {
1702 (void) fprintf(stderr, gettext("too many arguments\n"));
1703 usage(B_FALSE);
1704 }
1705 if (zpool_vdev_remove_cancel(zhp) != 0)
1706 ret = 1;
1707 if (wait) {
1708 (void) fprintf(stderr, gettext("invalid option "
1709 "combination: -w cannot be used with -s\n"));
1710 usage(B_FALSE);
1711 }
1712 } else {
1713 if (argc < 2) {
1714 (void) fprintf(stderr, gettext("missing device\n"));
1715 usage(B_FALSE);
1716 }
1717
1718 for (i = 1; i < argc; i++) {
1719 if (noop) {
1720 uint64_t size;
1721
1722 if (zpool_vdev_indirect_size(zhp, argv[i],
1723 &size) != 0) {
1724 ret = 1;
1725 break;
1726 }
1727 if (parsable) {
1728 (void) printf("%s %llu\n",
1729 argv[i], (unsigned long long)size);
1730 } else {
1731 char valstr[32];
1732 zfs_nicenum(size, valstr,
1733 sizeof (valstr));
1734 (void) printf("Memory that will be "
1735 "used after removing %s: %s\n",
1736 argv[i], valstr);
1737 }
1738 } else {
1739 if (zpool_vdev_remove(zhp, argv[i]) != 0)
1740 ret = 1;
1741 }
1742 }
1743
1744 if (ret == 0 && wait)
1745 ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
1746 }
1747 zpool_close(zhp);
1748
1749 return (ret);
1750 }
1751
1752 /*
1753 * Return 1 if a vdev is active (being used in a pool)
1754 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
1755 *
1756 * This is useful for checking if a disk in an active pool is offlined or
1757 * faulted.
1758 */
static int
vdev_is_active(char *vdev_path)
{
	int fd = open(vdev_path, O_EXCL);

	if (fd < 0)
		return (1);	/* can't open O_EXCL - disk is active */

	(void) close(fd);
	return (0);	/* disk is inactive in the pool */
}
1771
1772 /*
1773 * zpool labelclear [-f] <vdev>
1774 *
1775 * -f Force clearing the label for the vdevs which are members of
1776 * the exported or foreign pools.
1777 *
1778 * Verifies that the vdev is not active and zeros out the label information
1779 * on the device.
1780 */
int
zpool_do_labelclear(int argc, char **argv)
{
	char vdev[MAXPATHLEN];
	char *name = NULL;
	int c, fd, ret = 0;
	nvlist_t *config;
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	boolean_t force = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		default:
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get vdev name */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing vdev name\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	(void) strlcpy(vdev, argv[0], sizeof (vdev));

	/*
	 * If we cannot open an absolute path, we quit.
	 * Otherwise if the provided vdev name doesn't point to a file,
	 * try prepending expected disk paths and partition numbers.
	 */
	if ((fd = open(vdev, O_RDWR)) < 0) {
		int error;
		if (vdev[0] == '/') {
			(void) fprintf(stderr, gettext("failed to open "
			    "%s: %s\n"), vdev, strerror(errno));
			return (1);
		}

		/* resolve a short name like "sda" to its full device path */
		error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
		if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
			if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
				error = ENOENT;
		}

		if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
			if (errno == ENOENT) {
				(void) fprintf(stderr, gettext(
				    "failed to find device %s, try "
				    "specifying absolute path instead\n"),
				    argv[0]);
				return (1);
			}

			(void) fprintf(stderr, gettext("failed to open %s:"
			    " %s\n"), vdev, strerror(errno));
			return (1);
		}
	}

	/*
	 * Flush all dirty pages for the block device.  This should not be
	 * fatal when the device does not support BLKFLSBUF as would be the
	 * case for a file vdev.
	 */
	if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
		(void) fprintf(stderr, gettext("failed to invalidate "
		    "cache for %s: %s\n"), vdev, strerror(errno));

	/* a readable label must exist before we attempt to clear it */
	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) fprintf(stderr,
		    gettext("failed to read label from %s\n"), vdev);
		ret = 1;
		goto errout;
	}
	nvlist_free(config);

	ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to check state for %s\n"), vdev);
		ret = 1;
		goto errout;
	}

	if (!inuse)
		goto wipe_label;

	/*
	 * Decide per pool state whether clearing is permitted; most
	 * in-use states require -f, and active-pool members additionally
	 * require the disk itself to be inactive (offlined/faulted).
	 */
	switch (state) {
	default:
	case POOL_STATE_ACTIVE:
	case POOL_STATE_SPARE:
	case POOL_STATE_L2CACHE:
		/*
		 * We allow the user to call 'zpool offline -f'
		 * on an offlined disk in an active pool. We can check if
		 * the disk is online by calling vdev_is_active().
		 */
		if (force && !vdev_is_active(vdev))
			break;

		(void) fprintf(stderr, gettext(
		    "%s is a member (%s) of pool \"%s\""),
		    vdev, zpool_pool_state_to_name(state), name);

		if (force) {
			(void) fprintf(stderr, gettext(
			    ". Offline the disk first to clear its label."));
		}
		printf("\n");
		ret = 1;
		goto errout;

	case POOL_STATE_EXPORTED:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of exported pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_POTENTIALLY_ACTIVE:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of potentially active pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_DESTROYED:
		/* inuse should never be set for a destroyed pool */
		assert(0);
		break;
	}

wipe_label:
	ret = zpool_clear_label(fd);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to clear label for %s\n"), vdev);
	}

errout:
	free(name);
	(void) close(fd);

	return (ret);
}
1946
1947 /*
1948 * zpool create [-fnd] [-o property=value] ...
1949 * [-O file-system-property=value] ...
1950 * [-R root] [-m mountpoint] <pool> <dev> ...
1951 *
1952 * -f Force creation, even if devices appear in use
1953 * -n Do not create the pool, but display the resulting layout if it
1954 * were to be created.
1955 * -R Create a pool under an alternate root
1956 * -m Set default mountpoint for the root dataset. By default it's
1957 * '/<pool>'
1958 * -o Set property=value.
1959 * -o Set feature@feature=enabled|disabled.
1960 * -d Don't automatically enable all supported pool features
1961 * (individual features can be enabled with -o).
1962 * -O Set fsproperty=value in the pool's root file system
1963 *
1964 * Creates the named pool according to the given vdev specification. The
1965 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1966 * Once we get the nvlist back from make_root_vdev(), we either print out the
1967 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
1968 */
1969 int
zpool_do_create(int argc,char ** argv)1970 zpool_do_create(int argc, char **argv)
1971 {
1972 boolean_t force = B_FALSE;
1973 boolean_t dryrun = B_FALSE;
1974 boolean_t enable_pool_features = B_TRUE;
1975
1976 int c;
1977 nvlist_t *nvroot = NULL;
1978 char *poolname;
1979 char *tname = NULL;
1980 int ret = 1;
1981 char *altroot = NULL;
1982 char *compat = NULL;
1983 char *mountpoint = NULL;
1984 nvlist_t *fsprops = NULL;
1985 nvlist_t *props = NULL;
1986 char *propval;
1987
1988 /* check options */
1989 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1990 switch (c) {
1991 case 'f':
1992 force = B_TRUE;
1993 break;
1994 case 'n':
1995 dryrun = B_TRUE;
1996 break;
1997 case 'd':
1998 enable_pool_features = B_FALSE;
1999 break;
2000 case 'R':
2001 altroot = optarg;
2002 if (add_prop_list(zpool_prop_to_name(
2003 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
2004 goto errout;
2005 if (add_prop_list_default(zpool_prop_to_name(
2006 ZPOOL_PROP_CACHEFILE), "none", &props))
2007 goto errout;
2008 break;
2009 case 'm':
2010 /* Equivalent to -O mountpoint=optarg */
2011 mountpoint = optarg;
2012 break;
2013 case 'o':
2014 if ((propval = strchr(optarg, '=')) == NULL) {
2015 (void) fprintf(stderr, gettext("missing "
2016 "'=' for -o option\n"));
2017 goto errout;
2018 }
2019 *propval = '\0';
2020 propval++;
2021
2022 if (add_prop_list(optarg, propval, &props, B_TRUE))
2023 goto errout;
2024
2025 /*
2026 * If the user is creating a pool that doesn't support
2027 * feature flags, don't enable any features.
2028 */
2029 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
2030 char *end;
2031 u_longlong_t ver;
2032
2033 ver = strtoull(propval, &end, 0);
2034 if (*end == '\0' &&
2035 ver < SPA_VERSION_FEATURES) {
2036 enable_pool_features = B_FALSE;
2037 }
2038 }
2039 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
2040 altroot = propval;
2041 if (zpool_name_to_prop(optarg) ==
2042 ZPOOL_PROP_COMPATIBILITY)
2043 compat = propval;
2044 break;
2045 case 'O':
2046 if ((propval = strchr(optarg, '=')) == NULL) {
2047 (void) fprintf(stderr, gettext("missing "
2048 "'=' for -O option\n"));
2049 goto errout;
2050 }
2051 *propval = '\0';
2052 propval++;
2053
2054 /*
2055 * Mountpoints are checked and then added later.
2056 * Uniquely among properties, they can be specified
2057 * more than once, to avoid conflict with -m.
2058 */
2059 if (0 == strcmp(optarg,
2060 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
2061 mountpoint = propval;
2062 } else if (add_prop_list(optarg, propval, &fsprops,
2063 B_FALSE)) {
2064 goto errout;
2065 }
2066 break;
2067 case 't':
2068 /*
2069 * Sanity check temporary pool name.
2070 */
2071 if (strchr(optarg, '/') != NULL) {
2072 (void) fprintf(stderr, gettext("cannot create "
2073 "'%s': invalid character '/' in temporary "
2074 "name\n"), optarg);
2075 (void) fprintf(stderr, gettext("use 'zfs "
2076 "create' to create a dataset\n"));
2077 goto errout;
2078 }
2079
2080 if (add_prop_list(zpool_prop_to_name(
2081 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
2082 goto errout;
2083 if (add_prop_list_default(zpool_prop_to_name(
2084 ZPOOL_PROP_CACHEFILE), "none", &props))
2085 goto errout;
2086 tname = optarg;
2087 break;
2088 case ':':
2089 (void) fprintf(stderr, gettext("missing argument for "
2090 "'%c' option\n"), optopt);
2091 goto badusage;
2092 case '?':
2093 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2094 optopt);
2095 goto badusage;
2096 }
2097 }
2098
2099 argc -= optind;
2100 argv += optind;
2101
2102 /* get pool name and check number of arguments */
2103 if (argc < 1) {
2104 (void) fprintf(stderr, gettext("missing pool name argument\n"));
2105 goto badusage;
2106 }
2107 if (argc < 2) {
2108 (void) fprintf(stderr, gettext("missing vdev specification\n"));
2109 goto badusage;
2110 }
2111
2112 poolname = argv[0];
2113
2114 /*
2115 * As a special case, check for use of '/' in the name, and direct the
2116 * user to use 'zfs create' instead.
2117 */
2118 if (strchr(poolname, '/') != NULL) {
2119 (void) fprintf(stderr, gettext("cannot create '%s': invalid "
2120 "character '/' in pool name\n"), poolname);
2121 (void) fprintf(stderr, gettext("use 'zfs create' to "
2122 "create a dataset\n"));
2123 goto errout;
2124 }
2125
2126 /* pass off to make_root_vdev for bulk processing */
2127 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
2128 argc - 1, argv + 1);
2129 if (nvroot == NULL)
2130 goto errout;
2131
2132 /* make_root_vdev() allows 0 toplevel children if there are spares */
2133 if (!zfs_allocatable_devs(nvroot)) {
2134 (void) fprintf(stderr, gettext("invalid vdev "
2135 "specification: at least one toplevel vdev must be "
2136 "specified\n"));
2137 goto errout;
2138 }
2139
2140 if (altroot != NULL && altroot[0] != '/') {
2141 (void) fprintf(stderr, gettext("invalid alternate root '%s': "
2142 "must be an absolute path\n"), altroot);
2143 goto errout;
2144 }
2145
2146 /*
2147 * Check the validity of the mountpoint and direct the user to use the
2148 * '-m' mountpoint option if it looks like its in use.
2149 */
2150 if (mountpoint == NULL ||
2151 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
2152 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
2153 char buf[MAXPATHLEN];
2154 DIR *dirp;
2155
2156 if (mountpoint && mountpoint[0] != '/') {
2157 (void) fprintf(stderr, gettext("invalid mountpoint "
2158 "'%s': must be an absolute path, 'legacy', or "
2159 "'none'\n"), mountpoint);
2160 goto errout;
2161 }
2162
2163 if (mountpoint == NULL) {
2164 if (altroot != NULL)
2165 (void) snprintf(buf, sizeof (buf), "%s/%s",
2166 altroot, poolname);
2167 else
2168 (void) snprintf(buf, sizeof (buf), "/%s",
2169 poolname);
2170 } else {
2171 if (altroot != NULL)
2172 (void) snprintf(buf, sizeof (buf), "%s%s",
2173 altroot, mountpoint);
2174 else
2175 (void) snprintf(buf, sizeof (buf), "%s",
2176 mountpoint);
2177 }
2178
2179 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
2180 (void) fprintf(stderr, gettext("mountpoint '%s' : "
2181 "%s\n"), buf, strerror(errno));
2182 (void) fprintf(stderr, gettext("use '-m' "
2183 "option to provide a different default\n"));
2184 goto errout;
2185 } else if (dirp) {
2186 int count = 0;
2187
2188 while (count < 3 && readdir(dirp) != NULL)
2189 count++;
2190 (void) closedir(dirp);
2191
2192 if (count > 2) {
2193 (void) fprintf(stderr, gettext("mountpoint "
2194 "'%s' exists and is not empty\n"), buf);
2195 (void) fprintf(stderr, gettext("use '-m' "
2196 "option to provide a "
2197 "different default\n"));
2198 goto errout;
2199 }
2200 }
2201 }
2202
2203 /*
2204 * Now that the mountpoint's validity has been checked, ensure that
2205 * the property is set appropriately prior to creating the pool.
2206 */
2207 if (mountpoint != NULL) {
2208 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
2209 mountpoint, &fsprops, B_FALSE);
2210 if (ret != 0)
2211 goto errout;
2212 }
2213
2214 ret = 1;
2215 if (dryrun) {
2216 /*
2217 * For a dry run invocation, print out a basic message and run
2218 * through all the vdevs in the list and print out in an
2219 * appropriate hierarchy.
2220 */
2221 (void) printf(gettext("would create '%s' with the "
2222 "following layout:\n\n"), poolname);
2223
2224 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
2225 print_vdev_tree(NULL, "dedup", nvroot, 0,
2226 VDEV_ALLOC_BIAS_DEDUP, 0);
2227 print_vdev_tree(NULL, "special", nvroot, 0,
2228 VDEV_ALLOC_BIAS_SPECIAL, 0);
2229 print_vdev_tree(NULL, "logs", nvroot, 0,
2230 VDEV_ALLOC_BIAS_LOG, 0);
2231 print_cache_list(nvroot, 0);
2232 print_spare_list(nvroot, 0);
2233
2234 ret = 0;
2235 } else {
2236 /*
2237 * Load in feature set.
2238 * Note: if compatibility property not given, we'll have
2239 * NULL, which means 'all features'.
2240 */
2241 boolean_t requested_features[SPA_FEATURES];
2242 if (zpool_do_load_compat(compat, requested_features) !=
2243 ZPOOL_COMPATIBILITY_OK)
2244 goto errout;
2245
2246 /*
2247 * props contains list of features to enable.
2248 * For each feature:
2249 * - remove it if feature@name=disabled
2250 * - leave it there if feature@name=enabled
2251 * - add it if:
2252 * - enable_pool_features (ie: no '-d' or '-o version')
2253 * - it's supported by the kernel module
2254 * - it's in the requested feature set
2255 * - warn if it's enabled but not in compat
2256 */
2257 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2258 char propname[MAXPATHLEN];
2259 const char *propval;
2260 zfeature_info_t *feat = &spa_feature_table[i];
2261
2262 (void) snprintf(propname, sizeof (propname),
2263 "feature@%s", feat->fi_uname);
2264
2265 if (!nvlist_lookup_string(props, propname, &propval)) {
2266 if (strcmp(propval,
2267 ZFS_FEATURE_DISABLED) == 0) {
2268 (void) nvlist_remove_all(props,
2269 propname);
2270 } else if (strcmp(propval,
2271 ZFS_FEATURE_ENABLED) == 0 &&
2272 !requested_features[i]) {
2273 (void) fprintf(stderr, gettext(
2274 "Warning: feature \"%s\" enabled "
2275 "but is not in specified "
2276 "'compatibility' feature set.\n"),
2277 feat->fi_uname);
2278 }
2279 } else if (
2280 enable_pool_features &&
2281 feat->fi_zfs_mod_supported &&
2282 requested_features[i]) {
2283 ret = add_prop_list(propname,
2284 ZFS_FEATURE_ENABLED, &props, B_TRUE);
2285 if (ret != 0)
2286 goto errout;
2287 }
2288 }
2289
2290 ret = 1;
2291 if (zpool_create(g_zfs, poolname,
2292 nvroot, props, fsprops) == 0) {
2293 zfs_handle_t *pool = zfs_open(g_zfs,
2294 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
2295 if (pool != NULL) {
2296 if (zfs_mount(pool, NULL, 0) == 0) {
2297 ret = zfs_share(pool, NULL);
2298 zfs_commit_shares(NULL);
2299 }
2300 zfs_close(pool);
2301 }
2302 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
2303 (void) fprintf(stderr, gettext("pool name may have "
2304 "been omitted\n"));
2305 }
2306 }
2307
2308 errout:
2309 nvlist_free(nvroot);
2310 nvlist_free(fsprops);
2311 nvlist_free(props);
2312 return (ret);
2313 badusage:
2314 nvlist_free(fsprops);
2315 nvlist_free(props);
2316 usage(B_FALSE);
2317 return (2);
2318 }
2319
2320 /*
2321 * zpool destroy <pool>
2322 *
2323 * -f Forcefully unmount any datasets
2324 *
2325 * Destroy the given pool. Automatically unmounts any datasets in the pool.
2326 */
2327 int
zpool_do_destroy(int argc,char ** argv)2328 zpool_do_destroy(int argc, char **argv)
2329 {
2330 boolean_t force = B_FALSE;
2331 int c;
2332 char *pool;
2333 zpool_handle_t *zhp;
2334 int ret;
2335
2336 /* check options */
2337 while ((c = getopt(argc, argv, "f")) != -1) {
2338 switch (c) {
2339 case 'f':
2340 force = B_TRUE;
2341 break;
2342 case '?':
2343 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2344 optopt);
2345 usage(B_FALSE);
2346 }
2347 }
2348
2349 argc -= optind;
2350 argv += optind;
2351
2352 /* check arguments */
2353 if (argc < 1) {
2354 (void) fprintf(stderr, gettext("missing pool argument\n"));
2355 usage(B_FALSE);
2356 }
2357 if (argc > 1) {
2358 (void) fprintf(stderr, gettext("too many arguments\n"));
2359 usage(B_FALSE);
2360 }
2361
2362 pool = argv[0];
2363
2364 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
2365 /*
2366 * As a special case, check for use of '/' in the name, and
2367 * direct the user to use 'zfs destroy' instead.
2368 */
2369 if (strchr(pool, '/') != NULL)
2370 (void) fprintf(stderr, gettext("use 'zfs destroy' to "
2371 "destroy a dataset\n"));
2372 return (1);
2373 }
2374
2375 if (zpool_disable_datasets(zhp, force) != 0) {
2376 (void) fprintf(stderr, gettext("could not destroy '%s': "
2377 "could not unmount datasets\n"), zpool_get_name(zhp));
2378 zpool_close(zhp);
2379 return (1);
2380 }
2381
2382 /* The history must be logged as part of the export */
2383 log_history = B_FALSE;
2384
2385 ret = (zpool_destroy(zhp, history_str) != 0);
2386
2387 zpool_close(zhp);
2388
2389 return (ret);
2390 }
2391
/* Shared state for 'zpool export', including the parallel '-a' path. */
typedef struct export_cbdata {
	tpool_t *tpool;		/* non-NULL only for parallel (-a) export */
	pthread_mutex_t mnttab_lock; /* serializes mnttab access across tasks */
	boolean_t force;	/* -f: forcefully unmount datasets */
	boolean_t hardforce;	/* -F: use zpool_export_force() */
	int retval;		/* sticky failure status set by worker tasks */
} export_cbdata_t;


/*
 * Arguments handed to one asynchronous export task.  The task owns (and
 * frees) both the structure and aea_poolname.
 */
typedef struct {
	char *aea_poolname;		/* pool name (heap copy; zhp is gone) */
	export_cbdata_t *aea_cbdata;	/* shared export state */
} async_export_args_t;
2405
2406 /*
2407 * Export one pool
2408 */
2409 static int
zpool_export_one(zpool_handle_t * zhp,void * data)2410 zpool_export_one(zpool_handle_t *zhp, void *data)
2411 {
2412 export_cbdata_t *cb = data;
2413
2414 /*
2415 * zpool_disable_datasets() is not thread-safe for mnttab access.
2416 * So we serialize access here for 'zpool export -a' parallel case.
2417 */
2418 if (cb->tpool != NULL)
2419 (void) pthread_mutex_lock(&cb->mnttab_lock);
2420
2421 int retval = zpool_disable_datasets(zhp, cb->force);
2422
2423 if (cb->tpool != NULL)
2424 (void) pthread_mutex_unlock(&cb->mnttab_lock);
2425
2426 if (retval)
2427 return (1);
2428
2429 if (cb->hardforce) {
2430 if (zpool_export_force(zhp, history_str) != 0)
2431 return (1);
2432 } else if (zpool_export(zhp, cb->force, history_str) != 0) {
2433 return (1);
2434 }
2435
2436 return (0);
2437 }
2438
2439 /*
2440 * Asynchronous export request
2441 */
2442 static void
zpool_export_task(void * arg)2443 zpool_export_task(void *arg)
2444 {
2445 async_export_args_t *aea = arg;
2446
2447 zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname);
2448 if (zhp != NULL) {
2449 int ret = zpool_export_one(zhp, aea->aea_cbdata);
2450 if (ret != 0)
2451 aea->aea_cbdata->retval = ret;
2452 zpool_close(zhp);
2453 } else {
2454 aea->aea_cbdata->retval = 1;
2455 }
2456
2457 free(aea->aea_poolname);
2458 free(aea);
2459 }
2460
2461 /*
2462 * Process an export request in parallel
2463 */
2464 static int
zpool_export_one_async(zpool_handle_t * zhp,void * data)2465 zpool_export_one_async(zpool_handle_t *zhp, void *data)
2466 {
2467 tpool_t *tpool = ((export_cbdata_t *)data)->tpool;
2468 async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t));
2469
2470 /* save pool name since zhp will go out of scope */
2471 aea->aea_poolname = strdup(zpool_get_name(zhp));
2472 aea->aea_cbdata = data;
2473
2474 /* ship off actual export to another thread */
2475 if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0)
2476 return (errno); /* unlikely */
2477 else
2478 return (0);
2479 }
2480
2481 /*
 * zpool export [-a] [-f] [-F] <pool> ...
 *
 *	-a	Export all pools
 *	-f	Forcefully unmount datasets
 *	-F	Hard-force the export (zpool_export_force())
2486 *
2487 * Export the given pools. By default, the command will attempt to cleanly
2488 * unmount any active datasets within the pool. If the '-f' flag is specified,
2489 * then the datasets will be forcefully unmounted.
2490 */
2491 int
zpool_do_export(int argc,char ** argv)2492 zpool_do_export(int argc, char **argv)
2493 {
2494 export_cbdata_t cb;
2495 boolean_t do_all = B_FALSE;
2496 boolean_t force = B_FALSE;
2497 boolean_t hardforce = B_FALSE;
2498 int c, ret;
2499
2500 /* check options */
2501 while ((c = getopt(argc, argv, "afF")) != -1) {
2502 switch (c) {
2503 case 'a':
2504 do_all = B_TRUE;
2505 break;
2506 case 'f':
2507 force = B_TRUE;
2508 break;
2509 case 'F':
2510 hardforce = B_TRUE;
2511 break;
2512 case '?':
2513 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2514 optopt);
2515 usage(B_FALSE);
2516 }
2517 }
2518
2519 cb.force = force;
2520 cb.hardforce = hardforce;
2521 cb.tpool = NULL;
2522 cb.retval = 0;
2523 argc -= optind;
2524 argv += optind;
2525
2526 /* The history will be logged as part of the export itself */
2527 log_history = B_FALSE;
2528
2529 if (do_all) {
2530 if (argc != 0) {
2531 (void) fprintf(stderr, gettext("too many arguments\n"));
2532 usage(B_FALSE);
2533 }
2534
2535 cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
2536 0, NULL);
2537 (void) pthread_mutex_init(&cb.mnttab_lock, NULL);
2538
2539 /* Asynchronously call zpool_export_one using thread pool */
2540 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2541 B_FALSE, zpool_export_one_async, &cb);
2542
2543 tpool_wait(cb.tpool);
2544 tpool_destroy(cb.tpool);
2545 (void) pthread_mutex_destroy(&cb.mnttab_lock);
2546
2547 return (ret | cb.retval);
2548 }
2549
2550 /* check arguments */
2551 if (argc < 1) {
2552 (void) fprintf(stderr, gettext("missing pool argument\n"));
2553 usage(B_FALSE);
2554 }
2555
2556 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2557 B_FALSE, zpool_export_one, &cb);
2558
2559 return (ret);
2560 }
2561
2562 /*
2563 * Given a vdev configuration, determine the maximum width needed for the device
2564 * name column.
2565 */
2566 static int
max_width(zpool_handle_t * zhp,nvlist_t * nv,int depth,int max,int name_flags)2567 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
2568 int name_flags)
2569 {
2570 static const char *const subtypes[] =
2571 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
2572
2573 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
2574 max = MAX(strlen(name) + depth, max);
2575 free(name);
2576
2577 nvlist_t **child;
2578 uint_t children;
2579 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
2580 if (nvlist_lookup_nvlist_array(nv, subtypes[i],
2581 &child, &children) == 0)
2582 for (uint_t c = 0; c < children; ++c)
2583 max = MAX(max_width(zhp, child[c], depth + 2,
2584 max, name_flags), max);
2585
2586 return (max);
2587 }
2588
/* Options and state threaded through the 'zpool status' printing code. */
typedef struct status_cbdata {
	int cb_count;
	int cb_name_flags;		/* VDEV_NAME_* flags for vdev names */
	int cb_namewidth;		/* width of the NAME column */
	boolean_t cb_allpools;
	boolean_t cb_verbose;
	boolean_t cb_literal;		/* print exact counters, not nicenums */
	boolean_t cb_explain;
	boolean_t cb_first;
	boolean_t cb_dedup_stats;
	boolean_t cb_print_unhealthy;	/* -e: prune all-healthy subtrees */
	boolean_t cb_print_status;
	boolean_t cb_print_slow_ios;	/* -s: add the slow-I/O column */
	boolean_t cb_print_dio_verify;	/* add Direct I/O verify error column */
	boolean_t cb_print_vdev_init;	/* verbose initialization status */
	boolean_t cb_print_vdev_trim;	/* verbose TRIM status */
	vdev_cmd_data_list_t *vcdl;	/* -c: per-vdev command output, if any */
	boolean_t cb_print_power;	/* add enclosure power-state column */
	boolean_t cb_json;		/* emit JSON instead of plain text */
	boolean_t cb_flat_vdevs;
	nvlist_t *cb_jsobj;		/* root object for JSON output */
	boolean_t cb_json_as_int;
	boolean_t cb_json_pool_key_guid; /* key pools by GUID - TODO confirm */
} status_cbdata_t;
2613
2614 /* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
2615 static boolean_t
is_blank_str(const char * str)2616 is_blank_str(const char *str)
2617 {
2618 for (; str != NULL && *str != '\0'; ++str)
2619 if (!isblank(*str))
2620 return (B_FALSE);
2621 return (B_TRUE);
2622 }
2623
2624 static void
zpool_nvlist_cmd(vdev_cmd_data_list_t * vcdl,const char * pool,const char * path,nvlist_t * item)2625 zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,
2626 nvlist_t *item)
2627 {
2628 vdev_cmd_data_t *data;
2629 int i, j, k = 1;
2630 char tmp[256];
2631 const char *val;
2632
2633 for (i = 0; i < vcdl->count; i++) {
2634 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2635 (strcmp(vcdl->data[i].pool, pool) != 0))
2636 continue;
2637
2638 data = &vcdl->data[i];
2639 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2640 val = NULL;
2641 for (int k = 0; k < data->cols_cnt; k++) {
2642 if (strcmp(data->cols[k],
2643 vcdl->uniq_cols[j]) == 0) {
2644 val = data->lines[k];
2645 break;
2646 }
2647 }
2648 if (val == NULL || is_blank_str(val))
2649 val = "-";
2650 fnvlist_add_string(item, vcdl->uniq_cols[j], val);
2651 }
2652
2653 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2654 if (data->lines[j]) {
2655 (void) snprintf(tmp, 256, "extra_%d", k++);
2656 fnvlist_add_string(item, tmp,
2657 data->lines[j]);
2658 }
2659 }
2660 break;
2661 }
2662 }
2663
2664 /* Print command output lines for specific vdev in a specific pool */
2665 static void
zpool_print_cmd(vdev_cmd_data_list_t * vcdl,const char * pool,const char * path)2666 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
2667 {
2668 vdev_cmd_data_t *data;
2669 int i, j;
2670 const char *val;
2671
2672 for (i = 0; i < vcdl->count; i++) {
2673 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2674 (strcmp(vcdl->data[i].pool, pool) != 0)) {
2675 /* Not the vdev we're looking for */
2676 continue;
2677 }
2678
2679 data = &vcdl->data[i];
2680 /* Print out all the output values for this vdev */
2681 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2682 val = NULL;
2683 /* Does this vdev have values for this column? */
2684 for (int k = 0; k < data->cols_cnt; k++) {
2685 if (strcmp(data->cols[k],
2686 vcdl->uniq_cols[j]) == 0) {
2687 /* yes it does, record the value */
2688 val = data->lines[k];
2689 break;
2690 }
2691 }
2692 /*
2693 * Mark empty values with dashes to make output
2694 * awk-able.
2695 */
2696 if (val == NULL || is_blank_str(val))
2697 val = "-";
2698
2699 printf("%*s", vcdl->uniq_cols_width[j], val);
2700 if (j < vcdl->uniq_cols_cnt - 1)
2701 (void) fputs(" ", stdout);
2702 }
2703
2704 /* Print out any values that aren't in a column at the end */
2705 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2706 /* Did we have any columns? If so print a spacer. */
2707 if (vcdl->uniq_cols_cnt > 0)
2708 (void) fputs(" ", stdout);
2709
2710 val = data->lines[j];
2711 (void) fputs(val ?: "", stdout);
2712 }
2713 break;
2714 }
2715 }
2716
2717 /*
2718 * Print vdev initialization status for leaves
2719 */
2720 static void
print_status_initialize(vdev_stat_t * vs,boolean_t verbose)2721 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2722 {
2723 if (verbose) {
2724 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2725 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2726 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2727 !vs->vs_scan_removing) {
2728 char zbuf[1024];
2729 char tbuf[256];
2730
2731 time_t t = vs->vs_initialize_action_time;
2732 int initialize_pct = 100;
2733 if (vs->vs_initialize_state !=
2734 VDEV_INITIALIZE_COMPLETE) {
2735 initialize_pct = (vs->vs_initialize_bytes_done *
2736 100 / (vs->vs_initialize_bytes_est + 1));
2737 }
2738
2739 (void) ctime_r(&t, tbuf);
2740 tbuf[24] = 0;
2741
2742 switch (vs->vs_initialize_state) {
2743 case VDEV_INITIALIZE_SUSPENDED:
2744 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2745 gettext("suspended, started at"), tbuf);
2746 break;
2747 case VDEV_INITIALIZE_ACTIVE:
2748 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2749 gettext("started at"), tbuf);
2750 break;
2751 case VDEV_INITIALIZE_COMPLETE:
2752 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2753 gettext("completed at"), tbuf);
2754 break;
2755 }
2756
2757 (void) printf(gettext(" (%d%% initialized%s)"),
2758 initialize_pct, zbuf);
2759 } else {
2760 (void) printf(gettext(" (uninitialized)"));
2761 }
2762 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2763 (void) printf(gettext(" (initializing)"));
2764 }
2765 }
2766
2767 /*
2768 * Print vdev TRIM status for leaves
2769 */
2770 static void
print_status_trim(vdev_stat_t * vs,boolean_t verbose)2771 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2772 {
2773 if (verbose) {
2774 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2775 vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2776 vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2777 !vs->vs_scan_removing) {
2778 char zbuf[1024];
2779 char tbuf[256];
2780
2781 time_t t = vs->vs_trim_action_time;
2782 int trim_pct = 100;
2783 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
2784 trim_pct = (vs->vs_trim_bytes_done *
2785 100 / (vs->vs_trim_bytes_est + 1));
2786 }
2787
2788 (void) ctime_r(&t, tbuf);
2789 tbuf[24] = 0;
2790
2791 switch (vs->vs_trim_state) {
2792 case VDEV_TRIM_SUSPENDED:
2793 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2794 gettext("suspended, started at"), tbuf);
2795 break;
2796 case VDEV_TRIM_ACTIVE:
2797 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2798 gettext("started at"), tbuf);
2799 break;
2800 case VDEV_TRIM_COMPLETE:
2801 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2802 gettext("completed at"), tbuf);
2803 break;
2804 }
2805
2806 (void) printf(gettext(" (%d%% trimmed%s)"),
2807 trim_pct, zbuf);
2808 } else if (vs->vs_trim_notsup) {
2809 (void) printf(gettext(" (trim unsupported)"));
2810 } else {
2811 (void) printf(gettext(" (untrimmed)"));
2812 }
2813 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2814 (void) printf(gettext(" (trimming)"));
2815 }
2816 }
2817
2818 /*
2819 * Return the color associated with a health string. This includes returning
2820 * NULL for no color change.
2821 */
2822 static const char *
health_str_to_color(const char * health)2823 health_str_to_color(const char *health)
2824 {
2825 if (strcmp(health, gettext("FAULTED")) == 0 ||
2826 strcmp(health, gettext("SUSPENDED")) == 0 ||
2827 strcmp(health, gettext("UNAVAIL")) == 0) {
2828 return (ANSI_RED);
2829 }
2830
2831 if (strcmp(health, gettext("OFFLINE")) == 0 ||
2832 strcmp(health, gettext("DEGRADED")) == 0 ||
2833 strcmp(health, gettext("REMOVED")) == 0) {
2834 return (ANSI_YELLOW);
2835 }
2836
2837 return (NULL);
2838 }
2839
2840 /*
2841 * Called for each leaf vdev. Returns 0 if the vdev is healthy.
2842 * A vdev is unhealthy if any of the following are true:
2843 * 1) there are read, write, or checksum errors,
2844 * 2) its state is not ONLINE, or
2845 * 3) slow IO reporting was requested (-s) and there are slow IOs.
2846 */
2847 static int
vdev_health_check_cb(void * hdl_data,nvlist_t * nv,void * data)2848 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
2849 {
2850 status_cbdata_t *cb = data;
2851 vdev_stat_t *vs;
2852 uint_t vsc;
2853 (void) hdl_data;
2854
2855 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2856 (uint64_t **)&vs, &vsc) != 0)
2857 return (1);
2858
2859 if (vs->vs_checksum_errors || vs->vs_read_errors ||
2860 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
2861 return (1);
2862
2863 if (cb->cb_print_slow_ios && vs->vs_slow_ios)
2864 return (1);
2865
2866 return (0);
2867 }
2868
/*
 * Print out configuration state as requested by status_callback.
 *
 * Prints one line for this vdev (indented name, state, error counters,
 * optional columns, and any auxiliary diagnostic text), then recurses into
 * the normal-class children.  Log and hole vdevs, and children carrying an
 * allocation bias, are skipped here and printed by their own sections.
 */
static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
    nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
{
	nvlist_t **child, *root;
	uint_t c, i, vsc, children;
	pool_scan_stat_t *ps = NULL;
	vdev_stat_t *vs;
	char rbuf[6], wbuf[6], cbuf[6], dbuf[6];
	char *vname;
	uint64_t notpresent;
	spare_cbdata_t spare_cb;
	const char *state;
	const char *type;
	const char *path = NULL;
	const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
	    *scolor = NULL;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

	/* indirect vdevs are never displayed */
	if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
		return;

	state = zpool_state_to_name(vs->vs_state, vs->vs_aux);

	if (isspare) {
		/*
		 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
		 * online drives.
		 */
		if (vs->vs_aux == VDEV_AUX_SPARED)
			state = gettext("INUSE");
		else if (vs->vs_state == VDEV_STATE_HEALTHY)
			state = gettext("AVAIL");
	}

	/*
	 * If '-e' is specified then top-level vdevs and their children
	 * can be pruned if all of their leaves are healthy.
	 */
	if (cb->cb_print_unhealthy && depth > 0 &&
	    for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
		return;
	}

	(void) printf_color(health_str_to_color(state),
	    "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
	    name, state);

	/* error counters and optional columns apply to non-spare vdevs only */
	if (!isspare) {
		if (vs->vs_read_errors)
			rcolor = ANSI_RED;

		if (vs->vs_write_errors)
			wcolor = ANSI_RED;

		if (vs->vs_checksum_errors)
			ccolor = ANSI_RED;

		if (vs->vs_slow_ios)
			scolor = ANSI_BLUE;

		if (cb->cb_literal) {
			/* exact counts (no unit suffixes) */
			(void) fputc(' ', stdout);
			(void) printf_color(rcolor, "%5llu",
			    (u_longlong_t)vs->vs_read_errors);
			(void) fputc(' ', stdout);
			(void) printf_color(wcolor, "%5llu",
			    (u_longlong_t)vs->vs_write_errors);
			(void) fputc(' ', stdout);
			(void) printf_color(ccolor, "%5llu",
			    (u_longlong_t)vs->vs_checksum_errors);
		} else {
			zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
			zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
			zfs_nicenum(vs->vs_checksum_errors, cbuf,
			    sizeof (cbuf));
			(void) fputc(' ', stdout);
			(void) printf_color(rcolor, "%5s", rbuf);
			(void) fputc(' ', stdout);
			(void) printf_color(wcolor, "%5s", wbuf);
			(void) fputc(' ', stdout);
			(void) printf_color(ccolor, "%5s", cbuf);
		}
		if (cb->cb_print_slow_ios) {
			if (children == 0) {
				/* Only leafs vdevs have slow IOs */
				zfs_nicenum(vs->vs_slow_ios, rbuf,
				    sizeof (rbuf));
			} else {
				(void) snprintf(rbuf, sizeof (rbuf), "-");
			}

			if (cb->cb_literal)
				(void) printf_color(scolor, " %5llu",
				    (u_longlong_t)vs->vs_slow_ios);
			else
				(void) printf_color(scolor, " %5s", rbuf);
		}
		if (cb->cb_print_power) {
			if (children == 0) {
				/* Only leaf vdevs have physical slots */
				switch (zpool_power_current_state(zhp, (char *)
				    fnvlist_lookup_string(nv,
				    ZPOOL_CONFIG_PATH))) {
				case 0:
					(void) printf_color(ANSI_RED, " %5s",
					    gettext("off"));
					break;
				case 1:
					printf(" %5s", gettext("on"));
					break;
				default:
					/* power state unknown/unavailable */
					printf(" %5s", "-");
				}
			} else {
				printf(" %5s", "-");
			}
		}
		/* Direct I/O verify errors (only if the kernel reports them) */
		if (VDEV_STAT_VALID(vs_dio_verify_errors, vsc) &&
		    cb->cb_print_dio_verify) {
			zfs_nicenum(vs->vs_dio_verify_errors, dbuf,
			    sizeof (dbuf));

			if (cb->cb_literal)
				printf(" %5llu",
				    (u_longlong_t)vs->vs_dio_verify_errors);
			else
				printf(" %5s", dbuf);
		}
	}

	/* trailing diagnostic text explaining the state, if any */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &notpresent) == 0) {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
		(void) printf(" %s %s", gettext("was"), path);
	} else if (vs->vs_aux != 0) {
		(void) printf(" ");
		color_start(ANSI_RED);
		switch (vs->vs_aux) {
		case VDEV_AUX_OPEN_FAILED:
			(void) printf(gettext("cannot open"));
			break;

		case VDEV_AUX_BAD_GUID_SUM:
			(void) printf(gettext("missing device"));
			break;

		case VDEV_AUX_NO_REPLICAS:
			(void) printf(gettext("insufficient replicas"));
			break;

		case VDEV_AUX_VERSION_NEWER:
			(void) printf(gettext("newer version"));
			break;

		case VDEV_AUX_UNSUP_FEAT:
			(void) printf(gettext("unsupported feature(s)"));
			break;

		case VDEV_AUX_ASHIFT_TOO_BIG:
			(void) printf(gettext("unsupported minimum blocksize"));
			break;

		case VDEV_AUX_SPARED:
			/* find which pool (possibly this one) uses the spare */
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &spare_cb.cb_guid) == 0);
			if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
				if (strcmp(zpool_get_name(spare_cb.cb_zhp),
				    zpool_get_name(zhp)) == 0)
					(void) printf(gettext("currently in "
					    "use"));
				else
					(void) printf(gettext("in use by "
					    "pool '%s'"),
					    zpool_get_name(spare_cb.cb_zhp));
				zpool_close(spare_cb.cb_zhp);
			} else {
				(void) printf(gettext("currently in use"));
			}
			break;

		case VDEV_AUX_ERR_EXCEEDED:
			/* distinguish slow-I/O faults from error faults */
			if (vs->vs_read_errors + vs->vs_write_errors +
			    vs->vs_checksum_errors == 0 && children == 0 &&
			    vs->vs_slow_ios > 0) {
				(void) printf(gettext("too many slow I/Os"));
			} else {
				(void) printf(gettext("too many errors"));
			}
			break;

		case VDEV_AUX_IO_FAILURE:
			(void) printf(gettext("experienced I/O failures"));
			break;

		case VDEV_AUX_BAD_LOG:
			(void) printf(gettext("bad intent log"));
			break;

		case VDEV_AUX_EXTERNAL:
			(void) printf(gettext("external device fault"));
			break;

		case VDEV_AUX_SPLIT_POOL:
			(void) printf(gettext("split into new pool"));
			break;

		case VDEV_AUX_ACTIVE:
			(void) printf(gettext("currently in use"));
			break;

		case VDEV_AUX_CHILDREN_OFFLINE:
			(void) printf(gettext("all children offline"));
			break;

		case VDEV_AUX_BAD_LABEL:
			(void) printf(gettext("invalid label"));
			break;

		default:
			(void) printf(gettext("corrupted data"));
			break;
		}
		color_end();
	} else if (children == 0 && !isspare &&
	    getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
	    VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
	    vs->vs_configured_ashift < vs->vs_physical_ashift) {
		/* warn when the device's native block size exceeds ashift */
		(void) printf(
		    gettext(" block size: %dB configured, %dB native"),
		    1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
	}

	if (vs->vs_scan_removing != 0) {
		(void) printf(gettext(" (removing)"));
	} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
		(void) printf(gettext(" (non-allocating)"));
	}

	/* The root vdev has the scrub/resilver stats */
	root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE);
	(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &c);

	/*
	 * If you force fault a drive that's resilvering, its scan stats can
	 * get frozen in time, giving the false impression that it's
	 * being resilvered. That's why we check the state to see if the vdev
	 * is healthy before reporting "resilvering" or "repairing".
	 */
	if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
	    vs->vs_state == VDEV_STATE_HEALTHY) {
		if (vs->vs_scan_processed != 0) {
			(void) printf(gettext(" (%s)"),
			    (ps->pss_func == POOL_SCAN_RESILVER) ?
			    "resilvering" : "repairing");
		} else if (vs->vs_resilver_deferred) {
			(void) printf(gettext(" (awaiting resilver)"));
		}
	}

	/* The top-level vdevs have the rebuild stats */
	if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
	    children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
		if (vs->vs_rebuild_processed != 0) {
			(void) printf(gettext(" (resilvering)"));
		}
	}

	/* -c command output for this vdev, if available */
	if (cb->vcdl != NULL) {
		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
			printf("  ");
			zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
		}
	}

	/* Display vdev initialization and trim status for leaves. */
	if (children == 0) {
		print_status_initialize(vs, cb->cb_print_vdev_init);
		print_status_trim(vs, cb->cb_print_vdev_trim);
	}

	(void) printf("\n");

	for (c = 0; c < children; c++) {
		uint64_t islog = B_FALSE, ishole = B_FALSE;

		/* Don't print logs or holes here */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &islog);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &ishole);
		if (islog || ishole)
			continue;
		/* Only print normal classes here */
		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;

		/* Provide vdev_rebuild_stats to children if available */
		if (vrs == NULL) {
			(void) nvlist_lookup_uint64_array(nv,
			    ZPOOL_CONFIG_REBUILD_STATS,
			    (uint64_t **)&vrs, &i);
		}

		vname = zpool_vdev_name(g_zfs, zhp, child[c],
		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
		print_status_config(zhp, cb, vname, child[c], depth + 2,
		    isspare, vrs);
		free(vname);
	}
}
3193
/*
 * Print the configuration of an exported pool. Iterate over all vdevs in the
 * pool, printing out the name and status for each one.
 *
 * Unlike print_status_config(), this works from the label config alone (no
 * open pool handle), so only name, state, and auxiliary text are shown.
 * Normal-class children are printed recursively; cache and spare devices are
 * listed flat at the end.
 */
static void
print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
    int depth)
{
	nvlist_t **child;
	uint_t c, children;
	vdev_stat_t *vs;
	const char *type;
	char *vname;

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
	/* placeholder vdevs are not shown */
	if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
	    strcmp(type, VDEV_TYPE_HOLE) == 0)
		return;

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);

	(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
	(void) printf("  %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));

	/* append a short explanation for any auxiliary state */
	if (vs->vs_aux != 0) {
		(void) printf("  ");

		switch (vs->vs_aux) {
		case VDEV_AUX_OPEN_FAILED:
			(void) printf(gettext("cannot open"));
			break;

		case VDEV_AUX_BAD_GUID_SUM:
			(void) printf(gettext("missing device"));
			break;

		case VDEV_AUX_NO_REPLICAS:
			(void) printf(gettext("insufficient replicas"));
			break;

		case VDEV_AUX_VERSION_NEWER:
			(void) printf(gettext("newer version"));
			break;

		case VDEV_AUX_UNSUP_FEAT:
			(void) printf(gettext("unsupported feature(s)"));
			break;

		case VDEV_AUX_ERR_EXCEEDED:
			(void) printf(gettext("too many errors"));
			break;

		case VDEV_AUX_ACTIVE:
			(void) printf(gettext("currently in use"));
			break;

		case VDEV_AUX_CHILDREN_OFFLINE:
			(void) printf(gettext("all children offline"));
			break;

		case VDEV_AUX_BAD_LABEL:
			(void) printf(gettext("invalid label"));
			break;

		default:
			(void) printf(gettext("corrupted data"));
			break;
		}
	}
	(void) printf("\n");

	/* recurse into normal-class children only */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE;

		/* logs and biased classes are printed by their own sections */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			continue;
		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;

		vname = zpool_vdev_name(g_zfs, NULL, child[c],
		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
		print_import_config(cb, vname, child[c], depth + 2);
		free(vname);
	}

	/* cache (L2ARC) devices, listed flat */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		(void) printf(gettext("\tcache\n"));
		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, NULL, child[c],
			    cb->cb_name_flags);
			(void) printf("\t  %s\n", vname);
			free(vname);
		}
	}

	/* hot spares, listed flat */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		(void) printf(gettext("\tspares\n"));
		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, NULL, child[c],
			    cb->cb_name_flags);
			(void) printf("\t  %s\n", vname);
			free(vname);
		}
	}
}
3308
3309 /*
3310 * Print specialized class vdevs.
3311 *
3312 * These are recorded as top level vdevs in the main pool child array
3313 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
3314 * print_status_config() or print_import_config() to print the top level
3315 * class vdevs then any of their children (eg mirrored slogs) are printed
3316 * recursively - which works because only the top level vdev is marked.
3317 */
3318 static void
print_class_vdevs(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t * nv,const char * class)3319 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
3320 const char *class)
3321 {
3322 uint_t c, children;
3323 nvlist_t **child;
3324 boolean_t printed = B_FALSE;
3325
3326 assert(zhp != NULL || !cb->cb_verbose);
3327
3328 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
3329 &children) != 0)
3330 return;
3331
3332 for (c = 0; c < children; c++) {
3333 uint64_t is_log = B_FALSE;
3334 const char *bias = NULL;
3335 const char *type = NULL;
3336
3337 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3338 &is_log);
3339
3340 if (is_log) {
3341 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
3342 } else {
3343 (void) nvlist_lookup_string(child[c],
3344 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
3345 (void) nvlist_lookup_string(child[c],
3346 ZPOOL_CONFIG_TYPE, &type);
3347 }
3348
3349 if (bias == NULL || strcmp(bias, class) != 0)
3350 continue;
3351 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
3352 continue;
3353
3354 if (!printed) {
3355 (void) printf("\t%s\t\n", gettext(class));
3356 printed = B_TRUE;
3357 }
3358
3359 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
3360 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3361 if (cb->cb_print_status)
3362 print_status_config(zhp, cb, name, child[c], 2,
3363 B_FALSE, NULL);
3364 else
3365 print_import_config(cb, name, child[c], 2);
3366 free(name);
3367 }
3368 }
3369
3370 /*
3371 * Display the status for the given pool.
3372 */
/*
 * Returns 0 after printing the report, or the nonzero status reason
 * (without printing anything) when report_error is B_FALSE and the pool
 * has a problem -- the cachefile pre-scan case.
 */
static int
show_import(nvlist_t *config, boolean_t report_error)
{
	uint64_t pool_state;
	vdev_stat_t *vs;
	const char *name;
	uint64_t guid;
	uint64_t hostid = 0;
	const char *msgid;
	const char *hostname = "unknown";
	nvlist_t *nvroot, *nvinfo;
	zpool_status_t reason;
	zpool_errata_t errata;
	const char *health;
	uint_t vsc;
	const char *comment;
	const char *indent;
	char buf[2048];
	status_cbdata_t cb = { 0 };

	/* These keys are always present in a valid import candidate. */
	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &name) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &pool_state) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	health = zpool_state_to_name(vs->vs_state, vs->vs_aux);

	reason = zpool_import_status(config, &msgid, &errata);

	/*
	 * If we're importing using a cachefile, then we won't report any
	 * errors unless we are in the scan phase of the import.
	 */
	if (reason != ZPOOL_STATUS_OK && !report_error)
		return (reason);

	/* A pool comment shifts the entire report right by one column. */
	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) {
		indent = " ";
	} else {
		comment = NULL;
		indent = "";
	}

	(void) printf(gettext("%s pool: %s\n"), indent, name);
	(void) printf(gettext("%s id: %llu\n"), indent, (u_longlong_t)guid);
	(void) printf(gettext("%s state: %s"), indent, health);
	if (pool_state == POOL_STATE_DESTROYED)
		(void) printf(gettext(" (DESTROYED)"));
	(void) printf("\n");

	/* One-line human-readable explanation of the pool's status. */
	if (reason != ZPOOL_STATUS_OK) {
		(void) printf("%s", indent);
		(void) printf_color(ANSI_BOLD, gettext("status: "));
	}
	switch (reason) {
	case ZPOOL_STATUS_MISSING_DEV_R:
	case ZPOOL_STATUS_MISSING_DEV_NR:
	case ZPOOL_STATUS_BAD_GUID_SUM:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "are missing from the system.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_LABEL_R:
	case ZPOOL_STATUS_CORRUPT_LABEL_NR:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "contains corrupted data.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_DATA:
		(void) printf_color(ANSI_YELLOW, gettext("The pool data is "
		    "corrupted.\n"));
		break;

	case ZPOOL_STATUS_OFFLINE_DEV:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "are offlined.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_POOL:
		(void) printf_color(ANSI_YELLOW, gettext("The pool metadata is "
		    "corrupted.\n"));
		break;

	case ZPOOL_STATUS_VERSION_OLDER:
		(void) printf_color(ANSI_YELLOW, gettext("The pool is "
		    "formatted using a legacy on-disk version.\n"));
		break;

	case ZPOOL_STATUS_VERSION_NEWER:
		(void) printf_color(ANSI_YELLOW, gettext("The pool is "
		    "formatted using an incompatible version.\n"));
		break;

	case ZPOOL_STATUS_FEAT_DISABLED:
		(void) printf_color(ANSI_YELLOW, gettext("Some supported "
		    "features are not enabled on the pool.\n"
		    "\t%s(Note that they may be intentionally disabled if the\n"
		    "\t%s'compatibility' property is set.)\n"), indent, indent);
		break;

	case ZPOOL_STATUS_COMPATIBILITY_ERR:
		(void) printf_color(ANSI_YELLOW, gettext("Error reading or "
		    "parsing the file(s) indicated by the 'compatibility'\n"
		    "\t%sproperty.\n"), indent);
		break;

	case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
		(void) printf_color(ANSI_YELLOW, gettext("One or more features "
		    "are enabled on the pool despite not being\n"
		    "\t%srequested by the 'compatibility' property.\n"),
		    indent);
		break;

	case ZPOOL_STATUS_UNSUP_FEAT_READ:
		(void) printf_color(ANSI_YELLOW, gettext("The pool uses the "
		    "following feature(s) not supported on this system:\n"));
		color_start(ANSI_YELLOW);
		/* Feature list is formatted into buf by libzfs. */
		zpool_collect_unsup_feat(config, buf, 2048);
		(void) printf("%s", buf);
		color_end();
		break;

	case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
		(void) printf_color(ANSI_YELLOW, gettext("The pool can only be "
		    "accessed in read-only mode on this system. It\n"
		    "\t%scannot be accessed in read-write mode because it uses "
		    "the following\n"
		    "\t%sfeature(s) not supported on this system:\n"),
		    indent, indent);
		color_start(ANSI_YELLOW);
		zpool_collect_unsup_feat(config, buf, 2048);
		(void) printf("%s", buf);
		color_end();
		break;

	case ZPOOL_STATUS_HOSTID_ACTIVE:
		(void) printf_color(ANSI_YELLOW, gettext("The pool is "
		    "currently imported by another system.\n"));
		break;

	case ZPOOL_STATUS_HOSTID_REQUIRED:
		(void) printf_color(ANSI_YELLOW, gettext("The pool has the "
		    "multihost property on. It cannot\n"
		    "\t%sbe safely imported when the system hostid is not "
		    "set.\n"), indent);
		break;

	case ZPOOL_STATUS_HOSTID_MISMATCH:
		(void) printf_color(ANSI_YELLOW, gettext("The pool was last "
		    "accessed by another system.\n"));
		break;

	case ZPOOL_STATUS_FAULTED_DEV_R:
	case ZPOOL_STATUS_FAULTED_DEV_NR:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "are faulted.\n"));
		break;

	case ZPOOL_STATUS_BAD_LOG:
		(void) printf_color(ANSI_YELLOW, gettext("An intent log record "
		    "cannot be read.\n"));
		break;

	case ZPOOL_STATUS_RESILVERING:
	case ZPOOL_STATUS_REBUILDING:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "were being resilvered.\n"));
		break;

	case ZPOOL_STATUS_ERRATA:
		(void) printf_color(ANSI_YELLOW,
		    gettext("Errata #%d detected.\n"),
		    errata);
		break;

	case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "are configured to use a non-native block size.\n"
		    "\t%sExpect reduced performance.\n"), indent);
		break;

	default:
		/*
		 * No other status can be seen when importing pools.
		 */
		assert(reason == ZPOOL_STATUS_OK);
	}

	/*
	 * Print out an action according to the overall state of the pool.
	 * The "action:" label is printed unless this is a healthy pool
	 * whose only status is an errata with no action to take
	 * (reason == ERRATA && errata == NONE).
	 */
	if (vs->vs_state != VDEV_STATE_HEALTHY ||
	    reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) {
		(void) printf("%s", indent);
		(void) printf(gettext("action: "));
	}
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		if (reason == ZPOOL_STATUS_VERSION_OLDER ||
		    reason == ZPOOL_STATUS_FEAT_DISABLED) {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric identifier, though\n"
			    "\t%ssome features will not be available without "
			    "an explicit 'zpool upgrade'.\n"), indent);
		} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric\n"
			    "\t%sidentifier, though the file(s) indicated by "
			    "its 'compatibility'\n"
			    "\t%sproperty cannot be parsed at this time.\n"),
			    indent, indent);
		} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric identifier and\n"
			    "\t%sthe '-f' flag.\n"), indent);
		} else if (reason == ZPOOL_STATUS_ERRATA) {
			/* Each errata carries its own corrective action. */
			switch (errata) {
			case ZPOOL_ERRATA_ZOL_2094_SCRUB:
				(void) printf(gettext("The pool can be "
				    "imported using its name or numeric "
				    "identifier,\n"
				    "\t%showever there is a compatibility "
				    "issue which should be corrected\n"
				    "\t%sby running 'zpool scrub'\n"),
				    indent, indent);
				break;

			case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
				(void) printf(gettext("The pool cannot be "
				    "imported with this version of ZFS due to\n"
				    "\t%san active asynchronous destroy. "
				    "Revert to an earlier version\n"
				    "\t%sand allow the destroy to complete "
				    "before updating.\n"), indent, indent);
				break;

			case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
				(void) printf(gettext("Existing encrypted "
				    "datasets contain an on-disk "
				    "incompatibility, which\n"
				    "\t%sneeds to be corrected. Backup these "
				    "datasets to new encrypted datasets\n"
				    "\t%sand destroy the old ones.\n"),
				    indent, indent);
				break;

			case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
				(void) printf(gettext("Existing encrypted "
				    "snapshots and bookmarks contain an "
				    "on-disk\n"
				    "\t%sincompatibility. This may cause "
				    "on-disk corruption if they are used\n"
				    "\t%swith 'zfs recv'. To correct the "
				    "issue, enable the bookmark_v2 feature.\n"
				    "\t%sNo additional action is needed if "
				    "there are no encrypted snapshots or\n"
				    "\t%sbookmarks. If preserving the "
				    "encrypted snapshots and bookmarks is\n"
				    "\t%srequired, use a non-raw send to "
				    "backup and restore them. Alternately,\n"
				    "\t%sthey may be removed to resolve the "
				    "incompatibility.\n"), indent, indent,
				    indent, indent, indent, indent);
				break;
			default:
				/*
				 * All errata must contain an action message.
				 */
				assert(errata == ZPOOL_ERRATA_NONE);
			}
		} else {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric identifier.\n"));
		}
	} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
		(void) printf(gettext("The pool can be imported despite "
		    "missing or damaged devices. The\n"
		    "\t%sfault tolerance of the pool may be compromised if "
		    "imported.\n"), indent);
	} else {
		/* Pool is neither healthy nor merely degraded. */
		switch (reason) {
		case ZPOOL_STATUS_VERSION_NEWER:
			(void) printf(gettext("The pool cannot be imported. "
			    "Access the pool on a system running newer\n"
			    "\t%ssoftware, or recreate the pool from "
			    "backup.\n"), indent);
			break;
		case ZPOOL_STATUS_UNSUP_FEAT_READ:
			(void) printf(gettext("The pool cannot be imported. "
			    "Access the pool on a system that supports\n"
			    "\t%sthe required feature(s), or recreate the pool "
			    "from backup.\n"), indent);
			break;
		case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
			(void) printf(gettext("The pool cannot be imported in "
			    "read-write mode. Import the pool with\n"
			    "\t%s'-o readonly=on', access the pool on a system "
			    "that supports the\n"
			    "\t%srequired feature(s), or recreate the pool "
			    "from backup.\n"), indent, indent);
			break;
		case ZPOOL_STATUS_MISSING_DEV_R:
		case ZPOOL_STATUS_MISSING_DEV_NR:
		case ZPOOL_STATUS_BAD_GUID_SUM:
			(void) printf(gettext("The pool cannot be imported. "
			    "Attach the missing\n"
			    "\t%sdevices and try again.\n"), indent);
			break;
		case ZPOOL_STATUS_HOSTID_ACTIVE:
			/* MMP says another host holds the pool open. */
			VERIFY0(nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo));

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
				hostname = fnvlist_lookup_string(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME);

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
				hostid = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID);

			(void) printf(gettext("The pool must be exported from "
			    "%s (hostid=%"PRIx64")\n"
			    "\t%sbefore it can be safely imported.\n"),
			    hostname, hostid, indent);
			break;
		case ZPOOL_STATUS_HOSTID_REQUIRED:
			(void) printf(gettext("Set a unique system hostid with "
			    "the zgenhostid(8) command.\n"));
			break;
		default:
			(void) printf(gettext("The pool cannot be imported due "
			    "to damaged devices or data.\n"));
		}
	}

	/* Print the comment attached to the pool. */
	if (comment != NULL)
		(void) printf(gettext("comment: %s\n"), comment);

	/*
	 * If the state is "closed" or "can't open", and the aux state
	 * is "corrupt data":
	 */
	if ((vs->vs_state == VDEV_STATE_CLOSED ||
	    vs->vs_state == VDEV_STATE_CANT_OPEN) &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA) {
		if (pool_state == POOL_STATE_DESTROYED)
			(void) printf(gettext("\t%sThe pool was destroyed, "
			    "but can be imported using the '-Df' flags.\n"),
			    indent);
		else if (pool_state != POOL_STATE_EXPORTED)
			(void) printf(gettext("\t%sThe pool may be active on "
			    "another system, but can be imported using\n"
			    "\t%sthe '-f' flag.\n"), indent, indent);
	}

	/* Point the user at the knowledge-base article for this status. */
	if (msgid != NULL) {
		(void) printf(gettext("%s see: "
		    "https://openzfs.github.io/openzfs-docs/msg/%s\n"),
		    indent, msgid);
	}

	(void) printf(gettext("%sconfig:\n\n"), indent);

	/* Size the name column to the widest vdev name (minimum 10). */
	cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
	    VDEV_NAME_TYPE_ID);
	if (cb.cb_namewidth < 10)
		cb.cb_namewidth = 10;

	print_import_config(&cb, name, nvroot, 0);

	/* Special allocation classes are listed after the main vdev tree. */
	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);

	if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
		(void) printf(gettext("\n\t%sAdditional devices are known to "
		    "be part of this pool, though their\n"
		    "\t%sexact configuration cannot be determined.\n"),
		    indent, indent);
	}
	return (0);
}
3761
3762 static boolean_t
zfs_force_import_required(nvlist_t * config)3763 zfs_force_import_required(nvlist_t *config)
3764 {
3765 uint64_t state;
3766 uint64_t hostid = 0;
3767 nvlist_t *nvinfo;
3768
3769 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3770 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3771
3772 /*
3773 * The hostid on LOAD_INFO comes from the MOS label via
3774 * spa_tryimport(). If its not there then we're likely talking to an
3775 * older kernel, so use the top one, which will be from the label
3776 * discovered in zpool_find_import(), or if a cachefile is in use, the
3777 * local hostid.
3778 */
3779 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
3780 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
3781 &hostid);
3782
3783 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3784 return (B_TRUE);
3785
3786 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3787 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3788 ZPOOL_CONFIG_MMP_STATE);
3789
3790 if (mmp_state != MMP_STATE_INACTIVE)
3791 return (B_TRUE);
3792 }
3793
3794 return (B_FALSE);
3795 }
3796
3797 /*
3798 * Perform the import for the given configuration. This passes the heavy
3799 * lifting off to zpool_import_props(), and then mounts the datasets contained
3800 * within the pool.
3801 */
/*
 * Returns 0 on success, 1 on failure. A failure to load encryption keys
 * (with -l) sets the return to 1 but still proceeds to mount datasets.
 */
static int
do_import(nvlist_t *config, const char *newname, const char *mntopts,
    nvlist_t *props, int flags, uint_t mntthreads)
{
	int ret = 0;
	int ms_status = 0;
	zpool_handle_t *zhp;
	const char *name;
	uint64_t version;

	name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
	version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);

	/* Refuse pools formatted with an SPA version we don't support. */
	if (!SPA_VERSION_IS_SUPPORTED(version)) {
		(void) fprintf(stderr, gettext("cannot import '%s': pool "
		    "is formatted using an unsupported ZFS version\n"), name);
		return (1);
	} else if (zfs_force_import_required(config) &&
	    !(flags & ZFS_IMPORT_ANY_HOST)) {
		/*
		 * A forced import is needed but -f was not given: explain
		 * why, distinguishing the MMP cases from a stale hostid.
		 */
		mmp_state_t mmp_state = MMP_STATE_INACTIVE;
		nvlist_t *nvinfo;

		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
			mmp_state = fnvlist_lookup_uint64(nvinfo,
			    ZPOOL_CONFIG_MMP_STATE);

		if (mmp_state == MMP_STATE_ACTIVE) {
			/* Pool is actively imported on another system. */
			const char *hostname = "<unknown>";
			uint64_t hostid = 0;

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
				hostname = fnvlist_lookup_string(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME);

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
				hostid = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID);

			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "pool is imported on %s (hostid: "
			    "0x%"PRIx64")\nExport the pool on the other "
			    "system, then run 'zpool import'.\n"),
			    name, hostname, hostid);
		} else if (mmp_state == MMP_STATE_NO_HOSTID) {
			/* multihost=on but this system has no hostid set. */
			(void) fprintf(stderr, gettext("Cannot import '%s': "
			    "pool has the multihost property on and the\n"
			    "system's hostid is not set. Set a unique hostid "
			    "with the zgenhostid(8) command.\n"), name);
		} else {
			/*
			 * Pool was last used by a different host; report who
			 * and when, preferring LOAD_INFO values over the
			 * top-level config (see zfs_force_import_required()).
			 */
			const char *hostname = "<unknown>";
			time_t timestamp = 0;
			uint64_t hostid = 0;

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
				hostname = fnvlist_lookup_string(nvinfo,
				    ZPOOL_CONFIG_HOSTNAME);
			else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
				hostname = fnvlist_lookup_string(config,
				    ZPOOL_CONFIG_HOSTNAME);

			if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
				timestamp = fnvlist_lookup_uint64(config,
				    ZPOOL_CONFIG_TIMESTAMP);

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
				hostid = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_HOSTID);
			else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
				hostid = fnvlist_lookup_uint64(config,
				    ZPOOL_CONFIG_HOSTID);

			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "pool was previously in use from another system.\n"
			    "Last accessed by %s (hostid=%"PRIx64") at %s"
			    "The pool can be imported, use 'zpool import -f' "
			    "to import the pool.\n"), name, hostname,
			    hostid, ctime(&timestamp));
		}

		return (1);
	}

	if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
		return (1);

	if (newname != NULL)
		name = newname;

	if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
		return (1);

	/*
	 * Loading keys is best effort. We don't want to return immediately
	 * if it fails but we do want to give the error to the caller.
	 */
	if (flags & ZFS_IMPORT_LOAD_KEYS &&
	    zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
		ret = 1;

	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
	    !(flags & ZFS_IMPORT_ONLY)) {
		/* Mount and share datasets unless import-only was asked. */
		ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads);
		if (ms_status == EZFS_SHAREFAILED) {
			(void) fprintf(stderr, gettext("Import was "
			    "successful, but unable to share some datasets\n"));
		} else if (ms_status == EZFS_MOUNTFAILED) {
			(void) fprintf(stderr, gettext("Import was "
			    "successful, but unable to mount some datasets\n"));
		}
	}

	zpool_close(zhp);
	return (ret);
}
3917
/*
 * Argument bundle for do_import_task(). Allocated by the dispatcher in
 * import_pools() and freed by the task after the import completes.
 */
typedef struct import_parameters {
	nvlist_t *ip_config;		/* pool config to import */
	const char *ip_mntopts;		/* temporary mount options */
	nvlist_t *ip_props;		/* pool properties to apply */
	int ip_flags;			/* ZFS_IMPORT_* flags */
	uint_t ip_mntthreads;		/* parallel mount thread count */
	int *ip_err;			/* shared error accumulator */
} import_parameters_t;
3926
3927 static void
do_import_task(void * arg)3928 do_import_task(void *arg)
3929 {
3930 import_parameters_t *ip = arg;
3931 *ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts,
3932 ip->ip_props, ip->ip_flags, ip->ip_mntthreads);
3933 free(ip);
3934 }
3935
3936
/*
 * Walk the candidate configs in "pools" and either display them, import
 * them all in parallel (-a), or import the single pool selected by name
 * or guid. Returns 0 on success, nonzero on any failure.
 */
static int
import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
    char *orig_name, char *new_name, importargs_t *import)
{
	nvlist_t *config = NULL;
	nvlist_t *found_config = NULL;
	uint64_t pool_state;
	boolean_t pool_specified = (import->poolname != NULL ||
	    import->guid != 0);
	uint_t npools = 0;


	/* With -a, imports run concurrently on a thread pool. */
	tpool_t *tp = NULL;
	if (import->do_all) {
		tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
	}

	/*
	 * At this point we have a list of import candidate configs. Even if
	 * we were searching by pool name or guid, we still need to
	 * post-process the list to deal with pool state and possible
	 * duplicate names.
	 */
	int err = 0;
	nvpair_t *elem = NULL;
	boolean_t first = B_TRUE;
	/* Pre-count candidates so mount threads can be split evenly. */
	if (!pool_specified && import->do_all) {
		while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
			npools++;
	}
	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {

		verify(nvpair_value_nvlist(elem, &config) == 0);

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &pool_state) == 0);
		/* -D selects only destroyed pools; otherwise skip them. */
		if (!import->do_destroyed &&
		    pool_state == POOL_STATE_DESTROYED)
			continue;
		if (import->do_destroyed &&
		    pool_state != POOL_STATE_DESTROYED)
			continue;

		/* Attach the rewind/load policy for the kernel to honor. */
		verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
		    import->policy) == 0);

		if (!pool_specified) {
			if (first)
				first = B_FALSE;
			else if (!import->do_all)
				(void) fputc('\n', stdout);

			if (import->do_all) {
				/* Freed by do_import_task(). */
				import_parameters_t *ip = safe_malloc(
				    sizeof (import_parameters_t));

				ip->ip_config = config;
				ip->ip_mntopts = mntopts;
				ip->ip_props = props;
				ip->ip_flags = flags;
				ip->ip_mntthreads = mount_tp_nthr / npools;
				ip->ip_err = &err;

				(void) tpool_dispatch(tp, do_import_task,
				    (void *)ip);
			} else {
				/*
				 * If we're importing from cachefile, then
				 * we don't want to report errors until we
				 * are in the scan phase of the import. If
				 * we get an error, then we return that error
				 * to invoke the scan phase.
				 */
				if (import->cachefile && !import->scan)
					err = show_import(config, B_FALSE);
				else
					(void) show_import(config, B_TRUE);
			}
		} else if (import->poolname != NULL) {
			const char *name;

			/*
			 * We are searching for a pool based on name.
			 */
			verify(nvlist_lookup_string(config,
			    ZPOOL_CONFIG_POOL_NAME, &name) == 0);

			if (strcmp(name, import->poolname) == 0) {
				/* Ambiguous name: must import by guid. */
				if (found_config != NULL) {
					(void) fprintf(stderr, gettext(
					    "cannot import '%s': more than "
					    "one matching pool\n"),
					    import->poolname);
					(void) fprintf(stderr, gettext(
					    "import by numeric ID instead\n"));
					err = B_TRUE;
				}
				found_config = config;
			}
		} else {
			uint64_t guid;

			/*
			 * Search for a pool by guid.
			 */
			verify(nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);

			if (guid == import->guid)
				found_config = config;
		}
	}
	if (import->do_all) {
		/* Wait for all parallel imports before tearing down. */
		tpool_wait(tp);
		tpool_destroy(tp);
	}

	/*
	 * If we were searching for a specific pool, verify that we found a
	 * pool, and then do the import.
	 */
	if (pool_specified && err == 0) {
		if (found_config == NULL) {
			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "no such pool available\n"), orig_name);
			err = B_TRUE;
		} else {
			err |= do_import(found_config, new_name,
			    mntopts, props, flags, mount_tp_nthr);
		}
	}

	/*
	 * If we were just looking for pools, report an error if none were
	 * found.
	 */
	if (!pool_specified && first)
		(void) fprintf(stderr,
		    gettext("no pools available to import\n"));
	return (err);
}
4079
4080 typedef struct target_exists_args {
4081 const char *poolname;
4082 uint64_t poolguid;
4083 } target_exists_args_t;
4084
4085 static int
name_or_guid_exists(zpool_handle_t * zhp,void * data)4086 name_or_guid_exists(zpool_handle_t *zhp, void *data)
4087 {
4088 target_exists_args_t *args = data;
4089 nvlist_t *config = zpool_get_config(zhp, NULL);
4090 int found = 0;
4091
4092 if (config == NULL)
4093 return (0);
4094
4095 if (args->poolname != NULL) {
4096 const char *pool_name;
4097
4098 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4099 &pool_name) == 0);
4100 if (strcmp(pool_name, args->poolname) == 0)
4101 found = 1;
4102 } else {
4103 uint64_t pool_guid;
4104
4105 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4106 &pool_guid) == 0);
4107 if (pool_guid == args->poolguid)
4108 found = 1;
4109 }
4110 zpool_close(zhp);
4111
4112 return (found);
4113 }
4114 /*
4115 * zpool checkpoint <pool>
4116 * checkpoint --discard <pool>
4117 *
4118 * -d Discard the checkpoint from a checkpointed
4119 * --discard pool.
4120 *
4121 * -w Wait for discarding a checkpoint to complete.
4122 * --wait
4123 *
4124 * Checkpoints the specified pool, by taking a "snapshot" of its
4125 * current state. A pool can only have one checkpoint at a time.
4126 */
4127 int
zpool_do_checkpoint(int argc,char ** argv)4128 zpool_do_checkpoint(int argc, char **argv)
4129 {
4130 boolean_t discard, wait;
4131 char *pool;
4132 zpool_handle_t *zhp;
4133 int c, err;
4134
4135 struct option long_options[] = {
4136 {"discard", no_argument, NULL, 'd'},
4137 {"wait", no_argument, NULL, 'w'},
4138 {0, 0, 0, 0}
4139 };
4140
4141 discard = B_FALSE;
4142 wait = B_FALSE;
4143 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
4144 switch (c) {
4145 case 'd':
4146 discard = B_TRUE;
4147 break;
4148 case 'w':
4149 wait = B_TRUE;
4150 break;
4151 case '?':
4152 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4153 optopt);
4154 usage(B_FALSE);
4155 }
4156 }
4157
4158 if (wait && !discard) {
4159 (void) fprintf(stderr, gettext("--wait only valid when "
4160 "--discard also specified\n"));
4161 usage(B_FALSE);
4162 }
4163
4164 argc -= optind;
4165 argv += optind;
4166
4167 if (argc < 1) {
4168 (void) fprintf(stderr, gettext("missing pool argument\n"));
4169 usage(B_FALSE);
4170 }
4171
4172 if (argc > 1) {
4173 (void) fprintf(stderr, gettext("too many arguments\n"));
4174 usage(B_FALSE);
4175 }
4176
4177 pool = argv[0];
4178
4179 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
4180 /* As a special case, check for use of '/' in the name */
4181 if (strchr(pool, '/') != NULL)
4182 (void) fprintf(stderr, gettext("'zpool checkpoint' "
4183 "doesn't work on datasets. To save the state "
4184 "of a dataset from a specific point in time "
4185 "please use 'zfs snapshot'\n"));
4186 return (1);
4187 }
4188
4189 if (discard) {
4190 err = (zpool_discard_checkpoint(zhp) != 0);
4191 if (err == 0 && wait)
4192 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
4193 } else {
4194 err = (zpool_checkpoint(zhp) != 0);
4195 }
4196
4197 zpool_close(zhp);
4198
4199 return (err);
4200 }
4201
4202 #define CHECKPOINT_OPT 1024
4203
4204 /*
4205 * zpool prefetch [-t <type>] <pool>
4206 *
 * Prefetches a particular type of data in the specified pool.
4208 */
4209 int
zpool_do_prefetch(int argc,char ** argv)4210 zpool_do_prefetch(int argc, char **argv)
4211 {
4212 int c;
4213 char *poolname;
4214 char *typestr = NULL;
4215 zpool_prefetch_type_t type;
4216 zpool_handle_t *zhp;
4217 int err = 0;
4218
4219 while ((c = getopt(argc, argv, "t:")) != -1) {
4220 switch (c) {
4221 case 't':
4222 typestr = optarg;
4223 break;
4224 case ':':
4225 (void) fprintf(stderr, gettext("missing argument for "
4226 "'%c' option\n"), optopt);
4227 usage(B_FALSE);
4228 break;
4229 case '?':
4230 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4231 optopt);
4232 usage(B_FALSE);
4233 }
4234 }
4235 argc -= optind;
4236 argv += optind;
4237
4238 if (argc < 1) {
4239 (void) fprintf(stderr, gettext("missing pool name argument\n"));
4240 usage(B_FALSE);
4241 }
4242
4243 if (argc > 1) {
4244 (void) fprintf(stderr, gettext("too many arguments\n"));
4245 usage(B_FALSE);
4246 }
4247
4248 poolname = argv[0];
4249
4250 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
4251 return (1);
4252
4253 if (typestr == NULL) {
4254 /* Prefetch all types */
4255 err = zpool_prefetch(zhp, ZPOOL_PREFETCH_DDT);
4256 if (err == 0)
4257 err = zpool_prefetch(zhp, ZPOOL_PREFETCH_BRT);
4258 } else {
4259 if (strcmp(typestr, "ddt") == 0) {
4260 type = ZPOOL_PREFETCH_DDT;
4261 } else if (strcmp(typestr, "brt") == 0) {
4262 type = ZPOOL_PREFETCH_BRT;
4263 } else {
4264 (void) fprintf(stderr,
4265 gettext("unsupported prefetch type\n"));
4266 zpool_close(zhp);
4267 usage(B_FALSE);
4268 }
4269 err = zpool_prefetch(zhp, type);
4270 }
4271
4272 zpool_close(zhp);
4273
4274 return (err);
4275 }
4276
4277 /*
4278 * zpool import [-d dir] [-D]
4279 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4280 * [-d dir | -c cachefile | -s] [-f] -a
4281 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4282 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
4283 * [newpool]
4284 *
4285 * -c Read pool information from a cachefile instead of searching
4286 * devices. If importing from a cachefile config fails, then
4287 * fallback to searching for devices only in the directories that
4288 * exist in the cachefile.
4289 *
4290 * -d Scan in a specific directory, other than /dev/. More than
4291 * one directory can be specified using multiple '-d' options.
4292 *
4293 * -D Scan for previously destroyed pools or import all or only
4294 * specified destroyed pools.
4295 *
4296 * -R Temporarily import the pool, with all mountpoints relative to
4297 * the given root. The pool will remain exported when the machine
4298 * is rebooted.
4299 *
4300 * -V Import even in the presence of faulted vdevs. This is an
4301 * intentionally undocumented option for testing purposes, and
4302 * treats the pool configuration as complete, leaving any bad
4303 * vdevs in the FAULTED state. In other words, it does verbatim
4304 * import.
4305 *
4306 * -f Force import, even if it appears that the pool is active.
4307 *
4308 * -F Attempt rewind if necessary.
4309 *
4310 * -n See if rewind would work, but don't actually rewind.
4311 *
4312 * -N Import the pool but don't mount datasets.
4313 *
 * -T	Specify a starting txg to use for import.  This is an
 *	intentionally undocumented option for testing purposes.
4316 *
4317 * -a Import all pools found.
4318 *
4319 * -l Load encryption keys while importing.
4320 *
4321 * -o Set property=value and/or temporary mount options (without '=').
4322 *
4323 * -s Scan using the default search path, the libblkid cache will
4324 * not be consulted.
4325 *
4326 * --rewind-to-checkpoint
4327 * Import the pool and revert back to the checkpoint.
4328 *
4329 * The import command scans for pools to import, and import pools based on pool
4330 * name and GUID. The pool can also be renamed as part of the import process.
4331 */
int
zpool_do_import(int argc, char **argv)
{
	char **searchdirs = NULL;
	char *env, *envdup = NULL;
	int nsearch = 0;
	int c;
	int err = 0;
	nvlist_t *pools = NULL;
	boolean_t do_all = B_FALSE;
	boolean_t do_destroyed = B_FALSE;
	char *mntopts = NULL;
	uint64_t searchguid = 0;
	char *searchname = NULL;
	char *propval;
	nvlist_t *policy = NULL;
	nvlist_t *props = NULL;
	int flags = ZFS_IMPORT_NORMAL;
	uint32_t rewind_policy = ZPOOL_NO_REWIND;
	boolean_t dryrun = B_FALSE;
	boolean_t do_rewind = B_FALSE;
	boolean_t xtreme_rewind = B_FALSE;
	boolean_t do_scan = B_FALSE;
	boolean_t pool_exists = B_FALSE;
	uint64_t txg = -1ULL;
	char *cachefile = NULL;
	importargs_t idata = { 0 };
	char *endptr;

	struct option long_options[] = {
		{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
		{0, 0, 0, 0}
	};

	/*
	 * check options
	 *
	 * NOTE(review): the optstring accepts 'C' and 'E' but no case below
	 * handles them, so those options are silently ignored — confirm
	 * whether they are vestigial or should be rejected.
	 */
	while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
	    long_options, NULL)) != -1) {
		switch (c) {
		case 'a':
			do_all = B_TRUE;
			break;
		case 'c':
			cachefile = optarg;
			break;
		case 'd':
			/* Grow the device search-path list by one entry. */
			searchdirs = safe_realloc(searchdirs,
			    (nsearch + 1) * sizeof (char *));
			searchdirs[nsearch++] = optarg;
			break;
		case 'D':
			do_destroyed = B_TRUE;
			break;
		case 'f':
			flags |= ZFS_IMPORT_ANY_HOST;
			break;
		case 'F':
			do_rewind = B_TRUE;
			break;
		case 'l':
			flags |= ZFS_IMPORT_LOAD_KEYS;
			break;
		case 'm':
			flags |= ZFS_IMPORT_MISSING_LOG;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'N':
			flags |= ZFS_IMPORT_ONLY;
			break;
		case 'o':
			/*
			 * "prop=value" sets a pool property; a bare word is
			 * taken as temporary mount options.
			 */
			if ((propval = strchr(optarg, '=')) != NULL) {
				*propval = '\0';
				propval++;
				if (add_prop_list(optarg, propval,
				    &props, B_TRUE))
					goto error;
			} else {
				mntopts = optarg;
			}
			break;
		case 'R':
			/*
			 * An altroot import is temporary, so also force
			 * cachefile=none to keep it out of the cache file.
			 */
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
				goto error;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto error;
			break;
		case 's':
			do_scan = B_TRUE;
			break;
		case 't':
			/* Temporary-name import: also skip the cache file. */
			flags |= ZFS_IMPORT_TEMP_NAME;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto error;
			break;

		case 'T':
			errno = 0;
			txg = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0') {
				(void) fprintf(stderr,
				    gettext("invalid txg value\n"));
				usage(B_FALSE);
			}
			rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
			break;
		case 'V':
			flags |= ZFS_IMPORT_VERBATIM;
			break;
		case 'X':
			xtreme_rewind = B_TRUE;
			break;
		case CHECKPOINT_OPT:
			flags |= ZFS_IMPORT_CHECKPOINT;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* Reject mutually-exclusive option combinations up front. */
	if (cachefile && nsearch != 0) {
		(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
		usage(B_FALSE);
	}

	if (cachefile && do_scan) {
		(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
		usage(B_FALSE);
	}

	if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
		(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
		usage(B_FALSE);
	}

	if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
		(void) fprintf(stderr, gettext("-l is only meaningful during "
		    "an import\n"));
		usage(B_FALSE);
	}

	if ((dryrun || xtreme_rewind) && !do_rewind) {
		(void) fprintf(stderr,
		    gettext("-n or -X only meaningful with -F\n"));
		usage(B_FALSE);
	}
	if (dryrun)
		rewind_policy = ZPOOL_TRY_REWIND;
	else if (do_rewind)
		rewind_policy = ZPOOL_DO_REWIND;
	if (xtreme_rewind)
		rewind_policy |= ZPOOL_EXTREME_REWIND;

	/* In the future, we can capture further policy and include it here */
	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
	    nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
	    rewind_policy) != 0)
		goto error;

	/* check argument count */
	if (do_all) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc > 2) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
	}

	/*
	 * Check for the effective uid. We do this explicitly here because
	 * otherwise any attempt to discover pools will silently fail.
	 */
	if (argc == 0 && geteuid() != 0) {
		(void) fprintf(stderr, gettext("cannot "
		    "discover pools: permission denied\n"));

		free(searchdirs);
		nvlist_free(props);
		nvlist_free(policy);
		return (1);
	}

	/*
	 * Depending on the arguments given, we do one of the following:
	 *
	 *	<none>	Iterate through all pools and display information about
	 *		each one.
	 *
	 *	-a	Iterate through all pools and try to import each one.
	 *
	 *	<id>	Find the pool that corresponds to the given GUID/pool
	 *		name and import that one.
	 *
	 *	-D	The above options apply only to destroyed pools.
	 */
	if (argc != 0) {
		char *endptr;

		errno = 0;
		searchguid = strtoull(argv[0], &endptr, 10);
		if (errno != 0 || *endptr != '\0') {
			/* Not a pure number; treat the argument as a name. */
			searchname = argv[0];
			searchguid = 0;
		}

		/*
		 * User specified a name or guid. Ensure it's unique.
		 */
		target_exists_args_t search = {searchname, searchguid};
		pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
	}

	/*
	 * Check the environment for the preferred search path.
	 */
	if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
		char *dir, *tmp = NULL;

		/*
		 * searchdirs entries point into envdup, so envdup must stay
		 * allocated until the very end (freed at the error label).
		 */
		envdup = strdup(env);

		for (dir = strtok_r(envdup, ":", &tmp);
		    dir != NULL;
		    dir = strtok_r(NULL, ":", &tmp)) {
			searchdirs = safe_realloc(searchdirs,
			    (nsearch + 1) * sizeof (char *));
			searchdirs[nsearch++] = dir;
		}
	}

	idata.path = searchdirs;
	idata.paths = nsearch;
	idata.poolname = searchname;
	idata.guid = searchguid;
	idata.cachefile = cachefile;
	idata.scan = do_scan;
	idata.policy = policy;
	idata.do_destroyed = do_destroyed;
	idata.do_all = do_all;

	libpc_handle_t lpch = {
		.lpc_lib_handle = g_zfs,
		.lpc_ops = &libzfs_config_ops,
		.lpc_printerr = B_TRUE
	};
	pools = zpool_search_import(&lpch, &idata);

	if (pools != NULL && pool_exists &&
	    (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
		/* Importing under an already-used name with no rename. */
		(void) fprintf(stderr, gettext("cannot import '%s': "
		    "a pool with that name already exists\n"),
		    argv[0]);
		(void) fprintf(stderr, gettext("use the form '%s "
		    "<pool | id> <newpool>' to give it a new name\n"),
		    "zpool import");
		err = 1;
	} else if (pools == NULL && pool_exists) {
		(void) fprintf(stderr, gettext("cannot import '%s': "
		    "a pool with that name is already created/imported,\n"),
		    argv[0]);
		(void) fprintf(stderr, gettext("and no additional pools "
		    "with that name were found\n"));
		err = 1;
	} else if (pools == NULL) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "no such pool available\n"), argv[0]);
		}
		err = 1;
	}

	if (err == 1) {
		free(searchdirs);
		free(envdup);
		nvlist_free(policy);
		nvlist_free(pools);
		nvlist_free(props);
		return (1);
	}

	err = import_pools(pools, props, mntopts, flags,
	    argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata);

	/*
	 * If we're using the cachefile and we failed to import, then
	 * fallback to scanning the directory for pools that match
	 * those in the cachefile.
	 */
	if (err != 0 && cachefile != NULL) {
		(void) printf(gettext("cachefile import failed, retrying\n"));

		/*
		 * We use the scan flag to gather the directories that exist
		 * in the cachefile. If we need to fallback to searching for
		 * the pool config, we will only search devices in these
		 * directories.
		 */
		idata.scan = B_TRUE;
		nvlist_free(pools);
		pools = zpool_search_import(&lpch, &idata);

		err = import_pools(pools, props, mntopts, flags,
		    argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL,
		    &idata);
	}

error:
	nvlist_free(props);
	nvlist_free(pools);
	nvlist_free(policy);
	free(searchdirs);
	free(envdup);

	return (err ? 1 : 0);
}
4664
4665 /*
4666 * zpool sync [-f] [pool] ...
4667 *
4668 * -f (undocumented) force uberblock (and config including zpool cache file)
4669 * update.
4670 *
4671 * Sync the specified pool(s).
4672 * Without arguments "zpool sync" will sync all pools.
4673 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
4674 *
4675 */
4676 static int
zpool_do_sync(int argc,char ** argv)4677 zpool_do_sync(int argc, char **argv)
4678 {
4679 int ret;
4680 boolean_t force = B_FALSE;
4681
4682 /* check options */
4683 while ((ret = getopt(argc, argv, "f")) != -1) {
4684 switch (ret) {
4685 case 'f':
4686 force = B_TRUE;
4687 break;
4688 case '?':
4689 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4690 optopt);
4691 usage(B_FALSE);
4692 }
4693 }
4694
4695 argc -= optind;
4696 argv += optind;
4697
4698 /* if argc == 0 we will execute zpool_sync_one on all pools */
4699 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
4700 B_FALSE, zpool_sync_one, &force);
4701
4702 return (ret);
4703 }
4704
/* Callback state shared by the iostat printing routines. */
typedef struct iostat_cbdata {
	uint64_t cb_flags;	/* IOS_* bitmask of stat types to print */
	int cb_namewidth;	/* minimum width of the pool/vdev name column */
	int cb_iteration;	/* interval counter — presumably; see callers */
	boolean_t cb_verbose;
	boolean_t cb_literal;	/* print exact values rather than unit-scaled */
	boolean_t cb_scripted;	/* tab-separated output, no padding or color */
	zpool_list_t *cb_list;
	vdev_cmd_data_list_t *vcdl;	/* per-vdev command (-c) output, if any */
	vdev_cbdata_t cb_vdevs;	/* vdev name filter; nonzero cb_names_count
				 * switches the name-column title to "vdev" */
} iostat_cbdata_t;
4716
/* iostat labels (see the iostat_top_labels/iostat_bottom_labels tables) */
typedef struct name_and_columns {
	const char *name;	/* Column name */
	unsigned int columns;	/* Center name to this number of columns */
} name_and_columns_t;

#define	IOSTAT_MAX_LABELS	15	/* Max number of labels on one line */
4724
/*
 * Upper row of the two-row iostat column headers, indexed by
 * enum iostat_type.  Each row is terminated by a NULL name; the .columns
 * field says how many data columns the label is centered across.
 */
static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
{
	[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
	    {NULL}},
	[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
	    {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
	    {NULL}},
	[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
	    {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
	    {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
	[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
	    {"asyncq_wait", 2}, {NULL}},
	[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
	    {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
	    {"trim", 2}, {"rebuild", 2}, {NULL}},
};
4741
/*
 * Lower row of the two-row iostat column headers: one label per data
 * column, in the same order the columns are printed.
 * Shorthand - if "columns" field not set, default to 1 column.
 */
static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
{
	[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
	    {"write"}, {NULL}},
	[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
	    {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
	    {NULL}},
	[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
	    {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
	    {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
	[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
	    {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
	    {NULL}},
	[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
	    {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
	    {"ind"}, {"agg"}, {NULL}},
};
4760
/*
 * Name-column title printed for each histogram type, indexed by the
 * value of IOS_HISTO_IDX(cb_flags).
 */
static const char *histo_to_title[] = {
	[IOS_L_HISTO] = "latency",
	[IOS_RQ_HISTO] = "req_size",
};
4765
4766 /*
4767 * Return the number of labels in a null-terminated name_and_columns_t
4768 * array.
4769 *
4770 */
4771 static unsigned int
label_array_len(const name_and_columns_t * labels)4772 label_array_len(const name_and_columns_t *labels)
4773 {
4774 int i = 0;
4775
4776 while (labels[i].name)
4777 i++;
4778
4779 return (i);
4780 }
4781
4782 /*
4783 * Return the number of strings in a null-terminated string array.
4784 * For example:
4785 *
4786 * const char foo[] = {"bar", "baz", NULL}
4787 *
4788 * returns 2
4789 */
4790 static uint64_t
str_array_len(const char * array[])4791 str_array_len(const char *array[])
4792 {
4793 uint64_t i = 0;
4794 while (array[i])
4795 i++;
4796
4797 return (i);
4798 }
4799
4800
4801 /*
4802 * Return a default column width for default/latency/queue columns. This does
4803 * not include histograms, which have their columns autosized.
4804 */
4805 static unsigned int
default_column_width(iostat_cbdata_t * cb,enum iostat_type type)4806 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
4807 {
4808 unsigned long column_width = 5; /* Normal niceprint */
4809 static unsigned long widths[] = {
4810 /*
4811 * Choose some sane default column sizes for printing the
4812 * raw numbers.
4813 */
4814 [IOS_DEFAULT] = 15, /* 1PB capacity */
4815 [IOS_LATENCY] = 10, /* 1B ns = 10sec */
4816 [IOS_QUEUES] = 6, /* 1M queue entries */
4817 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4818 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4819 };
4820
4821 if (cb->cb_literal)
4822 column_width = widths[type];
4823
4824 return (column_width);
4825 }
4826
/*
 * Print the column labels, i.e:
 *
 *   capacity     operations    bandwidth
 * alloc   free   read  write   read  write  ...
 *
 * If force_column_width is set, use it for the column width.  If not set, use
 * the default column width.
 */
static void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
    const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
	int i, idx, s;
	int text_start, rw_column_width, spaces_to_end;
	uint64_t flags = cb->cb_flags;
	uint64_t f;
	unsigned int column_width = force_column_width;

	/*
	 * For each bit set in flags.  Each set bit selects one stat type,
	 * i.e. one row of the labels[][] table; idx is assigned at the top
	 * of the body before the loop-update expression ever reads it.
	 */
	for (f = flags; f; f &= ~(1ULL << idx)) {
		idx = lowbit64(f) - 1;
		if (!force_column_width)
			column_width = default_column_width(cb, idx);
		/* Print our top labels centered over "read write" label. */
		for (i = 0; i < label_array_len(labels[idx]); i++) {
			const char *name = labels[idx][i].name;
			/*
			 * We treat labels[][].columns == 0 as shorthand
			 * for one column.  It makes writing out the label
			 * tables more concise.
			 */
			unsigned int columns = MAX(1, labels[idx][i].columns);
			unsigned int slen = strlen(name);

			/* Total width of the data columns this label spans */
			rw_column_width = (column_width * columns) +
			    (2 * (columns - 1));

			text_start = (int)((rw_column_width) / columns -
			    slen / columns);
			if (text_start < 0)
				text_start = 0;

			printf("  ");	/* Two spaces between columns */

			/* Space from beginning of column to label */
			for (s = 0; s < text_start; s++)
				printf(" ");

			printf("%s", name);

			/* Print space after label to end of column */
			spaces_to_end = rw_column_width - text_start - slen;
			if (spaces_to_end < 0)
				spaces_to_end = 0;

			for (s = 0; s < spaces_to_end; s++)
				printf(" ");
		}
	}
}
4888
4889
4890 /*
4891 * print_cmd_columns - Print custom column titles from -c
4892 *
4893 * If the user specified the "zpool status|iostat -c" then print their custom
4894 * column titles in the header. For example, print_cmd_columns() would print
4895 * the " col1 col2" part of this:
4896 *
4897 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4898 * ...
4899 * capacity operations bandwidth
4900 * pool alloc free read write read write col1 col2
4901 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4902 * mypool 269K 1008M 0 0 107 946
4903 * mirror 269K 1008M 0 0 107 946
4904 * sdb - - 0 0 102 473 val1 val2
4905 * sdc - - 0 0 5 473 val1 val2
4906 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4907 */
4908 static void
print_cmd_columns(vdev_cmd_data_list_t * vcdl,int use_dashes)4909 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4910 {
4911 int i, j;
4912 vdev_cmd_data_t *data = &vcdl->data[0];
4913
4914 if (vcdl->count == 0 || data == NULL)
4915 return;
4916
4917 /*
4918 * Each vdev cmd should have the same column names unless the user did
4919 * something weird with their cmd. Just take the column names from the
4920 * first vdev and assume it works for all of them.
4921 */
4922 for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4923 printf(" ");
4924 if (use_dashes) {
4925 for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4926 printf("-");
4927 } else {
4928 (void) printf_color(ANSI_BOLD, "%*s",
4929 vcdl->uniq_cols_width[i],
4930 vcdl->uniq_cols[i]);
4931 }
4932 }
4933 }
4934
4935
4936 /*
4937 * Utility function to print out a line of dashes like:
4938 *
4939 * -------------------------------- ----- ----- ----- ----- -----
4940 *
4941 * ...or a dashed named-row line like:
4942 *
4943 * logs - - - - -
4944 *
4945 * @cb: iostat data
4946 *
4947 * @force_column_width If non-zero, use the value as the column width.
4948 * Otherwise use the default column widths.
4949 *
4950 * @name: Print a dashed named-row line starting
4951 * with @name. Otherwise, print a regular
4952 * dashed line.
4953 */
static void
print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
    const char *name)
{
	int i;
	unsigned int namewidth;
	uint64_t flags = cb->cb_flags;
	uint64_t f;
	int idx;
	const name_and_columns_t *labels;
	const char *title;

	/* Pick the name-column title to size the first column against. */
	if (cb->cb_flags & IOS_ANYHISTO_M) {
		title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
	} else if (cb->cb_vdevs.cb_names_count) {
		title = "vdev";
	} else {
		title = "pool";
	}

	namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
	    name ? strlen(name) : 0);

	/* Named-row variant prints the name; plain variant prints dashes. */
	if (name) {
		printf("%-*s", namewidth, name);
	} else {
		for (i = 0; i < namewidth; i++)
			(void) printf("-");
	}

	/* For each bit in flags (one group of columns per stat type) */
	for (f = flags; f; f &= ~(1ULL << idx)) {
		unsigned int column_width;
		idx = lowbit64(f) - 1;
		if (force_column_width)
			column_width = force_column_width;
		else
			column_width = default_column_width(cb, idx);

		labels = iostat_bottom_labels[idx];
		for (i = 0; i < label_array_len(labels); i++) {
			if (name)
				/* column_width-1 spaces, then a single '-' */
				printf("  %*s-", column_width - 1, " ");
			else
				/* a run of column_width dashes */
				printf("  %.*s", column_width,
				    "--------------------");
		}
	}
}
5005
5006
/* Print a full-width dashed separator line (no leading row name). */
static void
print_iostat_separator_impl(iostat_cbdata_t *cb,
    unsigned int force_column_width)
{
	print_iostat_dashes(cb, force_column_width, NULL);
}
5013
/* Print a dashed separator line using the default column widths. */
static void
print_iostat_separator(iostat_cbdata_t *cb)
{
	print_iostat_separator_impl(cb, 0);
}
5019
/*
 * Print the full iostat header: the two label rows, any custom -c command
 * columns, and the dashed separator underneath.  histo_vdev_name, when
 * non-NULL, is printed in the name column of the top row (used for the
 * per-vdev histogram headers).
 */
static void
print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
    const char *histo_vdev_name)
{
	unsigned int namewidth;
	const char *title;

	color_start(ANSI_BOLD);

	if (cb->cb_flags & IOS_ANYHISTO_M) {
		title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
	} else if (cb->cb_vdevs.cb_names_count) {
		title = "vdev";
	} else {
		title = "pool";
	}

	namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
	    histo_vdev_name ? strlen(histo_vdev_name) : 0);

	if (histo_vdev_name)
		printf("%-*s", namewidth, histo_vdev_name);
	else
		printf("%*s", namewidth, "");

	/* First header row: group labels centered over their columns. */
	print_iostat_labels(cb, force_column_width, iostat_top_labels);
	printf("\n");

	printf("%-*s", namewidth, title);

	/* Second header row: one label per data column. */
	print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
	if (cb->vcdl != NULL)
		print_cmd_columns(cb->vcdl, 0);

	printf("\n");

	print_iostat_separator_impl(cb, force_column_width);

	/* Extend the dashed line under the custom -c columns, if any. */
	if (cb->vcdl != NULL)
		print_cmd_columns(cb->vcdl, 1);

	color_end();

	printf("\n");
}
5066
/* Print the iostat header using the default column widths. */
static void
print_iostat_header(iostat_cbdata_t *cb)
{
	print_iostat_header_impl(cb, 0, NULL);
}
5072
5073 /*
5074 * Prints a size string (i.e. 120M) with the suffix ("M") colored
5075 * by order of magnitude. Uses column_size to add padding.
5076 */
static void
print_stat_color(const char *statbuf, unsigned int column_size)
{
	(void) fputs("  ", stdout);
	size_t len = strlen(statbuf);
	/* Right-justify: pad with spaces until the text fills the column. */
	while (len < column_size) {
		(void) fputc(' ', stdout);
		column_size--;
	}
	if (*statbuf == '0') {
		/* A leading '0' means a zero stat; print it dimmed (gray). */
		color_start(ANSI_GRAY);
		(void) fputc('0', stdout);
	} else {
		for (; *statbuf; statbuf++) {
			/* Switch color when a unit suffix is reached. */
			if (*statbuf == 'K') color_start(ANSI_GREEN);
			else if (*statbuf == 'M') color_start(ANSI_YELLOW);
			else if (*statbuf == 'G') color_start(ANSI_RED);
			else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
			else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
			else if (*statbuf == 'E') color_start(ANSI_CYAN);
			(void) fputc(*statbuf, stdout);
			/*
			 * Truncate output at the column boundary.
			 * NOTE(review): column_size is unsigned, so if it is
			 * 0 here the decrement wraps to UINT_MAX and the
			 * string prints in full instead of truncating —
			 * confirm callers always pass a positive width.
			 */
			if (--column_size <= 0)
				break;
		}
	}
	color_end();
}
5104
5105 /*
5106 * Display a single statistic.
5107 */
5108 static void
print_one_stat(uint64_t value,enum zfs_nicenum_format format,unsigned int column_size,boolean_t scripted)5109 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
5110 unsigned int column_size, boolean_t scripted)
5111 {
5112 char buf[64];
5113
5114 zfs_nicenum_format(value, buf, sizeof (buf), format);
5115
5116 if (scripted)
5117 printf("\t%s", buf);
5118 else
5119 print_stat_color(buf, column_size);
5120 }
5121
5122 /*
5123 * Calculate the default vdev stats
5124 *
5125 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting
5126 * stats into calcvs.
5127 */
5128 static void
calc_default_iostats(vdev_stat_t * oldvs,vdev_stat_t * newvs,vdev_stat_t * calcvs)5129 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
5130 vdev_stat_t *calcvs)
5131 {
5132 int i;
5133
5134 memcpy(calcvs, newvs, sizeof (*calcvs));
5135 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
5136 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
5137
5138 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
5139 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
5140 }
5141
/*
 * Internal representation of the extended iostats data.
 *
 * The extended iostat stats are exported in nvlists as either uint64_t arrays
 * or single uint64_t's. We make both look like arrays to make them easier
 * to process. In order to make single uint64_t's look like arrays, we set
 * __data to the stat data, and then set *data = &__data with count = 1. Then,
 * we can just use *data and count.
 */
struct stat_array {
	uint64_t *data;		/* Stat values; may point at __data below */
	uint_t count;		/* Number of entries in data[] */
	uint64_t __data;	/* Only used when data is a single uint64_t */
};
5156
5157 static uint64_t
stat_histo_max(struct stat_array * nva,unsigned int len)5158 stat_histo_max(struct stat_array *nva, unsigned int len)
5159 {
5160 uint64_t max = 0;
5161 int i;
5162 for (i = 0; i < len; i++)
5163 max = MAX(max, array64_max(nva[i].data, nva[i].count));
5164
5165 return (max);
5166 }
5167
5168 /*
5169 * Helper function to lookup a uint64_t array or uint64_t value and store its
5170 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
5171 * it look like a one element array to make it easier to process.
5172 */
5173 static int
nvpair64_to_stat_array(nvlist_t * nvl,const char * name,struct stat_array * nva)5174 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
5175 struct stat_array *nva)
5176 {
5177 nvpair_t *tmp;
5178 int ret;
5179
5180 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
5181 switch (nvpair_type(tmp)) {
5182 case DATA_TYPE_UINT64_ARRAY:
5183 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
5184 break;
5185 case DATA_TYPE_UINT64:
5186 ret = nvpair_value_uint64(tmp, &nva->__data);
5187 nva->data = &nva->__data;
5188 nva->count = 1;
5189 break;
5190 default:
5191 /* Not a uint64_t */
5192 ret = EINVAL;
5193 break;
5194 }
5195
5196 return (ret);
5197 }
5198
5199 /*
5200 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
5201 * subtract them, and return the results in a newly allocated stat_array.
5202 * You must free the returned array after you are done with it with
5203 * free_calc_stats().
5204 *
5205 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
5206 * values.
5207 */
static struct stat_array *
calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
    nvlist_t *newnv)
{
	nvlist_t *oldnvx = NULL, *newnvx;
	struct stat_array *oldnva, *newnva, *calcnva;
	int i, j;
	unsigned int alloc_size = (sizeof (struct stat_array)) * len;

	/* Extract our extended stats nvlist from the main list */
	verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
	    &newnvx) == 0);
	if (oldnv) {
		verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
		    &oldnvx) == 0);
	}

	newnva = safe_malloc(alloc_size);
	oldnva = safe_malloc(alloc_size);
	calcnva = safe_malloc(alloc_size);

	for (j = 0; j < len; j++) {
		verify(nvpair64_to_stat_array(newnvx, names[j],
		    &newnva[j]) == 0);
		calcnva[j].count = newnva[j].count;
		/* alloc_size is reused here for the per-stat data buffer */
		alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
		calcnva[j].data = safe_malloc(alloc_size);
		memcpy(calcnva[j].data, newnva[j].data, alloc_size);

		/* With an old snapshot present, store new - old deltas. */
		if (oldnvx) {
			verify(nvpair64_to_stat_array(oldnvx, names[j],
			    &oldnva[j]) == 0);
			for (i = 0; i < oldnva[j].count; i++)
				calcnva[j].data[i] -= oldnva[j].data[i];
		}
	}
	/*
	 * Only calcnva (and the data buffers it owns) survives; the
	 * temporary header arrays are freed here.
	 */
	free(newnva);
	free(oldnva);
	return (calcnva);
}
5248
5249 static void
free_calc_stats(struct stat_array * nva,unsigned int len)5250 free_calc_stats(struct stat_array *nva, unsigned int len)
5251 {
5252 int i;
5253 for (i = 0; i < len; i++)
5254 free(nva[i].data);
5255
5256 free(nva);
5257 }
5258
/*
 * Print one histogram body: one row per bucket, with the bucket's range
 * label in the name column followed by the value from each stat array.
 */
static void
print_iostat_histo(struct stat_array *nva, unsigned int len,
    iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
    double scale)
{
	int i, j;
	char buf[6];
	uint64_t val;
	enum zfs_nicenum_format format;
	unsigned int buckets;
	unsigned int start_bucket;

	/* Literal mode prints raw counts instead of unit-scaled numbers. */
	if (cb->cb_literal)
		format = ZFS_NICENUM_RAW;
	else
		format = ZFS_NICENUM_1024;

	/* All these histos are the same size, so just use nva[0].count */
	buckets = nva[0].count;

	if (cb->cb_flags & IOS_RQ_HISTO_M) {
		/* Start at 512 - req size should never be lower than this */
		start_bucket = 9;
	} else {
		start_bucket = 0;
	}

	for (j = start_bucket; j < buckets; j++) {
		/* Print histogram bucket label */
		if (cb->cb_flags & IOS_L_HISTO_M) {
			/* Ending range of this bucket */
			val = (1UL << (j + 1)) - 1;
			zfs_nicetime(val, buf, sizeof (buf));
		} else {
			/* Request size (starting range of bucket) */
			val = (1UL << j);
			zfs_nicenum(val, buf, sizeof (buf));
		}

		if (cb->cb_scripted)
			printf("%llu", (u_longlong_t)val);
		else
			printf("%-*s", namewidth, buf);

		/* Print the values on the line */
		for (i = 0; i < len; i++) {
			print_one_stat(nva[i].data[j] * scale, format,
			    column_width, cb->cb_scripted);
		}
		printf("\n");
	}
}
5311
/* Print a solid line of `length` dashes followed by a newline. */
static void
print_solid_separator(unsigned int length)
{
	for (; length > 0; length--)
		(void) printf("-");
	(void) printf("\n");
}
5319
/*
 * Print a complete histogram block for one pool/vdev: header, body rows,
 * and trailing separator.  `scale` is applied to every value printed.
 */
static void
print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
    nvlist_t *newnv, double scale, const char *name)
{
	unsigned int column_width;
	unsigned int namewidth;
	unsigned int entire_width;
	enum iostat_type type;
	struct stat_array *nva;
	const char **names;
	unsigned int names_len;

	/* What type of histo are we? */
	type = IOS_HISTO_IDX(cb->cb_flags);

	/* Get NULL-terminated array of nvlist names for our histo */
	names = vsx_type_to_nvlist[type];
	names_len = str_array_len(names); /* num of names */

	nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);

	if (cb->cb_literal) {
		/* Autosize the columns to fit the largest raw value. */
		column_width = MAX(5,
		    (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
	} else {
		column_width = 5;
	}

	namewidth = MAX(cb->cb_namewidth,
	    strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));

	/*
	 * Calculate the entire line width of what we're printing.  The
	 * +2 is for the two spaces between columns:
	 */
	/*	 read  write				*/
	/*	----- -----				*/
	/*	|___|  <---------- column_width	*/
	/*						*/
	/*	|__________|  <--- entire_width	*/
	/*						*/
	entire_width = namewidth + (column_width + 2) *
	    label_array_len(iostat_bottom_labels[type]);

	if (cb->cb_scripted)
		printf("%s\n", name);
	else
		print_iostat_header_impl(cb, column_width, name);

	print_iostat_histo(nva, names_len, cb, column_width,
	    namewidth, scale);

	free_calc_stats(nva, names_len);
	if (!cb->cb_scripted)
		print_solid_separator(entire_width);
}
5376
/*
 * Calculate the average latency of a power-of-two latency histogram.
 *
 * histo:   array of per-bucket counts; bucket i covers [2^i, 2^(i+1)).
 * buckets: number of entries in 'histo'.
 *
 * Returns the weighted average of the bucket midpoints, or 0 if the
 * histogram is empty.
 */
static uint64_t
single_histo_average(uint64_t *histo, unsigned int buckets)
{
	unsigned int i;
	uint64_t count = 0, total = 0;

	for (i = 0; i < buckets; i++) {
		/*
		 * Our buckets are power-of-two latency ranges.  Use the
		 * midpoint latency of each bucket to calculate the average.
		 * For example:
		 *
		 * Bucket          Midpoint
		 * 8ns-15ns:       12ns
		 * 16ns-31ns:      24ns
		 * ...
		 *
		 * Use 1ULL (not 1UL) so the shift is done in 64 bits:
		 * latency histograms have more than 32 buckets, and
		 * 1UL << 32 is undefined where 'long' is 32 bits wide.
		 */
		if (histo[i] != 0) {
			total += histo[i] * ((1ULL << i) + ((1ULL << i) / 2));
			count += histo[i];
		}
	}

	/* Prevent divide by zero */
	return (count == 0 ? 0 : total / count);
}
5406
5407 static void
print_iostat_queues(iostat_cbdata_t * cb,nvlist_t * newnv)5408 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
5409 {
5410 const char *names[] = {
5411 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
5412 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
5413 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
5414 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
5415 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
5416 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
5417 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
5418 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
5419 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
5420 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
5421 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
5422 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
5423 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
5424 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
5425 };
5426
5427 struct stat_array *nva;
5428
5429 unsigned int column_width = default_column_width(cb, IOS_QUEUES);
5430 enum zfs_nicenum_format format;
5431
5432 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
5433
5434 if (cb->cb_literal)
5435 format = ZFS_NICENUM_RAW;
5436 else
5437 format = ZFS_NICENUM_1024;
5438
5439 for (int i = 0; i < ARRAY_SIZE(names); i++) {
5440 uint64_t val = nva[i].data[0];
5441 print_one_stat(val, format, column_width, cb->cb_scripted);
5442 }
5443
5444 free_calc_stats(nva, ARRAY_SIZE(names));
5445 }
5446
5447 static void
print_iostat_latency(iostat_cbdata_t * cb,nvlist_t * oldnv,nvlist_t * newnv)5448 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
5449 nvlist_t *newnv)
5450 {
5451 int i;
5452 uint64_t val;
5453 const char *names[] = {
5454 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
5455 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
5456 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
5457 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
5458 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
5459 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
5460 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
5461 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
5462 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
5463 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
5464 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
5465 };
5466 struct stat_array *nva;
5467
5468 unsigned int column_width = default_column_width(cb, IOS_LATENCY);
5469 enum zfs_nicenum_format format;
5470
5471 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
5472
5473 if (cb->cb_literal)
5474 format = ZFS_NICENUM_RAWTIME;
5475 else
5476 format = ZFS_NICENUM_TIME;
5477
5478 /* Print our avg latencies on the line */
5479 for (i = 0; i < ARRAY_SIZE(names); i++) {
5480 /* Compute average latency for a latency histo */
5481 val = single_histo_average(nva[i].data, nva[i].count);
5482 print_one_stat(val, format, column_width, cb->cb_scripted);
5483 }
5484 free_calc_stats(nva, ARRAY_SIZE(names));
5485 }
5486
5487 /*
5488 * Print default statistics (capacity/operations/bandwidth)
5489 */
5490 static void
print_iostat_default(vdev_stat_t * vs,iostat_cbdata_t * cb,double scale)5491 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
5492 {
5493 unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
5494 enum zfs_nicenum_format format;
5495 char na; /* char to print for "not applicable" values */
5496
5497 if (cb->cb_literal) {
5498 format = ZFS_NICENUM_RAW;
5499 na = '0';
5500 } else {
5501 format = ZFS_NICENUM_1024;
5502 na = '-';
5503 }
5504
5505 /* only toplevel vdevs have capacity stats */
5506 if (vs->vs_space == 0) {
5507 if (cb->cb_scripted)
5508 printf("\t%c\t%c", na, na);
5509 else
5510 printf(" %*c %*c", column_width, na, column_width,
5511 na);
5512 } else {
5513 print_one_stat(vs->vs_alloc, format, column_width,
5514 cb->cb_scripted);
5515 print_one_stat(vs->vs_space - vs->vs_alloc, format,
5516 column_width, cb->cb_scripted);
5517 }
5518
5519 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
5520 format, column_width, cb->cb_scripted);
5521 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
5522 format, column_width, cb->cb_scripted);
5523 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
5524 format, column_width, cb->cb_scripted);
5525 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
5526 format, column_width, cb->cb_scripted);
5527 }
5528
/*
 * Names of the special allocation classes, matched against a child vdev's
 * allocation bias (or log flag) when printing class vdevs in their own
 * sections.  The array order determines the order the sections appear in.
 */
static const char *const class_name[] = {
	VDEV_ALLOC_BIAS_DEDUP,
	VDEV_ALLOC_BIAS_SPECIAL,
	VDEV_ALLOC_CLASS_LOGS
};
5534
/*
 * Print out all the statistics for the given vdev. This can either be the
 * toplevel configuration, or called recursively. If 'name' is NULL, then this
 * is a verbose output, and we don't want to display the toplevel pool stats.
 *
 * Recursion order: this vdev, then normal children, then children belonging
 * to each allocation class (in class_name[] order), then L2ARC devices.
 *
 * Returns the number of stat lines printed.
 */
static unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
    nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
	nvlist_t **oldchild, **newchild;
	uint_t c, children, oldchildren;
	vdev_stat_t *oldvs, *newvs, *calcvs;
	vdev_stat_t zerovs = { 0 };
	char *vname;
	int i;
	int ret = 0;
	uint64_t tdelta;
	double scale;

	/* Indirect vdevs are never displayed */
	if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
		return (ret);

	calcvs = safe_malloc(sizeof (*calcvs));

	/*
	 * With no old snapshot, diff against all-zero stats so the deltas
	 * are the raw cumulative values.
	 */
	if (oldnv != NULL) {
		verify(nvlist_lookup_uint64_array(oldnv,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
	} else {
		oldvs = &zerovs;
	}

	/* Do we only want to see a specific vdev? */
	for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
		/* Yes we do.  Is this the vdev? */
		if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
			/*
			 * This is our vdev.  Since it is the only vdev we
			 * will be displaying, make depth = 0 so that it
			 * doesn't get indented.
			 */
			depth = 0;
			break;
		}
	}

	if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
		/* Couldn't match the name; still recurse into children */
		goto children;
	}


	verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&newvs, &c) == 0);

	/*
	 * Print the vdev name unless it's is a histogram.  Histograms
	 * display the vdev name in the header itself.
	 */
	if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
		if (cb->cb_scripted) {
			printf("%s", name);
		} else {
			/* Pad the name column to cb_namewidth if it fits */
			if (strlen(name) + depth > cb->cb_namewidth)
				(void) printf("%*s%s", depth, "", name);
			else
				(void) printf("%*s%s%*s", depth, "", name,
				    (int)(cb->cb_namewidth - strlen(name) -
				    depth), "");
		}
	}

	/* Calculate our scaling factor (per-second rate from the delta) */
	tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
	if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
		/*
		 * If we specify printing histograms with no time interval, then
		 * print the histogram numbers over the entire lifetime of the
		 * vdev.
		 */
		scale = 1;
	} else {
		if (tdelta == 0)
			scale = 1.0;
		else
			scale = (double)NANOSEC / tdelta;
	}

	/* Emit each requested stat section for this vdev */
	if (cb->cb_flags & IOS_DEFAULT_M) {
		calc_default_iostats(oldvs, newvs, calcvs);
		print_iostat_default(calcvs, cb, scale);
	}
	if (cb->cb_flags & IOS_LATENCY_M)
		print_iostat_latency(cb, oldnv, newnv);
	if (cb->cb_flags & IOS_QUEUES_M)
		print_iostat_queues(cb, newnv);
	if (cb->cb_flags & IOS_ANYHISTO_M) {
		printf("\n");
		print_iostat_histos(cb, oldnv, newnv, scale, name);
	}

	/* Optionally run the -c command for this vdev's device path */
	if (cb->vcdl != NULL) {
		const char *path;
		if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
		    &path) == 0) {
			printf("  ");
			zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
		}
	}

	/* Histograms terminate their own lines */
	if (!(cb->cb_flags & IOS_ANYHISTO_M))
		printf("\n");

	ret++;

children:

	free(calcvs);

	if (!cb->cb_verbose)
		return (ret);

	if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &children) != 0)
		return (ret);

	/*
	 * The config may have changed between snapshots; only walk the
	 * children present in both.
	 */
	if (oldnv) {
		if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
		    &oldchild, &oldchildren) != 0)
			return (ret);

		children = MIN(oldchildren, children);
	}

	/*
	 * print normal top-level devices
	 */
	for (c = 0; c < children; c++) {
		uint64_t ishole = B_FALSE, islog = B_FALSE;

		(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
		    &ishole);

		(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
		    &islog);

		/* Holes and logs are skipped here; logs print with classes */
		if (ishole || islog)
			continue;

		/* Allocation-class vdevs print in the class loop below */
		if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;

		vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
		    cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
		ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
		    newchild[c], cb, depth + 2);
		free(vname);
	}

	/*
	 * print all other top-level devices
	 */
	for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
		boolean_t printed = B_FALSE;

		for (c = 0; c < children; c++) {
			uint64_t islog = B_FALSE;
			const char *bias = NULL;
			const char *type = NULL;

			(void) nvlist_lookup_uint64(newchild[c],
			    ZPOOL_CONFIG_IS_LOG, &islog);
			if (islog) {
				/* log devices belong to the "logs" class */
				bias = VDEV_ALLOC_CLASS_LOGS;
			} else {
				(void) nvlist_lookup_string(newchild[c],
				    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
				(void) nvlist_lookup_string(newchild[c],
				    ZPOOL_CONFIG_TYPE, &type);
			}
			if (bias == NULL || strcmp(bias, class_name[n]) != 0)
				continue;
			if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
				continue;

			/* Print the class header once, before its first vdev */
			if (!printed) {
				if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
				    !cb->cb_scripted &&
				    !cb->cb_vdevs.cb_names) {
					print_iostat_dashes(cb, 0,
					    class_name[n]);
				}
				printf("\n");
				printed = B_TRUE;
			}

			vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
			    cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
			ret += print_vdev_stats(zhp, vname, oldnv ?
			    oldchild[c] : NULL, newchild[c], cb, depth + 2);
			free(vname);
		}
	}

	/*
	 * Include level 2 ARC devices in iostat output
	 */
	if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
	    &newchild, &children) != 0)
		return (ret);

	if (oldnv) {
		if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
		    &oldchild, &oldchildren) != 0)
			return (ret);

		children = MIN(oldchildren, children);
	}

	if (children > 0) {
		if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
		    !cb->cb_vdevs.cb_names) {
			print_iostat_dashes(cb, 0, "cache");
		}
		printf("\n");

		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
			    cb->cb_vdevs.cb_name_flags);
			ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
			    : NULL, newchild[c], cb, depth + 2);
			free(vname);
		}
	}

	return (ret);
}
5773
5774 /*
5775 * Callback to print out the iostats for the given pool.
5776 */
5777 static int
print_iostat(zpool_handle_t * zhp,void * data)5778 print_iostat(zpool_handle_t *zhp, void *data)
5779 {
5780 iostat_cbdata_t *cb = data;
5781 nvlist_t *oldconfig, *newconfig;
5782 nvlist_t *oldnvroot, *newnvroot;
5783 int ret;
5784
5785 newconfig = zpool_get_config(zhp, &oldconfig);
5786
5787 if (cb->cb_iteration == 1)
5788 oldconfig = NULL;
5789
5790 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
5791 &newnvroot) == 0);
5792
5793 if (oldconfig == NULL)
5794 oldnvroot = NULL;
5795 else
5796 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
5797 &oldnvroot) == 0);
5798
5799 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
5800 cb, 0);
5801 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
5802 !cb->cb_scripted && cb->cb_verbose &&
5803 !cb->cb_vdevs.cb_names_count) {
5804 print_iostat_separator(cb);
5805 if (cb->vcdl != NULL) {
5806 print_cmd_columns(cb->vcdl, 1);
5807 }
5808 printf("\n");
5809 }
5810
5811 return (ret);
5812 }
5813
/*
 * Return the terminal width in columns.  Non-TTY output gets an
 * effectively unlimited width (999); a failed ioctl falls back to 80.
 */
static int
get_columns(void)
{
	struct winsize ws;
	int columns = 80;

	if (!isatty(STDOUT_FILENO))
		return (999);

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
		columns = ws.ws_col;

	return (columns);
}
5831
5832 /*
5833 * Return the required length of the pool/vdev name column. The minimum
5834 * allowed width and output formatting flags must be provided.
5835 */
5836 static int
get_namewidth(zpool_handle_t * zhp,int min_width,int flags,boolean_t verbose)5837 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5838 {
5839 nvlist_t *config, *nvroot;
5840 int width = min_width;
5841
5842 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5843 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5844 &nvroot) == 0);
5845 size_t poolname_len = strlen(zpool_get_name(zhp));
5846 if (verbose == B_FALSE) {
5847 width = MAX(poolname_len, min_width);
5848 } else {
5849 width = MAX(poolname_len,
5850 max_width(zhp, nvroot, 0, min_width, flags));
5851 }
5852 }
5853
5854 return (width);
5855 }
5856
/*
 * Parse the input string, get the 'interval' and 'count' value if there is one.
 *
 * The command line ends with "[interval [count]]": if the last argument is a
 * number it is first taken as the interval; if the argument before it is also
 * a number, that earlier argument becomes the interval and the later one the
 * count.  Consumed arguments are removed by decrementing *argcp.
 */
static void
get_interval_count(int *argcp, char **argv, float *iv,
    unsigned long *cnt)
{
	float interval = 0;
	unsigned long count = 0;
	int argc = *argcp;

	/*
	 * Determine if the last argument is an integer or a pool name
	 */
	if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
		char *end;

		errno = 0;
		interval = strtof(argv[argc - 1], &end);

		/* Fully consumed and no range error -> it was a number */
		if (*end == '\0' && errno == 0) {
			if (interval == 0) {
				(void) fprintf(stderr, gettext(
				    "interval cannot be zero\n"));
				usage(B_FALSE);
			}
			/*
			 * Ignore the last parameter
			 */
			argc--;
		} else {
			/*
			 * If this is not a valid number, just plow on.  The
			 * user will get a more informative error message later
			 * on.
			 */
			interval = 0;
		}
	}

	/*
	 * If the last argument is also an integer, then we have both a count
	 * and an interval.
	 */
	if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
		char *end;

		errno = 0;
		/*
		 * The value parsed above was actually the count; the
		 * earlier argument is the interval ("interval [count]").
		 */
		count = interval;
		interval = strtof(argv[argc - 1], &end);

		if (*end == '\0' && errno == 0) {
			if (interval == 0) {
				(void) fprintf(stderr, gettext(
				    "interval cannot be zero\n"));
				usage(B_FALSE);
			}

			/*
			 * Ignore the last parameter
			 */
			argc--;
		} else {
			interval = 0;
		}
	}

	*iv = interval;
	*cnt = count;
	*argcp = argc;
}
5928
5929 static void
get_timestamp_arg(char c)5930 get_timestamp_arg(char c)
5931 {
5932 if (c == 'u')
5933 timestamp_fmt = UDATE;
5934 else if (c == 'd')
5935 timestamp_fmt = DDATE;
5936 else
5937 usage(B_FALSE);
5938 }
5939
5940 /*
5941 * Return stat flags that are supported by all pools by both the module and
5942 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5943 * It will get ANDed down until only the flags that are supported on all pools
5944 * remain.
5945 */
5946 static int
get_stat_flags_cb(zpool_handle_t * zhp,void * data)5947 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5948 {
5949 uint64_t *mask = data;
5950 nvlist_t *config, *nvroot, *nvx;
5951 uint64_t flags = 0;
5952 int i, j;
5953
5954 config = zpool_get_config(zhp, NULL);
5955 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5956 &nvroot) == 0);
5957
5958 /* Default stats are always supported, but for completeness.. */
5959 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5960 flags |= IOS_DEFAULT_M;
5961
5962 /* Get our extended stats nvlist from the main list */
5963 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5964 &nvx) != 0) {
5965 /*
5966 * No extended stats; they're probably running an older
5967 * module. No big deal, we support that too.
5968 */
5969 goto end;
5970 }
5971
5972 /* For each extended stat, make sure all its nvpairs are supported */
5973 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5974 if (!vsx_type_to_nvlist[j][0])
5975 continue;
5976
5977 /* Start off by assuming the flag is supported, then check */
5978 flags |= (1ULL << j);
5979 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5980 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5981 /* flag isn't supported */
5982 flags = flags & ~(1ULL << j);
5983 break;
5984 }
5985 }
5986 }
5987 end:
5988 *mask = *mask & flags;
5989 return (0);
5990 }
5991
5992 /*
5993 * Return a bitmask of stats that are supported on all pools by both the module
5994 * and zpool iostat.
5995 */
5996 static uint64_t
get_stat_flags(zpool_list_t * list)5997 get_stat_flags(zpool_list_t *list)
5998 {
5999 uint64_t mask = -1;
6000
6001 /*
6002 * get_stat_flags_cb() will lop off bits from "mask" until only the
6003 * flags that are supported on all pools remain.
6004 */
6005 (void) pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
6006 return (mask);
6007 }
6008
6009 /*
6010 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
6011 */
6012 static int
is_vdev_cb(void * zhp_data,nvlist_t * nv,void * cb_data)6013 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
6014 {
6015 uint64_t guid;
6016 vdev_cbdata_t *cb = cb_data;
6017 zpool_handle_t *zhp = zhp_data;
6018
6019 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
6020 return (0);
6021
6022 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
6023 }
6024
6025 /*
6026 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
6027 */
6028 static int
is_vdev(zpool_handle_t * zhp,void * cb_data)6029 is_vdev(zpool_handle_t *zhp, void *cb_data)
6030 {
6031 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
6032 }
6033
6034 /*
6035 * Check if vdevs are in a pool
6036 *
6037 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
6038 * return 0. If pool_name is NULL, then search all pools.
6039 */
6040 static int
are_vdevs_in_pool(int argc,char ** argv,char * pool_name,vdev_cbdata_t * cb)6041 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
6042 vdev_cbdata_t *cb)
6043 {
6044 char **tmp_name;
6045 int ret = 0;
6046 int i;
6047 int pool_count = 0;
6048
6049 if ((argc == 0) || !*argv)
6050 return (0);
6051
6052 if (pool_name)
6053 pool_count = 1;
6054
6055 /* Temporarily hijack cb_names for a second... */
6056 tmp_name = cb->cb_names;
6057
6058 /* Go though our list of prospective vdev names */
6059 for (i = 0; i < argc; i++) {
6060 cb->cb_names = argv + i;
6061
6062 /* Is this name a vdev in our pools? */
6063 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
6064 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
6065 if (!ret) {
6066 /* No match */
6067 break;
6068 }
6069 }
6070
6071 cb->cb_names = tmp_name;
6072
6073 return (ret);
6074 }
6075
6076 static int
is_pool_cb(zpool_handle_t * zhp,void * data)6077 is_pool_cb(zpool_handle_t *zhp, void *data)
6078 {
6079 char *name = data;
6080 if (strcmp(name, zpool_get_name(zhp)) == 0)
6081 return (1);
6082
6083 return (0);
6084 }
6085
6086 /*
6087 * Do we have a pool named *name? If so, return 1, otherwise 0.
6088 */
6089 static int
is_pool(char * name)6090 is_pool(char *name)
6091 {
6092 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
6093 is_pool_cb, name));
6094 }
6095
6096 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
6097 static int
are_all_pools(int argc,char ** argv)6098 are_all_pools(int argc, char **argv)
6099 {
6100 if ((argc == 0) || !*argv)
6101 return (0);
6102
6103 while (--argc >= 0)
6104 if (!is_pool(argv[argc]))
6105 return (0);
6106
6107 return (1);
6108 }
6109
6110 /*
6111 * Helper function to print out vdev/pool names we can't resolve. Used for an
6112 * error message.
6113 */
6114 static void
error_list_unresolved_vdevs(int argc,char ** argv,char * pool_name,vdev_cbdata_t * cb)6115 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
6116 vdev_cbdata_t *cb)
6117 {
6118 int i;
6119 char *name;
6120 char *str;
6121 for (i = 0; i < argc; i++) {
6122 name = argv[i];
6123
6124 if (is_pool(name))
6125 str = gettext("pool");
6126 else if (are_vdevs_in_pool(1, &name, pool_name, cb))
6127 str = gettext("vdev in this pool");
6128 else if (are_vdevs_in_pool(1, &name, NULL, cb))
6129 str = gettext("vdev in another pool");
6130 else
6131 str = gettext("unknown");
6132
6133 fprintf(stderr, "\t%s (%s)\n", name, str);
6134 }
6135 }
6136
/*
 * Same as get_interval_count(), but with additional checks to not misinterpret
 * guids as interval/count values.  Assumes VDEV_NAME_GUID is set in
 * cb.cb_vdevs.cb_name_flags.
 *
 * Up to the last two arguments may be interval/count values; any trailing
 * argument that resolves to a vdev GUID is left alone.
 */
static void
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
    unsigned long *count, iostat_cbdata_t *cb)
{
	/* How many trailing args to hand to get_interval_count() */
	int argc_for_interval = 0;

	/* Is the last arg an interval value?  Or a guid? */
	if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
	    &cb->cb_vdevs)) {
		/*
		 * The last arg is not a guid, so it's probably an
		 * interval value.
		 */
		argc_for_interval++;

		if (*argc >= 2 &&
		    !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
		    &cb->cb_vdevs)) {
			/*
			 * The 2nd to last arg is not a guid, so it's probably
			 * an interval value.
			 */
			argc_for_interval++;
		}
	}

	/* Point to our list of possible intervals */
	char **tmpargv = &argv[*argc - argc_for_interval];

	/* Strip the candidates off the caller's argv before parsing them */
	*argc = *argc - argc_for_interval;
	get_interval_count(&argc_for_interval, tmpargv,
	    interval, count);
}
6175
/*
 * Terminal height, in rows.  Returns -1 if stdout is not connected to a TTY or
 * if we were unable to determine its size.
 */
static int
terminal_height(void)
{
	struct winsize win;

	if (!isatty(STDOUT_FILENO))
		return (-1);

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) == -1 || win.ws_row == 0)
		return (-1);

	return (win.ws_row);
}
6193
/*
 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
 * print the result.
 *
 * name:	Short name of the script ('iostat').
 * path:	Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat');
 */
static void
print_zpool_script_help(char *name, char *path)
{
	char *argv[] = {path, (char *)"-h", NULL};
	char **lines = NULL;
	int lines_cnt = 0;

	/* Capture the script's -h output; bail (and clean up) on failure */
	if (libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
	    &lines_cnt) != 0 || lines == NULL || lines_cnt <= 0) {
		if (lines != NULL)
			libzfs_free_str_array(lines, lines_cnt);
		return;
	}

	for (int i = 0; i < lines_cnt; i++) {
		if (!is_blank_str(lines[i]))
			printf(" %-14s %s\n", name, lines[i]);
	}

	libzfs_free_str_array(lines, lines_cnt);
}
6223
/*
 * Go though the zpool status/iostat -c scripts in the user's path, run their
 * help option (-h), and print out the results.
 */
static void
print_zpool_dir_scripts(char *dirpath)
{
	DIR *dir;
	struct dirent *ent;
	char fullpath[MAXPATHLEN];
	struct stat dir_stat;

	if ((dir = opendir(dirpath)) == NULL)
		return;

	/* print all the files and directories within directory */
	while ((ent = readdir(dir)) != NULL) {
		if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
		    dirpath, ent->d_name) >= sizeof (fullpath)) {
			(void) fprintf(stderr,
			    gettext("internal error: "
			    "ZPOOL_SCRIPTS_PATH too large.\n"));
			exit(1);
		}

		/* Only executable regular files count as scripts */
		if (stat(fullpath, &dir_stat) != 0)
			continue;
		if (S_ISREG(dir_stat.st_mode) &&
		    (dir_stat.st_mode & S_IXUSR))
			print_zpool_script_help(ent->d_name, fullpath);
	}
	(void) closedir(dir);
}
6257
/*
 * Print out help text for all zpool status/iostat -c scripts.
 */
static void
print_zpool_script_list(const char *subcommand)
{
	char *search_path, *token, *saveptr;

	printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);

	search_path = zpool_get_cmd_search_path();
	if (search_path == NULL)
		return;

	/* Walk each colon-separated directory in the search path */
	for (token = strtok_r(search_path, ":", &saveptr); token != NULL;
	    token = strtok_r(NULL, ":", &saveptr))
		print_zpool_dir_scripts(token);

	free(search_path);
}
6279
6280 /*
6281 * Set the minimum pool/vdev name column width. The width must be at least 10,
6282 * but may be as large as the column width - 42 so it still fits on one line.
6283 * NOTE: 42 is the width of the default capacity/operations/bandwidth output
6284 */
6285 static int
get_namewidth_iostat(zpool_handle_t * zhp,void * data)6286 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
6287 {
6288 iostat_cbdata_t *cb = data;
6289 int width, available_width;
6290
6291 /*
6292 * get_namewidth() returns the maximum width of any name in that column
6293 * for any pool/vdev/device line that will be output.
6294 */
6295 width = get_namewidth(zhp, cb->cb_namewidth,
6296 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
6297
6298 /*
6299 * The width we are calculating is the width of the header and also the
6300 * padding width for names that are less than maximum width. The stats
6301 * take up 42 characters, so the width available for names is:
6302 */
6303 available_width = get_columns() - 42;
6304
6305 /*
6306 * If the maximum width fits on a screen, then great! Make everything
6307 * line up by justifying all lines to the same width. If that max
6308 * width is larger than what's available, the name plus stats won't fit
6309 * on one line, and justifying to that width would cause every line to
6310 * wrap on the screen. We only want lines with long names to wrap.
6311 * Limit the padding to what won't wrap.
6312 */
6313 if (width > available_width)
6314 width = available_width;
6315
6316 /*
6317 * And regardless of whatever the screen width is (get_columns can
6318 * return 0 if the width is not known or less than 42 for a narrow
6319 * terminal) have the width be a minimum of 10.
6320 */
6321 if (width < 10)
6322 width = 10;
6323
6324 /* Save the calculated width */
6325 cb->cb_namewidth = width;
6326
6327 return (0);
6328 }
6329
6330 /*
6331 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name]
6332 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
6333 * [interval [count]]
6334 *
6335 * -c CMD For each vdev, run command CMD
6336 * -g Display guid for individual vdev name.
6337 * -L Follow links when resolving vdev path name.
6338 * -P Display full path for vdev name.
6339 * -v Display statistics for individual vdevs
6340 * -h Display help
6341 * -p Display values in parsable (exact) format.
6342 * -H Scripted mode. Don't display headers, and separate properties
6343 * by a single tab.
6344 * -l Display average latency
6345 * -q Display queue depths
6346 * -w Display latency histograms
6347 * -r Display request size histogram
6348 * -T Display a timestamp in date(1) or Unix format
6349 * -n Only print headers once
6350 *
6351 * This command can be tricky because we want to be able to deal with pool
6352 * creation/destruction as well as vdev configuration changes. The bulk of this
6353 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
6354 * on pool_list_refresh() to detect the addition and removal of pools.
6355 * Configuration changes are all handled within libzfs.
6356 */
6357 int
zpool_do_iostat(int argc,char ** argv)6358 zpool_do_iostat(int argc, char **argv)
6359 {
6360 int c;
6361 int ret;
6362 float interval = 0;
6363 unsigned long count = 0;
6364 zpool_list_t *list;
6365 boolean_t verbose = B_FALSE;
6366 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
6367 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
6368 boolean_t omit_since_boot = B_FALSE;
6369 boolean_t guid = B_FALSE;
6370 boolean_t follow_links = B_FALSE;
6371 boolean_t full_name = B_FALSE;
6372 boolean_t headers_once = B_FALSE;
6373 iostat_cbdata_t cb = { 0 };
6374 char *cmd = NULL;
6375
6376 /* Used for printing error message */
6377 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
6378 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
6379
6380 uint64_t unsupported_flags;
6381
6382 /* check options */
6383 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
6384 switch (c) {
6385 case 'c':
6386 if (cmd != NULL) {
6387 fprintf(stderr,
6388 gettext("Can't set -c flag twice\n"));
6389 exit(1);
6390 }
6391
6392 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
6393 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
6394 fprintf(stderr, gettext(
6395 "Can't run -c, disabled by "
6396 "ZPOOL_SCRIPTS_ENABLED.\n"));
6397 exit(1);
6398 }
6399
6400 if ((getuid() <= 0 || geteuid() <= 0) &&
6401 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
6402 fprintf(stderr, gettext(
6403 "Can't run -c with root privileges "
6404 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
6405 exit(1);
6406 }
6407 cmd = optarg;
6408 verbose = B_TRUE;
6409 break;
6410 case 'g':
6411 guid = B_TRUE;
6412 break;
6413 case 'L':
6414 follow_links = B_TRUE;
6415 break;
6416 case 'P':
6417 full_name = B_TRUE;
6418 break;
6419 case 'T':
6420 get_timestamp_arg(*optarg);
6421 break;
6422 case 'v':
6423 verbose = B_TRUE;
6424 break;
6425 case 'p':
6426 parsable = B_TRUE;
6427 break;
6428 case 'l':
6429 latency = B_TRUE;
6430 break;
6431 case 'q':
6432 queues = B_TRUE;
6433 break;
6434 case 'H':
6435 scripted = B_TRUE;
6436 break;
6437 case 'w':
6438 l_histo = B_TRUE;
6439 break;
6440 case 'r':
6441 rq_histo = B_TRUE;
6442 break;
6443 case 'y':
6444 omit_since_boot = B_TRUE;
6445 break;
6446 case 'n':
6447 headers_once = B_TRUE;
6448 break;
6449 case 'h':
6450 usage(B_FALSE);
6451 break;
6452 case '?':
6453 if (optopt == 'c') {
6454 print_zpool_script_list("iostat");
6455 exit(0);
6456 } else {
6457 fprintf(stderr,
6458 gettext("invalid option '%c'\n"), optopt);
6459 }
6460 usage(B_FALSE);
6461 }
6462 }
6463
6464 argc -= optind;
6465 argv += optind;
6466
6467 cb.cb_literal = parsable;
6468 cb.cb_scripted = scripted;
6469
6470 if (guid)
6471 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
6472 if (follow_links)
6473 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6474 if (full_name)
6475 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
6476 cb.cb_iteration = 0;
6477 cb.cb_namewidth = 0;
6478 cb.cb_verbose = verbose;
6479
6480 /* Get our interval and count values (if any) */
6481 if (guid) {
6482 get_interval_count_filter_guids(&argc, argv, &interval,
6483 &count, &cb);
6484 } else {
6485 get_interval_count(&argc, argv, &interval, &count);
6486 }
6487
6488 if (argc == 0) {
6489 /* No args, so just print the defaults. */
6490 } else if (are_all_pools(argc, argv)) {
6491 /* All the args are pool names */
6492 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
6493 /* All the args are vdevs */
6494 cb.cb_vdevs.cb_names = argv;
6495 cb.cb_vdevs.cb_names_count = argc;
6496 argc = 0; /* No pools to process */
6497 } else if (are_all_pools(1, argv)) {
6498 /* The first arg is a pool name */
6499 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
6500 &cb.cb_vdevs)) {
6501 /* ...and the rest are vdev names */
6502 cb.cb_vdevs.cb_names = argv + 1;
6503 cb.cb_vdevs.cb_names_count = argc - 1;
6504 argc = 1; /* One pool to process */
6505 } else {
6506 fprintf(stderr, gettext("Expected either a list of "));
6507 fprintf(stderr, gettext("pools, or list of vdevs in"));
6508 fprintf(stderr, " \"%s\", ", argv[0]);
6509 fprintf(stderr, gettext("but got:\n"));
6510 error_list_unresolved_vdevs(argc - 1, argv + 1,
6511 argv[0], &cb.cb_vdevs);
6512 fprintf(stderr, "\n");
6513 usage(B_FALSE);
6514 }
6515 } else {
6516 /*
6517 * The args don't make sense. The first arg isn't a pool name,
6518 * nor are all the args vdevs.
6519 */
6520 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
6521 fprintf(stderr, "\n");
6522 return (1);
6523 }
6524
6525 if (cb.cb_vdevs.cb_names_count != 0) {
6526 /*
6527 * If user specified vdevs, it implies verbose.
6528 */
6529 cb.cb_verbose = B_TRUE;
6530 }
6531
6532 /*
6533 * Construct the list of all interesting pools.
6534 */
6535 ret = 0;
6536 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
6537 &ret)) == NULL)
6538 return (1);
6539
6540 if (pool_list_count(list) == 0 && argc != 0) {
6541 pool_list_free(list);
6542 return (1);
6543 }
6544
6545 if (pool_list_count(list) == 0 && interval == 0) {
6546 pool_list_free(list);
6547 (void) fprintf(stderr, gettext("no pools available\n"));
6548 return (1);
6549 }
6550
6551 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
6552 pool_list_free(list);
6553 (void) fprintf(stderr,
6554 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
6555 usage(B_FALSE);
6556 }
6557
6558 if (l_histo && rq_histo) {
6559 pool_list_free(list);
6560 (void) fprintf(stderr,
6561 gettext("Only one of [-r|-w] can be passed at a time\n"));
6562 usage(B_FALSE);
6563 }
6564
6565 /*
6566 * Enter the main iostat loop.
6567 */
6568 cb.cb_list = list;
6569
6570 if (l_histo) {
6571 /*
6572 * Histograms tables look out of place when you try to display
6573 * them with the other stats, so make a rule that you can only
6574 * print histograms by themselves.
6575 */
6576 cb.cb_flags = IOS_L_HISTO_M;
6577 } else if (rq_histo) {
6578 cb.cb_flags = IOS_RQ_HISTO_M;
6579 } else {
6580 cb.cb_flags = IOS_DEFAULT_M;
6581 if (latency)
6582 cb.cb_flags |= IOS_LATENCY_M;
6583 if (queues)
6584 cb.cb_flags |= IOS_QUEUES_M;
6585 }
6586
6587 /*
6588 * See if the module supports all the stats we want to display.
6589 */
6590 unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
6591 if (unsupported_flags) {
6592 uint64_t f;
6593 int idx;
6594 fprintf(stderr,
6595 gettext("The loaded zfs module doesn't support:"));
6596
6597 /* for each bit set in unsupported_flags */
6598 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
6599 idx = lowbit64(f) - 1;
6600 fprintf(stderr, " -%c", flag_to_arg[idx]);
6601 }
6602
6603 fprintf(stderr, ". Try running a newer module.\n");
6604 pool_list_free(list);
6605
6606 return (1);
6607 }
6608
6609 int last_npools = 0;
6610 for (;;) {
6611 /*
6612 * Refresh all pools in list, adding or removing pools as
6613 * necessary.
6614 */
6615 int npools = pool_list_refresh(list);
6616 if (npools == 0) {
6617 (void) fprintf(stderr, gettext("no pools available\n"));
6618 } else {
6619 /*
6620 * If the list of pools has changed since last time
6621 * around, reset the iteration count to force the
6622 * header to be redisplayed.
6623 */
6624 if (last_npools != npools)
6625 cb.cb_iteration = 0;
6626
6627 /*
6628 * If this is the first iteration and -y was supplied
6629 * we skip any printing.
6630 */
6631 boolean_t skip = (omit_since_boot &&
6632 cb.cb_iteration == 0);
6633
6634 /*
6635 * Iterate over all pools to determine the maximum width
6636 * for the pool / device name column across all pools.
6637 */
6638 cb.cb_namewidth = 0;
6639 (void) pool_list_iter(list, B_FALSE,
6640 get_namewidth_iostat, &cb);
6641
6642 if (timestamp_fmt != NODATE)
6643 print_timestamp(timestamp_fmt);
6644
6645 if (cmd != NULL && cb.cb_verbose &&
6646 !(cb.cb_flags & IOS_ANYHISTO_M)) {
6647 cb.vcdl = all_pools_for_each_vdev_run(argc,
6648 argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
6649 cb.cb_vdevs.cb_names_count,
6650 cb.cb_vdevs.cb_name_flags);
6651 } else {
6652 cb.vcdl = NULL;
6653 }
6654
6655
6656 /*
6657 * Check terminal size so we can print headers
6658 * even when terminal window has its height
6659 * changed.
6660 */
6661 int winheight = terminal_height();
6662 /*
6663 * Are we connected to TTY? If not, headers_once
6664 * should be true, to avoid breaking scripts.
6665 */
6666 if (winheight < 0)
6667 headers_once = B_TRUE;
6668
6669 /*
6670 * If it's the first time and we're not skipping it,
6671 * or either skip or verbose mode, print the header.
6672 *
6673 * The histogram code explicitly prints its header on
6674 * every vdev, so skip this for histograms.
6675 */
6676 if (((++cb.cb_iteration == 1 && !skip) ||
6677 (skip != verbose) ||
6678 (!headers_once &&
6679 (cb.cb_iteration % winheight) == 0)) &&
6680 (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
6681 !cb.cb_scripted)
6682 print_iostat_header(&cb);
6683
6684 if (skip) {
6685 (void) fflush(stdout);
6686 (void) fsleep(interval);
6687 last_npools = npools;
6688 continue;
6689 }
6690
6691 (void) pool_list_iter(list, B_FALSE, print_iostat, &cb);
6692
6693 /*
6694 * If there's more than one pool, and we're not in
6695 * verbose mode (which prints a separator for us),
6696 * then print a separator.
6697 *
6698 * In addition, if we're printing specific vdevs then
6699 * we also want an ending separator.
6700 */
6701 if (((npools > 1 && !verbose &&
6702 !(cb.cb_flags & IOS_ANYHISTO_M)) ||
6703 (!(cb.cb_flags & IOS_ANYHISTO_M) &&
6704 cb.cb_vdevs.cb_names_count)) &&
6705 !cb.cb_scripted) {
6706 print_iostat_separator(&cb);
6707 if (cb.vcdl != NULL)
6708 print_cmd_columns(cb.vcdl, 1);
6709 printf("\n");
6710 }
6711
6712 if (cb.vcdl != NULL)
6713 free_vdev_cmd_data_list(cb.vcdl);
6714
6715 }
6716
6717 if (interval == 0)
6718 break;
6719
6720 if (count != 0 && --count == 0)
6721 break;
6722
6723 (void) fflush(stdout);
6724 (void) fsleep(interval);
6725
6726 last_npools = npools;
6727 }
6728
6729 pool_list_free(list);
6730
6731 return (ret);
6732 }
6733
/* State shared across the 'zpool list' callbacks and output helpers. */
typedef struct list_cbdata {
	boolean_t cb_verbose;		/* -v: also list member vdevs */
	int cb_name_flags;		/* VDEV_NAME_* flags (-g/-L/-P) */
	int cb_namewidth;		/* width of the name column */
	boolean_t cb_json;		/* -j: emit JSON instead of a table */
	boolean_t cb_scripted;		/* -H: no headers, tab separators */
	zprop_list_t *cb_proplist;	/* properties to display (-o) */
	boolean_t cb_literal;		/* -p: exact (parsable) values */
	nvlist_t *cb_jsobj;		/* root object of the JSON output */
	boolean_t cb_json_as_int;	/* --json-int: numbers as integers */
	boolean_t cb_json_pool_key_guid; /* key pool objects by GUID */
} list_cbdata_t;
6746
6747
6748 /*
6749 * Given a list of columns to display, output appropriate headers for each one.
6750 */
6751 static void
print_header(list_cbdata_t * cb)6752 print_header(list_cbdata_t *cb)
6753 {
6754 zprop_list_t *pl = cb->cb_proplist;
6755 char headerbuf[ZPOOL_MAXPROPLEN];
6756 const char *header;
6757 boolean_t first = B_TRUE;
6758 boolean_t right_justify;
6759 size_t width = 0;
6760
6761 for (; pl != NULL; pl = pl->pl_next) {
6762 width = pl->pl_width;
6763 if (first && cb->cb_verbose) {
6764 /*
6765 * Reset the width to accommodate the verbose listing
6766 * of devices.
6767 */
6768 width = cb->cb_namewidth;
6769 }
6770
6771 if (!first)
6772 (void) fputs(" ", stdout);
6773 else
6774 first = B_FALSE;
6775
6776 right_justify = B_FALSE;
6777 if (pl->pl_prop != ZPROP_USERPROP) {
6778 header = zpool_prop_column_name(pl->pl_prop);
6779 right_justify = zpool_prop_align_right(pl->pl_prop);
6780 } else {
6781 int i;
6782
6783 for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
6784 headerbuf[i] = toupper(pl->pl_user_prop[i]);
6785 headerbuf[i] = '\0';
6786 header = headerbuf;
6787 }
6788
6789 if (pl->pl_next == NULL && !right_justify)
6790 (void) fputs(header, stdout);
6791 else if (right_justify)
6792 (void) printf("%*s", (int)width, header);
6793 else
6794 (void) printf("%-*s", (int)width, header);
6795 }
6796
6797 (void) fputc('\n', stdout);
6798 }
6799
6800 /*
6801 * Given a pool and a list of properties, print out all the properties according
6802 * to the described layout. Used by zpool_do_list().
6803 */
static void
collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
{
	zprop_list_t *pl = cb->cb_proplist;
	boolean_t first = B_TRUE;
	char property[ZPOOL_MAXPROPLEN];
	const char *propstr;
	boolean_t right_justify;
	size_t width;
	zprop_source_t sourcetype = ZPROP_SRC_NONE;
	nvlist_t *item, *d, *props;
	item = d = props = NULL;

	/*
	 * In JSON mode, accumulate the pool's properties into an nvlist
	 * ("props") hanging off a per-pool object ("item") which is later
	 * attached under the shared "pools" object ("d").
	 */
	if (cb->cb_json) {
		item = fnvlist_alloc();
		props = fnvlist_alloc();
		d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools");
		if (d == NULL) {
			fprintf(stderr, "pools obj not found.\n");
			exit(1);
		}
		fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int);
	}

	/* Emit one value per requested property, in -o order. */
	for (; pl != NULL; pl = pl->pl_next) {

		width = pl->pl_width;
		if (first && cb->cb_verbose) {
			/*
			 * Reset the width to accommodate the verbose listing
			 * of devices.
			 */
			width = cb->cb_namewidth;
		}

		/* Column separator: tab in scripted mode, spaces otherwise. */
		if (!cb->cb_json && !first) {
			if (cb->cb_scripted)
				(void) fputc('\t', stdout);
			else
				(void) fputs(" ", stdout);
		} else {
			first = B_FALSE;
		}

		/*
		 * Resolve the property's value: native pool property,
		 * feature@/unsupported@ property, or user property.
		 * Anything unresolvable renders as "-".
		 */
		right_justify = B_FALSE;
		if (pl->pl_prop != ZPROP_USERPROP) {
			if (zpool_get_prop(zhp, pl->pl_prop, property,
			    sizeof (property), &sourcetype,
			    cb->cb_literal) != 0)
				propstr = "-";
			else
				propstr = property;

			right_justify = zpool_prop_align_right(pl->pl_prop);
		} else if ((zpool_prop_feature(pl->pl_user_prop) ||
		    zpool_prop_unsupported(pl->pl_user_prop)) &&
		    zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
		    sizeof (property)) == 0) {
			propstr = property;
			sourcetype = ZPROP_SRC_LOCAL;
		} else if (zfs_prop_user(pl->pl_user_prop) &&
		    zpool_get_userprop(zhp, pl->pl_user_prop, property,
		    sizeof (property), &sourcetype) == 0) {
			propstr = property;
		} else {
			propstr = "-";
		}

		if (cb->cb_json) {
			/* The name is already the JSON object's key. */
			if (pl->pl_prop == ZPOOL_PROP_NAME)
				continue;
			const char *prop_name;
			if (pl->pl_prop != ZPROP_USERPROP)
				prop_name = zpool_prop_to_name(pl->pl_prop);
			else
				prop_name = pl->pl_user_prop;
			(void) zprop_nvlist_one_property(
			    prop_name, propstr,
			    sourcetype, NULL, NULL, props, cb->cb_json_as_int);
		} else {
			/*
			 * If this is being called in scripted mode, or if this
			 * is the last column and it is left-justified, don't
			 * include a width format specifier.
			 */
			if (cb->cb_scripted || (pl->pl_next == NULL &&
			    !right_justify))
				(void) fputs(propstr, stdout);
			else if (right_justify)
				(void) printf("%*s", (int)width, propstr);
			else
				(void) printf("%-*s", (int)width, propstr);
		}
	}

	/*
	 * Attach the finished pool object to "pools", keyed either by the
	 * pool GUID (--json-pool-key-guid) or the pool name.
	 */
	if (cb->cb_json) {
		fnvlist_add_nvlist(item, "properties", props);
		if (cb->cb_json_pool_key_guid) {
			char pool_guid[256];
			uint64_t guid = fnvlist_lookup_uint64(
			    zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_POOL_GUID);
			(void) snprintf(pool_guid, 256, "%llu",
			    (u_longlong_t)guid);
			fnvlist_add_nvlist(d, pool_guid, item);
		} else {
			fnvlist_add_nvlist(d, zpool_get_name(zhp),
			    item);
		}
		fnvlist_free(props);
		fnvlist_free(item);
	} else
		(void) fputc('\n', stdout);
}
6918
6919 static void
collect_vdev_prop(zpool_prop_t prop,uint64_t value,const char * str,boolean_t scripted,boolean_t valid,enum zfs_nicenum_format format,boolean_t json,nvlist_t * nvl,boolean_t as_int)6920 collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,
6921 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format,
6922 boolean_t json, nvlist_t *nvl, boolean_t as_int)
6923 {
6924 char propval[64];
6925 boolean_t fixed;
6926 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6927
6928 switch (prop) {
6929 case ZPOOL_PROP_SIZE:
6930 case ZPOOL_PROP_EXPANDSZ:
6931 case ZPOOL_PROP_CHECKPOINT:
6932 case ZPOOL_PROP_DEDUPRATIO:
6933 case ZPOOL_PROP_DEDUPCACHED:
6934 if (value == 0)
6935 (void) strlcpy(propval, "-", sizeof (propval));
6936 else
6937 zfs_nicenum_format(value, propval, sizeof (propval),
6938 format);
6939 break;
6940 case ZPOOL_PROP_FRAGMENTATION:
6941 if (value == ZFS_FRAG_INVALID) {
6942 (void) strlcpy(propval, "-", sizeof (propval));
6943 } else if (format == ZFS_NICENUM_RAW) {
6944 (void) snprintf(propval, sizeof (propval), "%llu",
6945 (unsigned long long)value);
6946 } else {
6947 (void) snprintf(propval, sizeof (propval), "%llu%%",
6948 (unsigned long long)value);
6949 }
6950 break;
6951 case ZPOOL_PROP_CAPACITY:
6952 /* capacity value is in parts-per-10,000 (aka permyriad) */
6953 if (format == ZFS_NICENUM_RAW)
6954 (void) snprintf(propval, sizeof (propval), "%llu",
6955 (unsigned long long)value / 100);
6956 else
6957 (void) snprintf(propval, sizeof (propval),
6958 value < 1000 ? "%1.2f%%" : value < 10000 ?
6959 "%2.1f%%" : "%3.0f%%", value / 100.0);
6960 break;
6961 case ZPOOL_PROP_HEALTH:
6962 width = 8;
6963 (void) strlcpy(propval, str, sizeof (propval));
6964 break;
6965 default:
6966 zfs_nicenum_format(value, propval, sizeof (propval), format);
6967 }
6968
6969 if (!valid)
6970 (void) strlcpy(propval, "-", sizeof (propval));
6971
6972 if (json) {
6973 (void) zprop_nvlist_one_property(zpool_prop_to_name(prop),
6974 propval, ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);
6975 } else {
6976 if (scripted)
6977 (void) printf("\t%s", propval);
6978 else
6979 (void) printf(" %*s", (int)width, propval);
6980 }
6981 }
6982
6983 /*
6984 * print static default line per vdev
6985 */
static void
collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
    list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, children = 0;
	char *vname;
	boolean_t scripted = cb->cb_scripted;
	uint64_t islog = B_FALSE;
	/*
	 * JSON accumulators: props/ent for this vdev's row, ch for its
	 * children, obj/l2c/sp for class, l2cache, and spare groups.
	 */
	nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;
	props = ent = ch = obj = sp = l2c = NULL;
	/* Placeholder row printed for group headers ('logs', 'cache', ...). */
	const char *dashes = "%-*s - - - - "
	    "- - - - -\n";

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);

	/*
	 * name == NULL means we were handed the root vdev; only the children
	 * are listed in that case (the pool row comes from collect_pool()).
	 */
	if (name != NULL) {
		/* Only top-level vdevs report space totals. */
		boolean_t toplevel = (vs->vs_space != 0);
		uint64_t cap;
		enum zfs_nicenum_format format;
		const char *state;

		if (cb->cb_literal)
			format = ZFS_NICENUM_RAW;
		else
			format = ZFS_NICENUM_1024;

		/* Indirect vdevs (post-removal remnants) are not shown. */
		if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
			return;

		if (cb->cb_json) {
			props = fnvlist_alloc();
			ent = fnvlist_alloc();
			fill_vdev_info(ent, zhp, (char *)name, B_FALSE,
			    cb->cb_json_as_int);
		} else {
			/* Indent by depth, then pad out to the name column. */
			if (scripted)
				(void) printf("\t%s", name);
			else if (strlen(name) + depth > cb->cb_namewidth)
				(void) printf("%*s%s", depth, "", name);
			else
				(void) printf("%*s%s%*s", depth, "", name,
				    (int)(cb->cb_namewidth - strlen(name) -
				    depth), "");
		}

		/*
		 * Print the properties for the individual vdevs. Some
		 * properties are only applicable to toplevel vdevs. The
		 * 'toplevel' boolean value is passed to collect_vdev_prop()
		 * to indicate that the value is valid.
		 */
		for (zprop_list_t *pl = cb->cb_proplist; pl != NULL;
		    pl = pl->pl_next) {
			switch (pl->pl_prop) {
			case ZPOOL_PROP_SIZE:
				/* Prefer physical space when reported. */
				if (VDEV_STAT_VALID(vs_pspace, c) &&
				    vs->vs_pspace) {
					collect_vdev_prop(
					    ZPOOL_PROP_SIZE, vs->vs_pspace,
					    NULL, scripted, B_TRUE, format,
					    cb->cb_json, props,
					    cb->cb_json_as_int);
				} else {
					collect_vdev_prop(
					    ZPOOL_PROP_SIZE, vs->vs_space, NULL,
					    scripted, toplevel, format,
					    cb->cb_json, props,
					    cb->cb_json_as_int);
				}
				break;
			case ZPOOL_PROP_ALLOCATED:
				collect_vdev_prop(ZPOOL_PROP_ALLOCATED,
				    vs->vs_alloc, NULL, scripted, toplevel,
				    format, cb->cb_json, props,
				    cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_FREE:
				collect_vdev_prop(ZPOOL_PROP_FREE,
				    vs->vs_space - vs->vs_alloc, NULL, scripted,
				    toplevel, format, cb->cb_json, props,
				    cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_CHECKPOINT:
				collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
				    vs->vs_checkpoint_space, NULL, scripted,
				    toplevel, format, cb->cb_json, props,
				    cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_EXPANDSZ:
				collect_vdev_prop(ZPOOL_PROP_EXPANDSZ,
				    vs->vs_esize, NULL, scripted, B_TRUE,
				    format, cb->cb_json, props,
				    cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_FRAGMENTATION:
				collect_vdev_prop(
				    ZPOOL_PROP_FRAGMENTATION,
				    vs->vs_fragmentation, NULL, scripted,
				    (vs->vs_fragmentation != ZFS_FRAG_INVALID &&
				    toplevel),
				    format, cb->cb_json, props,
				    cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_CAPACITY:
				/* Derive permyriad used from alloc/space. */
				cap = (vs->vs_space == 0) ?
				    0 : (vs->vs_alloc * 10000 / vs->vs_space);
				collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap,
				    NULL, scripted, toplevel, format,
				    cb->cb_json, props, cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_HEALTH:
				state = zpool_state_to_name(vs->vs_state,
				    vs->vs_aux);
				/* Spares report INUSE/AVAIL, not HEALTHY. */
				if (isspare) {
					if (vs->vs_aux == VDEV_AUX_SPARED)
						state = "INUSE";
					else if (vs->vs_state ==
					    VDEV_STATE_HEALTHY)
						state = "AVAIL";
				}
				collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state,
				    scripted, B_TRUE, format, cb->cb_json,
				    props, cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_NAME:
				/* Name was already printed above. */
				break;

			default:
				/* Other properties have no per-vdev value. */
				collect_vdev_prop(pl->pl_prop, 0,
				    NULL, scripted, B_FALSE, format,
				    cb->cb_json, props, cb->cb_json_as_int);

			}


		}

		if (cb->cb_json) {
			fnvlist_add_nvlist(ent, "properties", props);
			fnvlist_free(props);
		} else
			(void) fputc('\n', stdout);
	}

	/* Leaf vdev: attach its JSON entry and stop recursing. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (cb->cb_json) {
			fnvlist_add_nvlist(item, name, ent);
			fnvlist_free(ent);
		}
		return;
	}

	if (cb->cb_json) {
		ch = fnvlist_alloc();
	}

	/* list the normal vdevs first */
	for (c = 0; c < children; c++) {
		uint64_t ishole = B_FALSE;

		if (nvlist_lookup_uint64(child[c],
		    ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
			continue;

		if (nvlist_lookup_uint64(child[c],
		    ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
			continue;

		/* Allocation-class vdevs are listed in the class pass below. */
		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;

		vname = zpool_vdev_name(g_zfs, zhp, child[c],
		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);

		/* Root-level children attach to 'item', others to 'ch'. */
		if (name == NULL || cb->cb_json != B_TRUE)
			collect_list_stats(zhp, vname, child[c], cb, depth + 2,
			    B_FALSE, item);
		else if (cb->cb_json) {
			collect_list_stats(zhp, vname, child[c], cb, depth + 2,
			    B_FALSE, ch);
		}
		free(vname);
	}

	if (cb->cb_json) {
		if (!nvlist_empty(ch))
			fnvlist_add_nvlist(ent, "vdevs", ch);
		fnvlist_free(ch);
	}

	/* list the classes: 'logs', 'dedup', and 'special' */
	for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
		boolean_t printed = B_FALSE;
		if (cb->cb_json)
			obj = fnvlist_alloc();
		for (c = 0; c < children; c++) {
			const char *bias = NULL;
			const char *type = NULL;

			/* Log vdevs have no bias string; treat as 'logs'. */
			if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
			    &islog) == 0 && islog) {
				bias = VDEV_ALLOC_CLASS_LOGS;
			} else {
				(void) nvlist_lookup_string(child[c],
				    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
				(void) nvlist_lookup_string(child[c],
				    ZPOOL_CONFIG_TYPE, &type);
			}
			if (bias == NULL || strcmp(bias, class_name[n]) != 0)
				continue;
			if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
				continue;

			/* Print the group header once, table mode only. */
			if (!printed && !cb->cb_json) {
				/* LINTED E_SEC_PRINTF_VAR_FMT */
				(void) printf(dashes, cb->cb_namewidth,
				    class_name[n]);
				printed = B_TRUE;
			}
			vname = zpool_vdev_name(g_zfs, zhp, child[c],
			    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
			collect_list_stats(zhp, vname, child[c], cb, depth + 2,
			    B_FALSE, obj);
			free(vname);
		}
		if (cb->cb_json) {
			if (!nvlist_empty(obj))
				fnvlist_add_nvlist(item, class_name[n], obj);
			fnvlist_free(obj);
		}
	}

	/* Cache (L2ARC) devices. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		if (cb->cb_json) {
			l2c = fnvlist_alloc();
		} else {
			/* LINTED E_SEC_PRINTF_VAR_FMT */
			(void) printf(dashes, cb->cb_namewidth, "cache");
		}
		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, zhp, child[c],
			    cb->cb_name_flags);
			collect_list_stats(zhp, vname, child[c], cb, depth + 2,
			    B_FALSE, l2c);
			free(vname);
		}
		if (cb->cb_json) {
			if (!nvlist_empty(l2c))
				fnvlist_add_nvlist(item, "l2cache", l2c);
			fnvlist_free(l2c);
		}
	}

	/* Spare devices (isspare=B_TRUE adjusts the health strings). */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
	    &children) == 0 && children > 0) {
		if (cb->cb_json) {
			sp = fnvlist_alloc();
		} else {
			/* LINTED E_SEC_PRINTF_VAR_FMT */
			(void) printf(dashes, cb->cb_namewidth, "spare");
		}
		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, zhp, child[c],
			    cb->cb_name_flags);
			collect_list_stats(zhp, vname, child[c], cb, depth + 2,
			    B_TRUE, sp);
			free(vname);
		}
		if (cb->cb_json) {
			if (!nvlist_empty(sp))
				fnvlist_add_nvlist(item, "spares", sp);
			fnvlist_free(sp);
		}
	}

	/* Attach this interior vdev's entry to its parent's collection. */
	if (name != NULL && cb->cb_json) {
		fnvlist_add_nvlist(item, name, ent);
		fnvlist_free(ent);
	}
}
7278
7279 /*
7280 * Generic callback function to list a pool.
7281 */
7282 static int
list_callback(zpool_handle_t * zhp,void * data)7283 list_callback(zpool_handle_t *zhp, void *data)
7284 {
7285 nvlist_t *p, *d, *nvdevs;
7286 uint64_t guid;
7287 char pool_guid[256];
7288 const char *pool_name = zpool_get_name(zhp);
7289 list_cbdata_t *cbp = data;
7290 p = d = nvdevs = NULL;
7291
7292 collect_pool(zhp, cbp);
7293
7294 if (cbp->cb_verbose) {
7295 nvlist_t *config, *nvroot;
7296 config = zpool_get_config(zhp, NULL);
7297 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
7298 &nvroot) == 0);
7299 if (cbp->cb_json) {
7300 d = fnvlist_lookup_nvlist(cbp->cb_jsobj,
7301 "pools");
7302 if (cbp->cb_json_pool_key_guid) {
7303 guid = fnvlist_lookup_uint64(config,
7304 ZPOOL_CONFIG_POOL_GUID);
7305 (void) snprintf(pool_guid, 256, "%llu",
7306 (u_longlong_t)guid);
7307 p = fnvlist_lookup_nvlist(d, pool_guid);
7308 } else {
7309 p = fnvlist_lookup_nvlist(d, pool_name);
7310 }
7311 nvdevs = fnvlist_alloc();
7312 }
7313 collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs);
7314 if (cbp->cb_json) {
7315 fnvlist_add_nvlist(p, "vdevs", nvdevs);
7316 if (cbp->cb_json_pool_key_guid)
7317 fnvlist_add_nvlist(d, pool_guid, p);
7318 else
7319 fnvlist_add_nvlist(d, pool_name, p);
7320 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
7321 fnvlist_free(nvdevs);
7322 }
7323 }
7324
7325 return (0);
7326 }
7327
7328 /*
7329 * Set the minimum pool/vdev name column width. The width must be at least 9,
7330 * but may be as large as needed.
7331 */
7332 static int
get_namewidth_list(zpool_handle_t * zhp,void * data)7333 get_namewidth_list(zpool_handle_t *zhp, void *data)
7334 {
7335 list_cbdata_t *cb = data;
7336 int width;
7337
7338 width = get_namewidth(zhp, cb->cb_namewidth,
7339 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
7340
7341 if (width < 9)
7342 width = 9;
7343
7344 cb->cb_namewidth = width;
7345
7346 return (0);
7347 }
7348
7349 /*
 * zpool list [-gHjLpPv] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
7351 *
7352 * -g Display guid for individual vdev name.
7353 * -H Scripted mode. Don't display headers, and separate properties
7354 * by a single tab.
7355 * -L Follow links when resolving vdev path name.
7356 * -o List of properties to display. Defaults to
7357 * "name,size,allocated,free,expandsize,fragmentation,capacity,"
7358 * "dedupratio,health,altroot"
7359 * -p Display values in parsable (exact) format.
7360 * -P Display full path for vdev name.
 * -T	Display a timestamp in date(1) or Unix format
 * -v	Verbose output, also listing the vdevs within each pool
 * -j	Display the output in JSON format
7363 * --json-int Display the numbers as integer instead of strings.
7364 * --json-pool-key-guid Set pool GUID as key for pool objects.
7365 *
7366 * List all pools in the system, whether or not they're healthy. Output space
7367 * statistics for each one, as well as health status summary.
7368 */
int
zpool_do_list(int argc, char **argv)
{
	int c;
	int ret = 0;
	list_cbdata_t cb = { 0 };
	static char default_props[] =
	    "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
	    "capacity,dedupratio,health,altroot";
	char *props = default_props;
	float interval = 0;
	unsigned long count = 0;
	zpool_list_t *list;
	boolean_t first = B_TRUE;
	nvlist_t *data = NULL;
	current_prop_type = ZFS_TYPE_POOL;

	struct option long_options[] = {
		{"json", no_argument, NULL, 'j'},
		{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
		{"json-pool-key-guid", no_argument, NULL,
		    ZPOOL_OPTION_POOL_KEY_GUID},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'g':
			cb.cb_name_flags |= VDEV_NAME_GUID;
			break;
		case 'H':
			cb.cb_scripted = B_TRUE;
			break;
		case 'L':
			cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'o':
			props = optarg;
			break;
		case 'P':
			cb.cb_name_flags |= VDEV_NAME_PATH;
			break;
		case 'p':
			cb.cb_literal = B_TRUE;
			break;
		case 'j':
			cb.cb_json = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_NUMS_AS_INT:
			/* Integer JSON output implies literal values. */
			cb.cb_json_as_int = B_TRUE;
			cb.cb_literal = B_TRUE;
			break;
		case ZPOOL_OPTION_POOL_KEY_GUID:
			cb.cb_json_pool_key_guid = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 'v':
			cb.cb_verbose = B_TRUE;
			cb.cb_namewidth = 8; /* 8 until precalc is avail */
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* The JSON-only modifiers are rejected without -j. */
	if (!cb.cb_json && cb.cb_json_as_int) {
		(void) fprintf(stderr, gettext("'--json-int' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (!cb.cb_json && cb.cb_json_pool_key_guid) {
		(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
		    " works with '-j' option\n"));
		usage(B_FALSE);
	}

	/* Trailing [interval [count]] arguments, if present. */
	get_interval_count(&argc, argv, &interval, &count);

	if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
		usage(B_FALSE);

	/*
	 * Main display loop: re-acquire the pool list each pass so pools
	 * created or destroyed between intervals are picked up.
	 */
	for (;;) {
		if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
		    ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
			return (1);

		if (pool_list_count(list) == 0)
			break;

		/* Fresh JSON skeleton with an empty "pools" object. */
		if (cb.cb_json) {
			cb.cb_jsobj = zpool_json_schema(0, 1);
			data = fnvlist_alloc();
			fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
			fnvlist_free(data);
		}

		/* Precompute the name column width across all pools. */
		cb.cb_namewidth = 0;
		(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);

		if (timestamp_fmt != NODATE) {
			if (cb.cb_json) {
				if (cb.cb_json_as_int) {
					fnvlist_add_uint64(cb.cb_jsobj, "time",
					    time(NULL));
				} else {
					char ts[128];
					get_timestamp(timestamp_fmt, ts, 128);
					fnvlist_add_string(cb.cb_jsobj, "time",
					    ts);
				}
			} else
				print_timestamp(timestamp_fmt);
		}

		/* Headers once, or every pass in verbose (table) mode. */
		if (!cb.cb_scripted && (first || cb.cb_verbose) &&
		    !cb.cb_json) {
			print_header(&cb);
			first = B_FALSE;
		}
		ret = pool_list_iter(list, B_TRUE, list_callback, &cb);

		if (ret == 0 && cb.cb_json)
			zcmd_print_json(cb.cb_jsobj);
		else if (ret != 0 && cb.cb_json)
			nvlist_free(cb.cb_jsobj);

		if (interval == 0)
			break;

		if (count != 0 && --count == 0)
			break;

		/* Free before the next pass re-acquires the list. */
		pool_list_free(list);

		(void) fflush(stdout);
		(void) fsleep(interval);
	}

	/* With no pool arguments and nothing found, say so (not an error). */
	if (argc == 0 && !cb.cb_scripted && !cb.cb_json &&
	    pool_list_count(list) == 0) {
		(void) printf(gettext("no pools available\n"));
		ret = 0;
	}

	pool_list_free(list);
	zprop_free_list(cb.cb_proplist);
	return (ret);
}
7532
7533 static int
zpool_do_attach_or_replace(int argc,char ** argv,int replacing)7534 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
7535 {
7536 boolean_t force = B_FALSE;
7537 boolean_t rebuild = B_FALSE;
7538 boolean_t wait = B_FALSE;
7539 int c;
7540 nvlist_t *nvroot;
7541 char *poolname, *old_disk, *new_disk;
7542 zpool_handle_t *zhp;
7543 nvlist_t *props = NULL;
7544 char *propval;
7545 int ret;
7546
7547 /* check options */
7548 while ((c = getopt(argc, argv, "fo:sw")) != -1) {
7549 switch (c) {
7550 case 'f':
7551 force = B_TRUE;
7552 break;
7553 case 'o':
7554 if ((propval = strchr(optarg, '=')) == NULL) {
7555 (void) fprintf(stderr, gettext("missing "
7556 "'=' for -o option\n"));
7557 usage(B_FALSE);
7558 }
7559 *propval = '\0';
7560 propval++;
7561
7562 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
7563 (add_prop_list(optarg, propval, &props, B_TRUE)))
7564 usage(B_FALSE);
7565 break;
7566 case 's':
7567 rebuild = B_TRUE;
7568 break;
7569 case 'w':
7570 wait = B_TRUE;
7571 break;
7572 case '?':
7573 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7574 optopt);
7575 usage(B_FALSE);
7576 }
7577 }
7578
7579 argc -= optind;
7580 argv += optind;
7581
7582 /* get pool name and check number of arguments */
7583 if (argc < 1) {
7584 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7585 usage(B_FALSE);
7586 }
7587
7588 poolname = argv[0];
7589
7590 if (argc < 2) {
7591 (void) fprintf(stderr,
7592 gettext("missing <device> specification\n"));
7593 usage(B_FALSE);
7594 }
7595
7596 old_disk = argv[1];
7597
7598 if (argc < 3) {
7599 if (!replacing) {
7600 (void) fprintf(stderr,
7601 gettext("missing <new_device> specification\n"));
7602 usage(B_FALSE);
7603 }
7604 new_disk = old_disk;
7605 argc -= 1;
7606 argv += 1;
7607 } else {
7608 new_disk = argv[2];
7609 argc -= 2;
7610 argv += 2;
7611 }
7612
7613 if (argc > 1) {
7614 (void) fprintf(stderr, gettext("too many arguments\n"));
7615 usage(B_FALSE);
7616 }
7617
7618 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
7619 nvlist_free(props);
7620 return (1);
7621 }
7622
7623 if (zpool_get_config(zhp, NULL) == NULL) {
7624 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
7625 poolname);
7626 zpool_close(zhp);
7627 nvlist_free(props);
7628 return (1);
7629 }
7630
7631 /* unless manually specified use "ashift" pool property (if set) */
7632 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
7633 int intval;
7634 zprop_source_t src;
7635 char strval[ZPOOL_MAXPROPLEN];
7636
7637 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
7638 if (src != ZPROP_SRC_DEFAULT) {
7639 (void) sprintf(strval, "%" PRId32, intval);
7640 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
7641 &props, B_TRUE) == 0);
7642 }
7643 }
7644
7645 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
7646 argc, argv);
7647 if (nvroot == NULL) {
7648 zpool_close(zhp);
7649 nvlist_free(props);
7650 return (1);
7651 }
7652
7653 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
7654 rebuild);
7655
7656 if (ret == 0 && wait) {
7657 zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
7658 char raidz_prefix[] = "raidz";
7659 if (replacing) {
7660 activity = ZPOOL_WAIT_REPLACE;
7661 } else if (strncmp(old_disk,
7662 raidz_prefix, strlen(raidz_prefix)) == 0) {
7663 activity = ZPOOL_WAIT_RAIDZ_EXPAND;
7664 }
7665 ret = zpool_wait(zhp, activity);
7666 }
7667
7668 nvlist_free(props);
7669 nvlist_free(nvroot);
7670 zpool_close(zhp);
7671
7672 return (ret);
7673 }
7674
7675 /*
7676 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
7677 *
7678 * -f Force attach, even if <new_device> appears to be in use.
7679 * -s Use sequential instead of healing reconstruction for resilver.
7680 * -o Set property=value.
7681 * -w Wait for replacing to complete before returning
7682 *
7683 * Replace <device> with <new_device>.
7684 */
int
zpool_do_replace(int argc, char **argv)
{
	/* 'replace' is the attach-or-replace engine with replacing=B_TRUE */
	return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
}
7690
7691 /*
7692 * zpool attach [-fsw] [-o property=value] <pool> <vdev> <new_device>
7693 *
7694 * -f Force attach, even if <new_device> appears to be in use.
7695 * -s Use sequential instead of healing reconstruction for resilver.
7696 * -o Set property=value.
7697 * -w Wait for resilvering (mirror) or expansion (raidz) to complete
7698 * before returning.
7699 *
7700 * Attach <new_device> to a <vdev>, where the vdev can be of type
7701 * device, mirror or raidz. If <vdev> is not part of a mirror, then <vdev> will
7702 * be transformed into a mirror of <vdev> and <new_device>. When a mirror
7703 * is involved, <new_device> will begin life with a DTL of [0, now], and will
 * immediately begin to resilver itself. For the raidz case, an expansion will
7705 * commence and reflow the raidz data across all the disks including the
7706 * <new_device>.
7707 */
int
zpool_do_attach(int argc, char **argv)
{
	/* 'attach' is the attach-or-replace engine with replacing=B_FALSE */
	return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
}
7713
7714 /*
7715 * zpool detach [-f] <pool> <device>
7716 *
7717 * -f Force detach of <device>, even if DTLs argue against it
7718 * (not supported yet)
7719 *
7720 * Detach a device from a mirror. The operation will be refused if <device>
7721 * is the last device in the mirror, or if the DTLs indicate that this device
7722 * has the only valid copy of some data.
7723 */
7724 int
zpool_do_detach(int argc,char ** argv)7725 zpool_do_detach(int argc, char **argv)
7726 {
7727 int c;
7728 char *poolname, *path;
7729 zpool_handle_t *zhp;
7730 int ret;
7731
7732 /* check options */
7733 while ((c = getopt(argc, argv, "")) != -1) {
7734 switch (c) {
7735 case '?':
7736 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7737 optopt);
7738 usage(B_FALSE);
7739 }
7740 }
7741
7742 argc -= optind;
7743 argv += optind;
7744
7745 /* get pool name and check number of arguments */
7746 if (argc < 1) {
7747 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7748 usage(B_FALSE);
7749 }
7750
7751 if (argc < 2) {
7752 (void) fprintf(stderr,
7753 gettext("missing <device> specification\n"));
7754 usage(B_FALSE);
7755 }
7756
7757 poolname = argv[0];
7758 path = argv[1];
7759
7760 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7761 return (1);
7762
7763 ret = zpool_vdev_detach(zhp, path);
7764
7765 zpool_close(zhp);
7766
7767 return (ret);
7768 }
7769
7770 /*
7771 * zpool split [-gLnP] [-o prop=val] ...
7772 * [-o mntopt] ...
7773 * [-R altroot] <pool> <newpool> [<device> ...]
7774 *
7775 * -g Display guid for individual vdev name.
7776 * -L Follow links when resolving vdev path name.
7777 * -n Do not split the pool, but display the resulting layout if
7778 * it were to be split.
7779 * -o Set property=value, or set mount options.
7780 * -P Display full path for vdev name.
7781 * -R Mount the split-off pool under an alternate root.
7782 * -l Load encryption keys while importing.
7783 *
7784 * Splits the named pool and gives it the new pool name. Devices to be split
7785 * off may be listed, provided that no more than one device is specified
7786 * per top-level vdev mirror. The newly split pool is left in an exported
7787 * state unless -R is specified.
7788 *
 * Restrictions: the top-level of the pool must only be made up of
7790 * mirrors; all devices in the pool must be healthy; no device may be
7791 * undergoing a resilvering operation.
7792 */
/*
 * Split the mirrored pool <srcpool> into a new pool <newpool>.  The new
 * pool is left exported unless -R was given, in which case it is imported
 * under the requested altroot (optionally loading keys with -l and
 * mounting with the -o mntopts).  Returns 0 on success, nonzero on error.
 */
int
zpool_do_split(int argc, char **argv)
{
	char *srcpool, *newpool, *propval;
	char *mntopts = NULL;
	splitflags_t flags;
	int c, ret = 0;
	int ms_status = 0;
	boolean_t loadkeys = B_FALSE;
	zpool_handle_t *zhp;
	nvlist_t *config, *props = NULL;

	flags.dryrun = B_FALSE;
	flags.import = B_FALSE;
	flags.name_flags = 0;

	/* check options */
	while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
		switch (c) {
		case 'g':
			/* show vdev GUIDs instead of names */
			flags.name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'R':
			/* -R implies importing the new pool under altroot */
			flags.import = B_TRUE;
			if (add_prop_list(
			    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
			    &props, B_TRUE) != 0) {
				nvlist_free(props);
				usage(B_FALSE);
			}
			break;
		case 'l':
			loadkeys = B_TRUE;
			break;
		case 'n':
			flags.dryrun = B_TRUE;
			break;
		case 'o':
			/* 'prop=value' sets a property; otherwise mntopts */
			if ((propval = strchr(optarg, '=')) != NULL) {
				*propval = '\0';
				propval++;
				if (add_prop_list(optarg, propval,
				    &props, B_TRUE) != 0) {
					nvlist_free(props);
					usage(B_FALSE);
				}
			} else {
				mntopts = optarg;
			}
			break;
		case 'P':
			flags.name_flags |= VDEV_NAME_PATH;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
			break;
		}
	}

	/* mount options and key loading only apply when importing (-R) */
	if (!flags.import && mntopts != NULL) {
		(void) fprintf(stderr, gettext("setting mntopts is only "
		    "valid when importing the pool\n"));
		usage(B_FALSE);
	}

	if (!flags.import && loadkeys) {
		(void) fprintf(stderr, gettext("loading keys is only "
		    "valid when importing the pool\n"));
		usage(B_FALSE);
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("Missing pool name\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("Missing new pool name\n"));
		usage(B_FALSE);
	}

	srcpool = argv[0];
	newpool = argv[1];

	argc -= 2;
	argv += 2;

	if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
		nvlist_free(props);
		return (1);
	}

	/* remaining argv entries name the devices to split off */
	config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
	if (config == NULL) {
		ret = 1;
	} else {
		if (flags.dryrun) {
			(void) printf(gettext("would create '%s' with the "
			    "following layout:\n\n"), newpool);
			print_vdev_tree(NULL, newpool, config, 0, "",
			    flags.name_flags);
			print_vdev_tree(NULL, "dedup", config, 0,
			    VDEV_ALLOC_BIAS_DEDUP, 0);
			print_vdev_tree(NULL, "special", config, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, 0);
		}
	}

	zpool_close(zhp);

	/* done unless the split succeeded and an import was requested */
	if (ret != 0 || flags.dryrun || !flags.import) {
		nvlist_free(config);
		nvlist_free(props);
		return (ret);
	}

	/*
	 * The split was successful. Now we need to open the new
	 * pool and import it.
	 */
	if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
		nvlist_free(config);
		nvlist_free(props);
		return (1);
	}

	if (loadkeys) {
		ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
		if (ret != 0)
			ret = 1;
	}

	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
		/* mount/share the new pool's datasets; report partial failure */
		ms_status = zpool_enable_datasets(zhp, mntopts, 0,
		    mount_tp_nthr);
		if (ms_status == EZFS_SHAREFAILED) {
			(void) fprintf(stderr, gettext("Split was successful, "
			    "datasets are mounted but sharing of some datasets "
			    "has failed\n"));
		} else if (ms_status == EZFS_MOUNTFAILED) {
			(void) fprintf(stderr, gettext("Split was successful"
			    ", but some datasets could not be mounted\n"));
			(void) fprintf(stderr, gettext("Try doing '%s' with a "
			    "different altroot\n"), "zpool import");
		}
	}
	zpool_close(zhp);
	nvlist_free(config);
	nvlist_free(props);

	return (ret);
}
7957
7958
7959 /*
7960 * zpool online [--power] <pool> <device> ...
7961 *
7962 * --power: Power on the enclosure slot to the drive (if possible)
7963 */
7964 int
zpool_do_online(int argc,char ** argv)7965 zpool_do_online(int argc, char **argv)
7966 {
7967 int c, i;
7968 char *poolname;
7969 zpool_handle_t *zhp;
7970 int ret = 0;
7971 vdev_state_t newstate;
7972 int flags = 0;
7973 boolean_t is_power_on = B_FALSE;
7974 struct option long_options[] = {
7975 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7976 {0, 0, 0, 0}
7977 };
7978
7979 /* check options */
7980 while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
7981 switch (c) {
7982 case 'e':
7983 flags |= ZFS_ONLINE_EXPAND;
7984 break;
7985 case ZPOOL_OPTION_POWER:
7986 is_power_on = B_TRUE;
7987 break;
7988 case '?':
7989 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7990 optopt);
7991 usage(B_FALSE);
7992 }
7993 }
7994
7995 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7996 is_power_on = B_TRUE;
7997
7998 argc -= optind;
7999 argv += optind;
8000
8001 /* get pool name and check number of arguments */
8002 if (argc < 1) {
8003 (void) fprintf(stderr, gettext("missing pool name\n"));
8004 usage(B_FALSE);
8005 }
8006 if (argc < 2) {
8007 (void) fprintf(stderr, gettext("missing device name\n"));
8008 usage(B_FALSE);
8009 }
8010
8011 poolname = argv[0];
8012
8013 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
8014 (void) fprintf(stderr, gettext("failed to open pool "
8015 "\"%s\""), poolname);
8016 return (1);
8017 }
8018
8019 for (i = 1; i < argc; i++) {
8020 vdev_state_t oldstate;
8021 boolean_t avail_spare, l2cache;
8022 int rc;
8023
8024 if (is_power_on) {
8025 rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
8026 if (rc == ENOTSUP) {
8027 (void) fprintf(stderr,
8028 gettext("Power control not supported\n"));
8029 }
8030 if (rc != 0)
8031 return (rc);
8032 }
8033
8034 nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
8035 &l2cache, NULL);
8036 if (tgt == NULL) {
8037 ret = 1;
8038 (void) fprintf(stderr, gettext("couldn't find device "
8039 "\"%s\" in pool \"%s\"\n"), argv[i], poolname);
8040 continue;
8041 }
8042 uint_t vsc;
8043 oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
8044 ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
8045 if ((rc = zpool_vdev_online(zhp, argv[i], flags,
8046 &newstate)) == 0) {
8047 if (newstate != VDEV_STATE_HEALTHY) {
8048 (void) printf(gettext("warning: device '%s' "
8049 "onlined, but remains in faulted state\n"),
8050 argv[i]);
8051 if (newstate == VDEV_STATE_FAULTED)
8052 (void) printf(gettext("use 'zpool "
8053 "clear' to restore a faulted "
8054 "device\n"));
8055 else
8056 (void) printf(gettext("use 'zpool "
8057 "replace' to replace devices "
8058 "that are no longer present\n"));
8059 if ((flags & ZFS_ONLINE_EXPAND)) {
8060 (void) printf(gettext("%s: failed "
8061 "to expand usable space on "
8062 "unhealthy device '%s'\n"),
8063 (oldstate >= VDEV_STATE_DEGRADED ?
8064 "error" : "warning"), argv[i]);
8065 if (oldstate >= VDEV_STATE_DEGRADED) {
8066 ret = 1;
8067 break;
8068 }
8069 }
8070 }
8071 } else {
8072 (void) fprintf(stderr, gettext("Failed to online "
8073 "\"%s\" in pool \"%s\": %d\n"),
8074 argv[i], poolname, rc);
8075 ret = 1;
8076 }
8077 }
8078
8079 zpool_close(zhp);
8080
8081 return (ret);
8082 }
8083
8084 /*
8085 * zpool offline [-ft]|[--power] <pool> <device> ...
8086 *
8087 *
8088 * -f Force the device into a faulted state.
8089 *
8090 * -t Only take the device off-line temporarily. The offline/faulted
8091 * state will not be persistent across reboots.
8092 *
8093 * --power Power off the enclosure slot to the drive (if possible)
8094 */
8095 int
zpool_do_offline(int argc,char ** argv)8096 zpool_do_offline(int argc, char **argv)
8097 {
8098 int c, i;
8099 char *poolname;
8100 zpool_handle_t *zhp;
8101 int ret = 0;
8102 boolean_t istmp = B_FALSE;
8103 boolean_t fault = B_FALSE;
8104 boolean_t is_power_off = B_FALSE;
8105
8106 struct option long_options[] = {
8107 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
8108 {0, 0, 0, 0}
8109 };
8110
8111 /* check options */
8112 while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
8113 switch (c) {
8114 case 'f':
8115 fault = B_TRUE;
8116 break;
8117 case 't':
8118 istmp = B_TRUE;
8119 break;
8120 case ZPOOL_OPTION_POWER:
8121 is_power_off = B_TRUE;
8122 break;
8123 case '?':
8124 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8125 optopt);
8126 usage(B_FALSE);
8127 }
8128 }
8129
8130 if (is_power_off && fault) {
8131 (void) fprintf(stderr,
8132 gettext("-0 and -f cannot be used together\n"));
8133 usage(B_FALSE);
8134 }
8135
8136 if (is_power_off && istmp) {
8137 (void) fprintf(stderr,
8138 gettext("-0 and -t cannot be used together\n"));
8139 usage(B_FALSE);
8140 }
8141
8142 argc -= optind;
8143 argv += optind;
8144
8145 /* get pool name and check number of arguments */
8146 if (argc < 1) {
8147 (void) fprintf(stderr, gettext("missing pool name\n"));
8148 usage(B_FALSE);
8149 }
8150 if (argc < 2) {
8151 (void) fprintf(stderr, gettext("missing device name\n"));
8152 usage(B_FALSE);
8153 }
8154
8155 poolname = argv[0];
8156
8157 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
8158 (void) fprintf(stderr, gettext("failed to open pool "
8159 "\"%s\""), poolname);
8160 return (1);
8161 }
8162
8163 for (i = 1; i < argc; i++) {
8164 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
8165 if (is_power_off) {
8166 /*
8167 * Note: we have to power off first, then set REMOVED,
8168 * or else zpool_vdev_set_removed_state() returns
8169 * EAGAIN.
8170 */
8171 ret = zpool_power_off(zhp, argv[i]);
8172 if (ret != 0) {
8173 (void) fprintf(stderr, "%s %s %d\n",
8174 gettext("unable to power off slot for"),
8175 argv[i], ret);
8176 }
8177 (void) zpool_vdev_set_removed_state(zhp, guid,
8178 VDEV_AUX_NONE);
8179
8180 } else if (fault) {
8181 vdev_aux_t aux;
8182 if (istmp == B_FALSE) {
8183 /* Force the fault to persist across imports */
8184 aux = VDEV_AUX_EXTERNAL_PERSIST;
8185 } else {
8186 aux = VDEV_AUX_EXTERNAL;
8187 }
8188
8189 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
8190 ret = 1;
8191 } else {
8192 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
8193 ret = 1;
8194 }
8195 }
8196
8197 zpool_close(zhp);
8198
8199 return (ret);
8200 }
8201
8202 /*
8203 * zpool clear [-nF]|[--power] <pool> [device]
8204 *
8205 * Clear all errors associated with a pool or a particular device.
8206 */
/*
 * Clear all errors on a pool or on one device, optionally rewinding the
 * pool (-F, with -n dry-run and -X extreme rewind) or powering on the
 * device's enclosure slot first (--power).
 */
int
zpool_do_clear(int argc, char **argv)
{
	int c;
	int ret = 0;
	boolean_t dryrun = B_FALSE;		/* -n: check rewind only */
	boolean_t do_rewind = B_FALSE;		/* -F: rewind if necessary */
	boolean_t xtreme_rewind = B_FALSE;	/* -X: extreme rewind */
	boolean_t is_power_on = B_FALSE;	/* --power */
	uint32_t rewind_policy = ZPOOL_NO_REWIND;
	nvlist_t *policy = NULL;
	zpool_handle_t *zhp;
	char *pool, *device;

	struct option long_options[] = {
		{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "FnX", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'F':
			do_rewind = B_TRUE;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'X':
			xtreme_rewind = B_TRUE;
			break;
		case ZPOOL_OPTION_POWER:
			is_power_on = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	/* the environment variable implies --power */
	if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
		is_power_on = B_TRUE;

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name\n"));
		usage(B_FALSE);
	}

	if (argc > 2) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	/* -n and -X are modifiers of -F and meaningless without it */
	if ((dryrun || xtreme_rewind) && !do_rewind) {
		(void) fprintf(stderr,
		    gettext("-n or -X only meaningful with -F\n"));
		usage(B_FALSE);
	}
	if (dryrun)
		rewind_policy = ZPOOL_TRY_REWIND;
	else if (do_rewind)
		rewind_policy = ZPOOL_DO_REWIND;
	if (xtreme_rewind)
		rewind_policy |= ZPOOL_EXTREME_REWIND;

	/* In future, further rewind policy choices can be passed along here */
	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
	    rewind_policy) != 0) {
		return (1);
	}

	pool = argv[0];
	device = argc == 2 ? argv[1] : NULL;	/* device arg is optional */

	/* use open_canfail so errors on faulted pools can still be cleared */
	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
		nvlist_free(policy);
		return (1);
	}

	if (is_power_on) {
		if (device == NULL) {
			(void) zpool_power_on_pool_and_wait_for_devices(zhp);
		} else {
			(void) zpool_power_on_and_disk_wait(zhp, device);
		}
	}

	if (zpool_clear(zhp, device, policy) != 0)
		ret = 1;

	zpool_close(zhp);

	nvlist_free(policy);

	return (ret);
}
8309
8310 /*
8311 * zpool reguid [-g <guid>] <pool>
8312 */
8313 int
zpool_do_reguid(int argc,char ** argv)8314 zpool_do_reguid(int argc, char **argv)
8315 {
8316 uint64_t guid;
8317 uint64_t *guidp = NULL;
8318 int c;
8319 char *endptr;
8320 char *poolname;
8321 zpool_handle_t *zhp;
8322 int ret = 0;
8323
8324 /* check options */
8325 while ((c = getopt(argc, argv, "g:")) != -1) {
8326 switch (c) {
8327 case 'g':
8328 errno = 0;
8329 guid = strtoull(optarg, &endptr, 10);
8330 if (errno != 0 || *endptr != '\0') {
8331 (void) fprintf(stderr,
8332 gettext("invalid GUID: %s\n"), optarg);
8333 usage(B_FALSE);
8334 }
8335 guidp = &guid;
8336 break;
8337 case '?':
8338 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8339 optopt);
8340 usage(B_FALSE);
8341 }
8342 }
8343
8344 argc -= optind;
8345 argv += optind;
8346
8347 /* get pool name and check number of arguments */
8348 if (argc < 1) {
8349 (void) fprintf(stderr, gettext("missing pool name\n"));
8350 usage(B_FALSE);
8351 }
8352
8353 if (argc > 1) {
8354 (void) fprintf(stderr, gettext("too many arguments\n"));
8355 usage(B_FALSE);
8356 }
8357
8358 poolname = argv[0];
8359 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
8360 return (1);
8361
8362 ret = zpool_set_guid(zhp, guidp);
8363
8364 zpool_close(zhp);
8365 return (ret);
8366 }
8367
8368
8369 /*
8370 * zpool reopen <pool>
8371 *
8372 * Reopen the pool so that the kernel can update the sizes of all vdevs.
8373 */
8374 int
zpool_do_reopen(int argc,char ** argv)8375 zpool_do_reopen(int argc, char **argv)
8376 {
8377 int c;
8378 int ret = 0;
8379 boolean_t scrub_restart = B_TRUE;
8380
8381 /* check options */
8382 while ((c = getopt(argc, argv, "n")) != -1) {
8383 switch (c) {
8384 case 'n':
8385 scrub_restart = B_FALSE;
8386 break;
8387 case '?':
8388 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8389 optopt);
8390 usage(B_FALSE);
8391 }
8392 }
8393
8394 argc -= optind;
8395 argv += optind;
8396
8397 /* if argc == 0 we will execute zpool_reopen_one on all pools */
8398 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8399 B_FALSE, zpool_reopen_one, &scrub_restart);
8400
8401 return (ret);
8402 }
8403
/* Per-command state handed to scrub_callback() via for_each_pool(). */
typedef struct scrub_cbdata {
	int cb_type;			/* POOL_SCAN_* operation to request */
	pool_scrub_cmd_t cb_scrub_cmd;	/* POOL_SCRUB_* sub-command */
	time_t cb_date_start;		/* -S date bound (0 = unset) */
	time_t cb_date_end;		/* -E date bound (0 = unset) */
} scrub_cbdata_t;
8410
8411 static boolean_t
zpool_has_checkpoint(zpool_handle_t * zhp)8412 zpool_has_checkpoint(zpool_handle_t *zhp)
8413 {
8414 nvlist_t *config, *nvroot;
8415
8416 config = zpool_get_config(zhp, NULL);
8417
8418 if (config != NULL) {
8419 pool_checkpoint_stat_t *pcs = NULL;
8420 uint_t c;
8421
8422 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
8423 (void) nvlist_lookup_uint64_array(nvroot,
8424 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
8425
8426 if (pcs == NULL || pcs->pcs_state == CS_NONE)
8427 return (B_FALSE);
8428
8429 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
8430 pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
8431 return (B_TRUE);
8432 }
8433
8434 return (B_FALSE);
8435 }
8436
8437 static int
scrub_callback(zpool_handle_t * zhp,void * data)8438 scrub_callback(zpool_handle_t *zhp, void *data)
8439 {
8440 scrub_cbdata_t *cb = data;
8441 int err;
8442
8443 /*
8444 * Ignore faulted pools.
8445 */
8446 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
8447 (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
8448 "currently unavailable\n"), zpool_get_name(zhp));
8449 return (1);
8450 }
8451
8452 err = zpool_scan_range(zhp, cb->cb_type, cb->cb_scrub_cmd,
8453 cb->cb_date_start, cb->cb_date_end);
8454 if (err == 0 && zpool_has_checkpoint(zhp) &&
8455 cb->cb_type == POOL_SCAN_SCRUB) {
8456 (void) printf(gettext("warning: will not scrub state that "
8457 "belongs to the checkpoint of pool '%s'\n"),
8458 zpool_get_name(zhp));
8459 }
8460
8461 return (err != 0);
8462 }
8463
/*
 * for_each_pool() callback: block until the activity pointed to by 'data'
 * (a zpool_wait_activity_t) has completed on this pool.
 */
static int
wait_callback(zpool_handle_t *zhp, void *data)
{
	zpool_wait_activity_t *act = data;
	return (zpool_wait(zhp, *act));
}
8470
8471 static time_t
date_string_to_sec(const char * timestr,boolean_t rounding)8472 date_string_to_sec(const char *timestr, boolean_t rounding)
8473 {
8474 struct tm tm = {0};
8475 int adjustment = rounding ? 1 : 0;
8476
8477 /* Allow mktime to determine timezone. */
8478 tm.tm_isdst = -1;
8479
8480 if (strptime(timestr, "%Y-%m-%d %H:%M", &tm) == NULL) {
8481 if (strptime(timestr, "%Y-%m-%d", &tm) == NULL) {
8482 fprintf(stderr, gettext("Failed to parse the date.\n"));
8483 usage(B_FALSE);
8484 }
8485 adjustment *= 24 * 60 * 60;
8486 } else {
8487 adjustment *= 60;
8488 }
8489
8490 return (mktime(&tm) + adjustment);
8491 }
8492
8493 /*
8494 * zpool scrub [-e | -s | -p | -C | -E | -S] [-w] [-a | <pool> ...]
8495 *
8496 * -a Scrub all pools.
8497 * -e Only scrub blocks in the error log.
8498 * -E End date of scrub.
8499 * -S Start date of scrub.
8500 * -s Stop. Stops any in-progress scrub.
8501 * -p Pause. Pause in-progress scrub.
8502 * -w Wait. Blocks until scrub has completed.
8503 * -C Scrub from last saved txg.
8504 */
/*
 * Start, stop (-s) or pause (-p) a scrub — or an error-log scrub (-e) —
 * on the named pools, or on every pool with -a.  -S/-E restrict a normal
 * scrub to a date range, -C resumes from the last saved txg, and -w
 * blocks until the scan completes.  Returns 0 on success.
 */
int
zpool_do_scrub(int argc, char **argv)
{
	int c;
	scrub_cbdata_t cb;
	boolean_t wait = B_FALSE;
	int error;

	cb.cb_type = POOL_SCAN_SCRUB;
	cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
	cb.cb_date_start = cb.cb_date_end = 0;

	boolean_t is_error_scrub = B_FALSE;
	boolean_t is_pause = B_FALSE;
	boolean_t is_stop = B_FALSE;
	boolean_t is_txg_continue = B_FALSE;
	boolean_t scrub_all = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "aspweCE:S:")) != -1) {
		switch (c) {
		case 'a':
			scrub_all = B_TRUE;
			break;
		case 'e':
			is_error_scrub = B_TRUE;
			break;
		case 'E':
			/*
			 * Round the date. It's better to scrub more data than
			 * less. This also makes the date inclusive.
			 */
			cb.cb_date_end = date_string_to_sec(optarg, B_TRUE);
			break;
		case 's':
			is_stop = B_TRUE;
			break;
		case 'S':
			cb.cb_date_start = date_string_to_sec(optarg, B_FALSE);
			break;
		case 'p':
			is_pause = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case 'C':
			is_txg_continue = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	/* reject mutually exclusive combinations, then pick the scan type */
	if (is_pause && is_stop) {
		(void) fprintf(stderr, gettext("invalid option "
		    "combination: -s and -p are mutually exclusive\n"));
		usage(B_FALSE);
	} else if (is_pause && is_txg_continue) {
		(void) fprintf(stderr, gettext("invalid option "
		    "combination: -p and -C are mutually exclusive\n"));
		usage(B_FALSE);
	} else if (is_stop && is_txg_continue) {
		(void) fprintf(stderr, gettext("invalid option "
		    "combination: -s and -C are mutually exclusive\n"));
		usage(B_FALSE);
	} else if (is_error_scrub && is_txg_continue) {
		(void) fprintf(stderr, gettext("invalid option "
		    "combination: -e and -C are mutually exclusive\n"));
		usage(B_FALSE);
	} else {
		if (is_error_scrub)
			cb.cb_type = POOL_SCAN_ERRORSCRUB;

		if (is_pause) {
			cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
		} else if (is_stop) {
			cb.cb_type = POOL_SCAN_NONE;
		} else if (is_txg_continue) {
			cb.cb_scrub_cmd = POOL_SCRUB_FROM_LAST_TXG;
		} else {
			cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
		}
	}

	/* -S/-E are only compatible with a plain scrub */
	if ((cb.cb_date_start != 0 || cb.cb_date_end != 0) &&
	    cb.cb_scrub_cmd != POOL_SCRUB_NORMAL) {
		(void) fprintf(stderr, gettext("invalid option combination: "
		    "start/end date is available only with normal scrub\n"));
		usage(B_FALSE);
	}
	if (cb.cb_date_start != 0 && cb.cb_date_end != 0 &&
	    cb.cb_date_start > cb.cb_date_end) {
		(void) fprintf(stderr, gettext("invalid arguments: "
		    "end date has to be later than start date\n"));
		usage(B_FALSE);
	}

	/* waiting for a stop or pause to "finish" makes no sense */
	if (wait && (cb.cb_type == POOL_SCAN_NONE ||
	    cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
		(void) fprintf(stderr, gettext("invalid option combination: "
		    "-w cannot be used with -p or -s\n"));
		usage(B_FALSE);
	}

	argc -= optind;
	argv += optind;

	if (argc < 1 && !scrub_all) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, scrub_callback, &cb);

	/* with -w, block until the scrub activity completes on each pool */
	if (wait && !error) {
		zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
		error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
		    B_FALSE, wait_callback, &act);
	}

	return (error);
}
8631
8632 /*
8633 * zpool resilver <pool> ...
8634 *
8635 * Restarts any in-progress resilver
8636 */
8637 int
zpool_do_resilver(int argc,char ** argv)8638 zpool_do_resilver(int argc, char **argv)
8639 {
8640 int c;
8641 scrub_cbdata_t cb;
8642
8643 cb.cb_type = POOL_SCAN_RESILVER;
8644 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8645 cb.cb_date_start = cb.cb_date_end = 0;
8646
8647 /* check options */
8648 while ((c = getopt(argc, argv, "")) != -1) {
8649 switch (c) {
8650 case '?':
8651 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8652 optopt);
8653 usage(B_FALSE);
8654 }
8655 }
8656
8657 argc -= optind;
8658 argv += optind;
8659
8660 if (argc < 1) {
8661 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8662 usage(B_FALSE);
8663 }
8664
8665 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8666 B_FALSE, scrub_callback, &cb));
8667 }
8668
8669 /*
8670 * zpool trim [-d] [-r <rate>] [-c | -s] <-a | pool> [<device> ...]
8671 *
8672 * -a Trim all pools.
8673 * -c Cancel. Ends any in-progress trim.
8674 * -d Secure trim. Requires kernel and device support.
8675 * -r <rate> Sets the TRIM rate in bytes (per second). Supports
8676 * adding a multiplier suffix such as 'k' or 'm'.
8677 * -s Suspend. TRIM can then be restarted with no flags.
8678 * -w Wait. Blocks until trimming has completed.
8679 */
/*
 * Start, cancel (-c) or suspend (-s) a TRIM on every pool (-a), on one
 * whole pool, or on specific leaf vdevs of one pool.  -d requests a
 * secure trim, -r limits the rate, and -w blocks until completion.
 */
int
zpool_do_trim(int argc, char **argv)
{
	struct option long_options[] = {
		{"cancel", no_argument, NULL, 'c'},
		{"secure", no_argument, NULL, 'd'},
		{"rate", required_argument, NULL, 'r'},
		{"suspend", no_argument, NULL, 's'},
		{"wait", no_argument, NULL, 'w'},
		{"all", no_argument, NULL, 'a'},
		{0, 0, 0, 0}
	};

	pool_trim_func_t cmd_type = POOL_TRIM_START;
	uint64_t rate = 0;		/* 0 means no rate limit */
	boolean_t secure = B_FALSE;
	boolean_t wait = B_FALSE;
	boolean_t trimall = B_FALSE;
	int error;

	int c;
	while ((c = getopt_long(argc, argv, "acdr:sw", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'a':
			trimall = B_TRUE;
			break;
		case 'c':
			/* -c is incompatible with -d, -r and -s */
			if (cmd_type != POOL_TRIM_START &&
			    cmd_type != POOL_TRIM_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_TRIM_CANCEL;
			break;
		case 'd':
			if (cmd_type != POOL_TRIM_START) {
				(void) fprintf(stderr, gettext("-d cannot be "
				    "combined with the -c or -s options\n"));
				usage(B_FALSE);
			}
			secure = B_TRUE;
			break;
		case 'r':
			if (cmd_type != POOL_TRIM_START) {
				(void) fprintf(stderr, gettext("-r cannot be "
				    "combined with the -c or -s options\n"));
				usage(B_FALSE);
			}
			/* accepts numeric suffixes such as 'k' and 'm' */
			if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
				(void) fprintf(stderr, "%s: %s\n",
				    gettext("invalid value for rate"),
				    libzfs_error_description(g_zfs));
				usage(B_FALSE);
			}
			break;
		case 's':
			/* -s is incompatible with -c, -d and -r */
			if (cmd_type != POOL_TRIM_START &&
			    cmd_type != POOL_TRIM_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_TRIM_SUSPEND;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			/* optopt is 0 for an unknown long option */
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	trimflags_t trim_flags = {
		.secure = secure,
		.rate = rate,
		.wait = wait,
	};

	trim_cbdata_t cbdata = {
		.trim_flags = trim_flags,
		.cmd_type = cmd_type
	};

	if (argc < 1 && !trimall) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	/* -w only makes sense when starting a trim */
	if (wait && (cmd_type != POOL_TRIM_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c or "
		    "-s options\n"));
		usage(B_FALSE);
	}

	if (trimall && argc > 0) {
		(void) fprintf(stderr, gettext("-a cannot be combined with "
		    "individual zpools or vdevs\n"));
		usage(B_FALSE);
	}

	if (argc == 0 && trimall) {
		cbdata.trim_flags.fullpool = B_TRUE;
		/* Trim each pool recursively */
		error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
		    B_FALSE, zpool_trim_one, &cbdata);
	} else if (argc == 1) {
		char *poolname = argv[0];
		zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
		if (zhp == NULL)
			return (-1);
		/* no individual leaf vdevs specified, so add them all */
		error = zpool_trim_one(zhp, &cbdata);
		zpool_close(zhp);
	} else {
		char *poolname = argv[0];
		zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
		if (zhp == NULL)
			return (-1);
		/* leaf vdevs specified, trim only those */
		cbdata.trim_flags.fullpool = B_FALSE;
		nvlist_t *vdevs = fnvlist_alloc();
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
		error = zpool_trim(zhp, cbdata.cmd_type, vdevs,
		    &cbdata.trim_flags);
		fnvlist_free(vdevs);
		zpool_close(zhp);
	}

	return (error);
}
8825
8826 /*
8827 * Converts a total number of seconds to a human readable string broken
8828 * down in to days/hours/minutes/seconds.
8829 */
/*
 * Render a duration given in whole seconds as "[D days ]HH:MM:SS" into
 * buf.  The caller supplies a buffer large enough for the result
 * (callers in this file use at least 32 bytes).
 */
static void
secs_to_dhms(uint64_t total, char *buf)
{
	uint64_t secs = total % 60;
	uint64_t mins = (total / 60) % 60;
	uint64_t hours = (total / (60 * 60)) % 24;
	uint64_t days = total / (60 * 60 * 24);

	if (days == 0) {
		(void) sprintf(buf, "%02llu:%02llu:%02llu",
		    (unsigned long long)hours, (unsigned long long)mins,
		    (unsigned long long)secs);
	} else {
		(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
		    (unsigned long long)days, (unsigned long long)hours,
		    (unsigned long long)mins, (unsigned long long)secs);
	}
}
8848
8849 /*
8850 * Print out detailed error scrub status.
8851 */
8852 static void
print_err_scrub_status(pool_scan_stat_t * ps)8853 print_err_scrub_status(pool_scan_stat_t *ps)
8854 {
8855 time_t start, end, pause;
8856 uint64_t total_secs_left;
8857 uint64_t secs_left, mins_left, hours_left, days_left;
8858 uint64_t examined, to_be_examined;
8859
8860 if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
8861 return;
8862 }
8863
8864 (void) printf(gettext(" scrub: "));
8865
8866 start = ps->pss_error_scrub_start;
8867 end = ps->pss_error_scrub_end;
8868 pause = ps->pss_pass_error_scrub_pause;
8869 examined = ps->pss_error_scrub_examined;
8870 to_be_examined = ps->pss_error_scrub_to_be_examined;
8871
8872 assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);
8873
8874 if (ps->pss_error_scrub_state == DSS_FINISHED) {
8875 total_secs_left = end - start;
8876 days_left = total_secs_left / 60 / 60 / 24;
8877 hours_left = (total_secs_left / 60 / 60) % 24;
8878 mins_left = (total_secs_left / 60) % 60;
8879 secs_left = (total_secs_left % 60);
8880
8881 (void) printf(gettext("scrubbed %llu error blocks in %llu days "
8882 "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
8883 (u_longlong_t)days_left, (u_longlong_t)hours_left,
8884 (u_longlong_t)mins_left, (u_longlong_t)secs_left,
8885 ctime(&end));
8886
8887 return;
8888 } else if (ps->pss_error_scrub_state == DSS_CANCELED) {
8889 (void) printf(gettext("error scrub canceled on %s"),
8890 ctime(&end));
8891 return;
8892 }
8893 assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);
8894
8895 /* Error scrub is in progress. */
8896 if (pause == 0) {
8897 (void) printf(gettext("error scrub in progress since %s"),
8898 ctime(&start));
8899 } else {
8900 (void) printf(gettext("error scrub paused since %s"),
8901 ctime(&pause));
8902 (void) printf(gettext("\terror scrub started on %s"),
8903 ctime(&start));
8904 }
8905
8906 double fraction_done = (double)examined / (to_be_examined + examined);
8907 (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
8908 " blocks"), 100 * fraction_done, (u_longlong_t)examined);
8909
8910 (void) printf("\n");
8911 }
8912
8913 /*
8914 * Print out detailed scrub status.
8915 */
static void
print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
{
	time_t start, end, pause;
	uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
	uint64_t elapsed, scan_rate, issue_rate;
	double fraction_done;
	char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
	char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];

	printf("  ");
	(void) printf_color(ANSI_BOLD, gettext("scan:"));
	printf(" ");

	/* If there's never been a scan, there's not much to say. */
	if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
	    ps->pss_func >= POOL_SCAN_FUNCS) {
		(void) printf(gettext("none requested\n"));
		return;
	}

	start = ps->pss_start_time;
	end = ps->pss_end_time;
	pause = ps->pss_pass_scrub_pause;

	zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));

	int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
	int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
	assert(is_resilver || is_scrub);

	/* Scan is finished or canceled. */
	if (ps->pss_state == DSS_FINISHED) {
		/* ctime() supplies the trailing newline in these messages. */
		secs_to_dhms(end - start, time_buf);

		if (is_scrub) {
			(void) printf(gettext("scrub repaired %s "
			    "in %s with %llu errors on %s"), processed_buf,
			    time_buf, (u_longlong_t)ps->pss_errors,
			    ctime(&end));
		} else if (is_resilver) {
			(void) printf(gettext("resilvered %s "
			    "in %s with %llu errors on %s"), processed_buf,
			    time_buf, (u_longlong_t)ps->pss_errors,
			    ctime(&end));
		}
		return;
	} else if (ps->pss_state == DSS_CANCELED) {
		if (is_scrub) {
			(void) printf(gettext("scrub canceled on %s"),
			    ctime(&end));
		} else if (is_resilver) {
			(void) printf(gettext("resilver canceled on %s"),
			    ctime(&end));
		}
		return;
	}

	assert(ps->pss_state == DSS_SCANNING);

	/* Scan is in progress. Resilvers can't be paused. */
	if (is_scrub) {
		if (pause == 0) {
			(void) printf(gettext("scrub in progress since %s"),
			    ctime(&start));
		} else {
			(void) printf(gettext("scrub paused since %s"),
			    ctime(&pause));
			(void) printf(gettext("\tscrub started on %s"),
			    ctime(&start));
		}
	} else if (is_resilver) {
		(void) printf(gettext("resilver in progress since %s"),
		    ctime(&start));
	}

	/*
	 * "scanned" counts metadata traversal; "issued" counts the data
	 * I/O actually dispatched.  total_i excludes blocks the scan
	 * skipped (pss_skipped), so the issue total can be smaller than
	 * the scan total.
	 */
	scanned = ps->pss_examined;
	pass_scanned = ps->pss_pass_exam;
	issued = ps->pss_issued;
	pass_issued = ps->pss_pass_issued;
	total_s = ps->pss_to_examine;
	total_i = ps->pss_to_examine - ps->pss_skipped;

	/* we are only done with a block once we have issued the IO for it */
	fraction_done = (double)issued / total_i;

	/* elapsed time for this pass, rounding up to 1 if it's 0 */
	elapsed = time(NULL) - ps->pss_pass_start;
	elapsed -= ps->pss_pass_scrub_spent_paused;
	elapsed = (elapsed != 0) ? elapsed : 1;

	scan_rate = pass_scanned / elapsed;
	issue_rate = pass_issued / elapsed;

	/* format all of the numbers we will be reporting */
	zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
	zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
	zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
	zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));

	/* do not print estimated time if we have a paused scrub */
	(void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
	if (pause == 0 && scan_rate > 0) {
		zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
		(void) printf(gettext(" at %s/s"), srate_buf);
	}
	(void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
	if (pause == 0 && issue_rate > 0) {
		zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
		(void) printf(gettext(" at %s/s"), irate_buf);
	}
	(void) printf(gettext("\n"));

	if (is_resilver) {
		(void) printf(gettext("\t%s resilvered, %.2f%% done"),
		    processed_buf, 100 * fraction_done);
	} else if (is_scrub) {
		(void) printf(gettext("\t%s repaired, %.2f%% done"),
		    processed_buf, 100 * fraction_done);
	}

	if (pause == 0) {
		/*
		 * Only provide an estimate iff:
		 * 1) we haven't yet issued all we expected, and
		 * 2) the issue rate exceeds 10 MB/s, and
		 * 3) it's either:
		 *    a) a resilver which has started repairs, or
		 *    b) a scrub which has entered the issue phase.
		 */
		if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
		    ((is_resilver && ps->pss_processed > 0) ||
		    (is_scrub && issued > 0))) {
			/* time_buf is reused here for the ETA string */
			secs_to_dhms((total_i - issued) / issue_rate, time_buf);
			(void) printf(gettext(", %s to go\n"), time_buf);
		} else {
			(void) printf(gettext(", no estimated "
			    "completion time\n"));
		}
	} else {
		(void) printf(gettext("\n"));
	}
}
9059
/*
 * Print the "scan:" status line for a single sequential-resilver
 * (rebuild) on one top-level vdev.  'vrs' is the raw rebuild stats
 * array from the config and 'c' is the number of uint64_t entries it
 * contains; 'vdev_name' is used only for display.
 */
static void
print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
{
	if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
		return;

	printf("  ");
	(void) printf_color(ANSI_BOLD, gettext("scan:"));
	printf(" ");

	uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
	uint64_t bytes_issued = vrs->vrs_bytes_issued;
	uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
	uint64_t bytes_est_s = vrs->vrs_bytes_est;
	uint64_t bytes_est_i = vrs->vrs_bytes_est;
	/*
	 * vrs_pass_bytes_skipped sits at the end of the struct; only read
	 * it when the returned array is long enough to contain it
	 * (presumably for compatibility with older kernels — confirm).
	 */
	if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
		bytes_est_i -= vrs->vrs_pass_bytes_skipped;
	/* "+ 1" avoids a division by zero when the pass just started */
	uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
	    (vrs->vrs_pass_time_ms + 1)) * 1000;
	uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
	    (vrs->vrs_pass_time_ms + 1)) * 1000;
	/* clamp to 100% in case scanned overshoots the estimate */
	double scan_pct = MIN((double)bytes_scanned * 100 /
	    (bytes_est_s + 1), 100);

	/* Format all of the numbers we will be reporting */
	char bytes_scanned_buf[7], bytes_issued_buf[7];
	char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
	char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
	zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
	    sizeof (bytes_scanned_buf));
	zfs_nicebytes(bytes_issued, bytes_issued_buf,
	    sizeof (bytes_issued_buf));
	zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
	    sizeof (bytes_rebuilt_buf));
	zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
	zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));

	time_t start = vrs->vrs_start_time;
	time_t end = vrs->vrs_end_time;

	/* Rebuild is finished or canceled. */
	if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
		/* ctime() supplies the trailing newline */
		secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
		(void) printf(gettext("resilvered (%s) %s in %s "
		    "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
		    time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
		return;
	} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
		(void) printf(gettext("resilver (%s) canceled on %s"),
		    vdev_name, ctime(&end));
		return;
	} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
		(void) printf(gettext("resilver (%s) in progress since %s"),
		    vdev_name, ctime(&start));
	}

	assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);

	(void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
	    bytes_est_s_buf);
	if (scan_rate > 0) {
		zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
		(void) printf(gettext(" at %s/s"), scan_rate_buf);
	}
	(void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
	    bytes_est_i_buf);
	if (issue_rate > 0) {
		zfs_nicebytes(issue_rate, issue_rate_buf,
		    sizeof (issue_rate_buf));
		(void) printf(gettext(" at %s/s"), issue_rate_buf);
	}
	(void) printf(gettext("\n"));

	(void) printf(gettext("\t%s resilvered, %.2f%% done"),
	    bytes_rebuilt_buf, scan_pct);

	if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
		/*
		 * Only print an ETA when there is work left and the scan
		 * rate is at least 10 MB/s (slower rates make the
		 * estimate meaningless).
		 */
		if (bytes_est_s >= bytes_scanned &&
		    scan_rate >= 10 * 1024 * 1024) {
			secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
			    time_buf);
			(void) printf(gettext(", %s to go\n"), time_buf);
		} else {
			(void) printf(gettext(", no estimated "
			    "completion time\n"));
		}
	} else {
		(void) printf(gettext("\n"));
	}
}
9150
9151 /*
9152 * Print rebuild status for top-level vdevs.
9153 */
9154 static void
print_rebuild_status(zpool_handle_t * zhp,nvlist_t * nvroot)9155 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
9156 {
9157 nvlist_t **child;
9158 uint_t children;
9159
9160 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9161 &child, &children) != 0)
9162 children = 0;
9163
9164 for (uint_t c = 0; c < children; c++) {
9165 vdev_rebuild_stat_t *vrs;
9166 uint_t i;
9167
9168 if (nvlist_lookup_uint64_array(child[c],
9169 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
9170 char *name = zpool_vdev_name(g_zfs, zhp,
9171 child[c], VDEV_NAME_TYPE_ID);
9172 print_rebuild_status_impl(vrs, i, name);
9173 free(name);
9174 }
9175 }
9176 }
9177
9178 /*
9179 * As we don't scrub checkpointed blocks, we want to warn the user that we
9180 * skipped scanning some blocks if a checkpoint exists or existed at any
9181 * time during the scan. If a sequential instead of healing reconstruction
9182 * was performed then the blocks were reconstructed. However, their checksums
9183 * have not been verified so we still print the warning.
9184 */
9185 static void
print_checkpoint_scan_warning(pool_scan_stat_t * ps,pool_checkpoint_stat_t * pcs)9186 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
9187 {
9188 if (ps == NULL || pcs == NULL)
9189 return;
9190
9191 if (pcs->pcs_state == CS_NONE ||
9192 pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
9193 return;
9194
9195 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
9196
9197 if (ps->pss_state == DSS_NONE)
9198 return;
9199
9200 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
9201 ps->pss_end_time < pcs->pcs_start_time)
9202 return;
9203
9204 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
9205 (void) printf(gettext(" scan warning: skipped blocks "
9206 "that are only referenced by the checkpoint.\n"));
9207 } else {
9208 assert(ps->pss_state == DSS_SCANNING);
9209 (void) printf(gettext(" scan warning: skipping blocks "
9210 "that are only referenced by the checkpoint.\n"));
9211 }
9212 }
9213
9214 /*
9215 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
9216 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
9217 * the last completed (or cancelled) rebuild.
9218 */
9219 static boolean_t
check_rebuilding(nvlist_t * nvroot,uint64_t * rebuild_end_time)9220 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
9221 {
9222 nvlist_t **child;
9223 uint_t children;
9224 boolean_t rebuilding = B_FALSE;
9225 uint64_t end_time = 0;
9226
9227 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9228 &child, &children) != 0)
9229 children = 0;
9230
9231 for (uint_t c = 0; c < children; c++) {
9232 vdev_rebuild_stat_t *vrs;
9233 uint_t i;
9234
9235 if (nvlist_lookup_uint64_array(child[c],
9236 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
9237
9238 if (vrs->vrs_end_time > end_time)
9239 end_time = vrs->vrs_end_time;
9240
9241 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
9242 rebuilding = B_TRUE;
9243 end_time = 0;
9244 break;
9245 }
9246 }
9247 }
9248
9249 if (rebuild_end_time != NULL)
9250 *rebuild_end_time = end_time;
9251
9252 return (rebuilding);
9253 }
9254
/*
 * Build an nvlist describing one vdev for JSON "zpool status" output
 * and attach it to 'item', keyed by the vdev's display name.  Child
 * vdevs are handled recursively: normally nested under a "vdevs"
 * sub-list, or added directly to 'item' when cb->cb_flat_vdevs is set
 * (with a "parent" back-reference).  Spares only report INUSE/AVAIL
 * state; all other vdevs get full space/error/init/trim statistics.
 */
static void
vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    int depth, boolean_t isspare, char *parent, nvlist_t *item)
{
	nvlist_t *vds, **child, *ch = NULL;
	uint_t vsc, children;
	vdev_stat_t *vs;
	char *vname;
	uint64_t notpresent;
	const char *type, *path;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;
	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
	/* Indirect vdevs (left behind by device removal) are not reported. */
	if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
		return;

	/* Skip entirely-healthy subtrees when only unhealthy vdevs wanted. */
	if (cb->cb_print_unhealthy && depth > 0 &&
	    for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
		return;
	}
	vname = zpool_vdev_name(g_zfs, zhp, nv,
	    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
	vds = fnvlist_alloc();
	fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int);
	if (cb->cb_flat_vdevs && parent != NULL) {
		fnvlist_add_string(vds, "parent", parent);
	}

	if (isspare) {
		/* Spares report only their availability state. */
		if (vs->vs_aux == VDEV_AUX_SPARED) {
			fnvlist_add_string(vds, "state", "INUSE");
			used_by_other(zhp, nv, vds);
		} else if (vs->vs_state == VDEV_STATE_HEALTHY)
			fnvlist_add_string(vds, "state", "AVAIL");
	} else {
		/* Zero-valued space counters are simply omitted. */
		if (vs->vs_alloc) {
			nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_space) {
			nice_num_str_nvlist(vds, "total_space", vs->vs_space,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_dspace) {
			nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_rsize) {
			nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_esize) {
			nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_self_healed) {
			nice_num_str_nvlist(vds, "self_healed",
			    vs->vs_self_healed, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		}
		if (vs->vs_pspace) {
			nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		/* Error counters are always reported, even when zero. */
		nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(vds, "checksum_errors",
		    vs->vs_checksum_errors, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICENUM_1024);
		if (vs->vs_scan_processed) {
			nice_num_str_nvlist(vds, "scan_processed",
			    vs->vs_scan_processed, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		}
		if (vs->vs_checkpoint_space) {
			nice_num_str_nvlist(vds, "checkpoint_space",
			    vs->vs_checkpoint_space, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		}
		if (vs->vs_resilver_deferred) {
			nice_num_str_nvlist(vds, "resilver_deferred",
			    vs->vs_resilver_deferred, B_TRUE,
			    cb->cb_json_as_int, ZFS_NICENUM_1024);
		}
		/* children == 0 identifies a leaf vdev throughout below. */
		if (children == 0) {
			nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
		}
		if (cb->cb_print_power) {
			if (children == 0) {
				/* Only leaf vdevs have physical slots */
				switch (zpool_power_current_state(zhp, (char *)
				    fnvlist_lookup_string(nv,
				    ZPOOL_CONFIG_PATH))) {
				case 0:
					fnvlist_add_string(vds, "power_state",
					    "off");
					break;
				case 1:
					fnvlist_add_string(vds, "power_state",
					    "on");
					break;
				default:
					/* Slot state unknown/unsupported. */
					fnvlist_add_string(vds, "power_state",
					    "-");
				}
			} else {
				fnvlist_add_string(vds, "power_state", "-");
			}
		}
	}

	if (cb->cb_print_dio_verify) {
		nice_num_str_nvlist(vds, "dio_verify_errors",
		    vs->vs_dio_verify_errors, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICENUM_1024);
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &notpresent) == 0) {
		/* Device missing: record the path it used to be at. */
		nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
		    1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		fnvlist_add_string(vds, "was",
		    fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));
	} else if (vs->vs_aux != VDEV_AUX_NONE) {
		fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);
	} else if (children == 0 && !isspare &&
	    getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
	    VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
	    vs->vs_configured_ashift < vs->vs_physical_ashift) {
		/* Flag leaves configured below their physical sector size. */
		nice_num_str_nvlist(vds, "configured_ashift",
		    vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,
		    ZFS_NICENUM_1024);
		nice_num_str_nvlist(vds, "physical_ashift",
		    vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,
		    ZFS_NICENUM_1024);
	}
	if (vs->vs_scan_removing != 0) {
		nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
	} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
		nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
	}

	/* Output from any -c command scripts, keyed by device path. */
	if (cb->vcdl != NULL) {
		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
			zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp),
			    path, vds);
		}
	}

	if (children == 0) {
		if (cb->cb_print_vdev_init) {
			if (vs->vs_initialize_state != 0) {
				uint64_t st = vs->vs_initialize_state;
				fnvlist_add_string(vds, "init_state",
				    vdev_init_state_str[st]);
				nice_num_str_nvlist(vds, "initialized",
				    vs->vs_initialize_bytes_done,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICENUM_BYTES);
				nice_num_str_nvlist(vds, "to_initialize",
				    vs->vs_initialize_bytes_est,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICENUM_BYTES);
				nice_num_str_nvlist(vds, "init_time",
				    vs->vs_initialize_action_time,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICE_TIMESTAMP);
				nice_num_str_nvlist(vds, "init_errors",
				    vs->vs_initialize_errors,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICENUM_1024);
			} else {
				fnvlist_add_string(vds, "init_state",
				    "UNINITIALIZED");
			}
		}
		if (cb->cb_print_vdev_trim) {
			if (vs->vs_trim_notsup == 0) {
				if (vs->vs_trim_state != 0) {
					uint64_t st = vs->vs_trim_state;
					fnvlist_add_string(vds, "trim_state",
					    vdev_trim_state_str[st]);
					nice_num_str_nvlist(vds, "trimmed",
					    vs->vs_trim_bytes_done,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(vds, "to_trim",
					    vs->vs_trim_bytes_est,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(vds, "trim_time",
					    vs->vs_trim_action_time,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					nice_num_str_nvlist(vds, "trim_errors",
					    vs->vs_trim_errors,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_1024);
				} else
					fnvlist_add_string(vds, "trim_state",
					    "UNTRIMMED");
			}
			nice_num_str_nvlist(vds, "trim_notsup",
			    vs->vs_trim_notsup, B_TRUE,
			    cb->cb_json_as_int, ZFS_NICENUM_1024);
		}
	} else {
		/* Interior vdev: collect children under a "vdevs" sub-list. */
		ch = fnvlist_alloc();
	}

	/*
	 * NOTE(review): in flat mode a leaf is added to 'item' here and
	 * again at the bottom of this function with the same key;
	 * fnvlist_add_nvlist replaces the earlier entry — confirm this
	 * duplication is intended.
	 */
	if (cb->cb_flat_vdevs && children == 0) {
		fnvlist_add_nvlist(item, vname, vds);
	}

	for (int c = 0; c < children; c++) {
		uint64_t islog = B_FALSE, ishole = B_FALSE;
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &islog);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &ishole);
		/* Logs, holes, and allocation-class vdevs are reported
		 * separately by other builders, not as ordinary children. */
		if (islog || ishole)
			continue;
		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;
		if (cb->cb_flat_vdevs) {
			vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
			    vname, item);
		}
		vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
		    vname, ch);
	}

	if (ch != NULL) {
		if (!nvlist_empty(ch))
			fnvlist_add_nvlist(vds, "vdevs", ch);
		fnvlist_free(ch);
	}
	fnvlist_add_nvlist(item, vname, vds);
	fnvlist_free(vds);
	free(vname);
}
9512
9513 static void
class_vdevs_nvlist(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t * nv,const char * class,nvlist_t * item)9514 class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9515 const char *class, nvlist_t *item)
9516 {
9517 uint_t c, children;
9518 nvlist_t **child;
9519 nvlist_t *class_obj = NULL;
9520
9521 if (!cb->cb_flat_vdevs)
9522 class_obj = fnvlist_alloc();
9523
9524 assert(zhp != NULL || !cb->cb_verbose);
9525
9526 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
9527 &children) != 0)
9528 return;
9529
9530 for (c = 0; c < children; c++) {
9531 uint64_t is_log = B_FALSE;
9532 const char *bias = NULL;
9533 const char *type = NULL;
9534 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
9535 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
9536
9537 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
9538 &is_log);
9539
9540 if (is_log) {
9541 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
9542 } else {
9543 (void) nvlist_lookup_string(child[c],
9544 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
9545 (void) nvlist_lookup_string(child[c],
9546 ZPOOL_CONFIG_TYPE, &type);
9547 }
9548
9549 if (bias == NULL || strcmp(bias, class) != 0)
9550 continue;
9551 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
9552 continue;
9553
9554 if (cb->cb_flat_vdevs) {
9555 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
9556 NULL, item);
9557 } else {
9558 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
9559 NULL, class_obj);
9560 }
9561 free(name);
9562 }
9563 if (!cb->cb_flat_vdevs) {
9564 if (!nvlist_empty(class_obj))
9565 fnvlist_add_nvlist(item, class, class_obj);
9566 fnvlist_free(class_obj);
9567 }
9568 }
9569
9570 static void
l2cache_nvlist(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t * nv,nvlist_t * item)9571 l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9572 nvlist_t *item)
9573 {
9574 nvlist_t *l2c = NULL, **l2cache;
9575 uint_t nl2cache;
9576 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
9577 &l2cache, &nl2cache) == 0) {
9578 if (nl2cache == 0)
9579 return;
9580 if (!cb->cb_flat_vdevs)
9581 l2c = fnvlist_alloc();
9582 for (int i = 0; i < nl2cache; i++) {
9583 if (cb->cb_flat_vdevs) {
9584 vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
9585 B_FALSE, NULL, item);
9586 } else {
9587 vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
9588 B_FALSE, NULL, l2c);
9589 }
9590 }
9591 }
9592 if (!cb->cb_flat_vdevs) {
9593 if (!nvlist_empty(l2c))
9594 fnvlist_add_nvlist(item, "l2cache", l2c);
9595 fnvlist_free(l2c);
9596 }
9597 }
9598
9599 static void
spares_nvlist(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t * nv,nvlist_t * item)9600 spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9601 nvlist_t *item)
9602 {
9603 nvlist_t *sp = NULL, **spares;
9604 uint_t nspares;
9605 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
9606 &spares, &nspares) == 0) {
9607 if (nspares == 0)
9608 return;
9609 if (!cb->cb_flat_vdevs)
9610 sp = fnvlist_alloc();
9611 for (int i = 0; i < nspares; i++) {
9612 if (cb->cb_flat_vdevs) {
9613 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
9614 NULL, item);
9615 } else {
9616 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
9617 NULL, sp);
9618 }
9619 }
9620 }
9621 if (!cb->cb_flat_vdevs) {
9622 if (!nvlist_empty(sp))
9623 fnvlist_add_nvlist(item, "spares", sp);
9624 fnvlist_free(sp);
9625 }
9626 }
9627
9628 static void
errors_nvlist(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t * item)9629 errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
9630 {
9631 uint64_t nerr;
9632 nvlist_t *config = zpool_get_config(zhp, NULL);
9633 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
9634 &nerr) == 0) {
9635 nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr,
9636 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9637 if (nerr != 0 && cb->cb_verbose) {
9638 nvlist_t *nverrlist = NULL;
9639 if (zpool_get_errlog(zhp, &nverrlist) == 0) {
9640 int i = 0;
9641 int count = 0;
9642 size_t len = MAXPATHLEN * 2;
9643 nvpair_t *elem = NULL;
9644
9645 for (nvpair_t *pair =
9646 nvlist_next_nvpair(nverrlist, NULL);
9647 pair != NULL;
9648 pair = nvlist_next_nvpair(nverrlist, pair))
9649 count++;
9650 char **errl = (char **)malloc(
9651 count * sizeof (char *));
9652
9653 while ((elem = nvlist_next_nvpair(nverrlist,
9654 elem)) != NULL) {
9655 nvlist_t *nv;
9656 uint64_t dsobj, obj;
9657
9658 verify(nvpair_value_nvlist(elem,
9659 &nv) == 0);
9660 verify(nvlist_lookup_uint64(nv,
9661 ZPOOL_ERR_DATASET, &dsobj) == 0);
9662 verify(nvlist_lookup_uint64(nv,
9663 ZPOOL_ERR_OBJECT, &obj) == 0);
9664 errl[i] = safe_malloc(len);
9665 zpool_obj_to_path(zhp, dsobj, obj,
9666 errl[i++], len);
9667 }
9668 nvlist_free(nverrlist);
9669 fnvlist_add_string_array(item, "errlist",
9670 (const char **)errl, count);
9671 for (int i = 0; i < count; ++i)
9672 free(errl[i]);
9673 free(errl);
9674 } else
9675 fnvlist_add_string(item, "errlist",
9676 strerror(errno));
9677 }
9678 }
9679 }
9680
9681 static void
ddt_stats_nvlist(ddt_stat_t * dds,status_cbdata_t * cb,nvlist_t * item)9682 ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item)
9683 {
9684 nice_num_str_nvlist(item, "blocks", dds->dds_blocks,
9685 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9686 nice_num_str_nvlist(item, "logical_size", dds->dds_lsize,
9687 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9688 nice_num_str_nvlist(item, "physical_size", dds->dds_psize,
9689 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9690 nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize,
9691 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9692 nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks,
9693 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9694 nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize,
9695 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9696 nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize,
9697 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9698 nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize,
9699 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9700 }
9701
9702 static void
dedup_stats_nvlist(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t * item)9703 dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
9704 {
9705 nvlist_t *config;
9706 if (cb->cb_dedup_stats) {
9707 ddt_histogram_t *ddh;
9708 ddt_stat_t *dds;
9709 ddt_object_t *ddo;
9710 nvlist_t *ddt_stat, *ddt_obj, *dedup;
9711 uint_t c;
9712 uint64_t cspace_prop;
9713
9714 config = zpool_get_config(zhp, NULL);
9715 if (nvlist_lookup_uint64_array(config,
9716 ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0)
9717 return;
9718
9719 dedup = fnvlist_alloc();
9720 ddt_obj = fnvlist_alloc();
9721 nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count,
9722 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9723 if (ddo->ddo_count == 0) {
9724 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
9725 ddt_obj);
9726 fnvlist_add_nvlist(item, "dedup_stats", dedup);
9727 fnvlist_free(ddt_obj);
9728 fnvlist_free(dedup);
9729 return;
9730 } else {
9731 nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace,
9732 cb->cb_literal, cb->cb_json_as_int,
9733 ZFS_NICENUM_1024);
9734 nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace,
9735 cb->cb_literal, cb->cb_json_as_int,
9736 ZFS_NICENUM_1024);
9737 /*
9738 * Squash cached size into in-core size to handle race.
9739 * Only include cached size if it is available.
9740 */
9741 cspace_prop = zpool_get_prop_int(zhp,
9742 ZPOOL_PROP_DEDUPCACHED, NULL);
9743 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
9744 nice_num_str_nvlist(dedup, "cspace", cspace_prop,
9745 cb->cb_literal, cb->cb_json_as_int,
9746 ZFS_NICENUM_1024);
9747 }
9748
9749 ddt_stat = fnvlist_alloc();
9750 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
9751 (uint64_t **)&dds, &c) == 0) {
9752 nvlist_t *total = fnvlist_alloc();
9753 if (dds->dds_blocks == 0)
9754 fnvlist_add_string(total, "blocks", "0");
9755 else
9756 ddt_stats_nvlist(dds, cb, total);
9757 fnvlist_add_nvlist(ddt_stat, "total", total);
9758 fnvlist_free(total);
9759 }
9760 if (nvlist_lookup_uint64_array(config,
9761 ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) {
9762 nvlist_t *hist = fnvlist_alloc();
9763 nvlist_t *entry = NULL;
9764 char buf[16];
9765 for (int h = 0; h < 64; h++) {
9766 if (ddh->ddh_stat[h].dds_blocks != 0) {
9767 entry = fnvlist_alloc();
9768 ddt_stats_nvlist(&ddh->ddh_stat[h], cb,
9769 entry);
9770 (void) snprintf(buf, 16, "%d", h);
9771 fnvlist_add_nvlist(hist, buf, entry);
9772 fnvlist_free(entry);
9773 }
9774 }
9775 if (!nvlist_empty(hist))
9776 fnvlist_add_nvlist(ddt_stat, "histogram", hist);
9777 fnvlist_free(hist);
9778 }
9779
9780 if (!nvlist_empty(ddt_obj)) {
9781 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
9782 ddt_obj);
9783 }
9784 fnvlist_free(ddt_obj);
9785 if (!nvlist_empty(ddt_stat)) {
9786 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS,
9787 ddt_stat);
9788 }
9789 fnvlist_free(ddt_stat);
9790 if (!nvlist_empty(dedup))
9791 fnvlist_add_nvlist(item, "dedup_stats", dedup);
9792 fnvlist_free(dedup);
9793 }
9794 }
9795
9796 static void
raidz_expand_status_nvlist(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t * nvroot,nvlist_t * item)9797 raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9798 nvlist_t *nvroot, nvlist_t *item)
9799 {
9800 uint_t c;
9801 pool_raidz_expand_stat_t *pres = NULL;
9802 if (nvlist_lookup_uint64_array(nvroot,
9803 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) {
9804 nvlist_t **child;
9805 uint_t children;
9806 nvlist_t *nv = fnvlist_alloc();
9807 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9808 &child, &children) == 0);
9809 assert(pres->pres_expanding_vdev < children);
9810 char *name =
9811 zpool_vdev_name(g_zfs, zhp,
9812 child[pres->pres_expanding_vdev], 0);
9813 fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int);
9814 fnvlist_add_string(nv, "state",
9815 pool_scan_state_str[pres->pres_state]);
9816 nice_num_str_nvlist(nv, "expanding_vdev",
9817 pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int,
9818 ZFS_NICENUM_1024);
9819 nice_num_str_nvlist(nv, "start_time", pres->pres_start_time,
9820 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9821 nice_num_str_nvlist(nv, "end_time", pres->pres_end_time,
9822 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9823 nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow,
9824 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9825 nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed,
9826 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9827 nice_num_str_nvlist(nv, "waiting_for_resilver",
9828 pres->pres_waiting_for_resilver, B_TRUE,
9829 cb->cb_json_as_int, ZFS_NICENUM_1024);
9830 fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv);
9831 fnvlist_free(nv);
9832 free(name);
9833 }
9834 }
9835
9836 static void
checkpoint_status_nvlist(nvlist_t * nvroot,status_cbdata_t * cb,nvlist_t * item)9837 checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb,
9838 nvlist_t *item)
9839 {
9840 uint_t c;
9841 pool_checkpoint_stat_t *pcs = NULL;
9842 if (nvlist_lookup_uint64_array(nvroot,
9843 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) {
9844 nvlist_t *nv = fnvlist_alloc();
9845 fnvlist_add_string(nv, "state",
9846 checkpoint_state_str[pcs->pcs_state]);
9847 nice_num_str_nvlist(nv, "start_time",
9848 pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int,
9849 ZFS_NICE_TIMESTAMP);
9850 nice_num_str_nvlist(nv, "space",
9851 pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int,
9852 ZFS_NICENUM_BYTES);
9853 fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv);
9854 fnvlist_free(nv);
9855 }
9856 }
9857
9858 static void
removal_status_nvlist(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t * nvroot,nvlist_t * item)9859 removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9860 nvlist_t *nvroot, nvlist_t *item)
9861 {
9862 uint_t c;
9863 pool_removal_stat_t *prs = NULL;
9864 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS,
9865 (uint64_t **)&prs, &c) == 0) {
9866 if (prs->prs_state != DSS_NONE) {
9867 nvlist_t **child;
9868 uint_t children;
9869 verify(nvlist_lookup_nvlist_array(nvroot,
9870 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
9871 assert(prs->prs_removing_vdev < children);
9872 char *vdev_name = zpool_vdev_name(g_zfs, zhp,
9873 child[prs->prs_removing_vdev], B_TRUE);
9874 nvlist_t *nv = fnvlist_alloc();
9875 fill_vdev_info(nv, zhp, vdev_name, B_FALSE,
9876 cb->cb_json_as_int);
9877 fnvlist_add_string(nv, "state",
9878 pool_scan_state_str[prs->prs_state]);
9879 nice_num_str_nvlist(nv, "removing_vdev",
9880 prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int,
9881 ZFS_NICENUM_1024);
9882 nice_num_str_nvlist(nv, "start_time",
9883 prs->prs_start_time, cb->cb_literal,
9884 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9885 nice_num_str_nvlist(nv, "end_time", prs->prs_end_time,
9886 cb->cb_literal, cb->cb_json_as_int,
9887 ZFS_NICE_TIMESTAMP);
9888 nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy,
9889 cb->cb_literal, cb->cb_json_as_int,
9890 ZFS_NICENUM_BYTES);
9891 nice_num_str_nvlist(nv, "copied", prs->prs_copied,
9892 cb->cb_literal, cb->cb_json_as_int,
9893 ZFS_NICENUM_BYTES);
9894 nice_num_str_nvlist(nv, "mapping_memory",
9895 prs->prs_mapping_memory, cb->cb_literal,
9896 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9897 fnvlist_add_nvlist(item,
9898 ZPOOL_CONFIG_REMOVAL_STATS, nv);
9899 fnvlist_free(nv);
9900 free(vdev_name);
9901 }
9902 }
9903 }
9904
static void
scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
    nvlist_t *nvroot, nvlist_t *item)
{
	/*
	 * Translate the pool's scan statistics (scrub/resilver), any
	 * error-scrub statistics, and per-top-level-vdev sequential
	 * rebuild statistics into a ZPOOL_CONFIG_SCAN_STATS nvlist on
	 * 'item' for JSON-style status output.
	 */
	pool_scan_stat_t *ps = NULL;
	uint_t c;
	nvlist_t *scan = fnvlist_alloc();
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &c) == 0) {
		fnvlist_add_string(scan, "function",
		    pool_scan_func_str[ps->pss_func]);
		fnvlist_add_string(scan, "state",
		    pool_scan_state_str[ps->pss_state]);
		nice_num_str_nvlist(scan, "start_time", ps->pss_start_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "end_time", ps->pss_end_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "examined", ps->pss_examined,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "skipped", ps->pss_skipped,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "processed", ps->pss_processed,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "errors", ps->pss_errors,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		/* "pass" fields cover only the current pass of the scan. */
		nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "scrub_pause",
		    ps->pss_pass_scrub_pause, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "scrub_spent_paused",
		    ps->pss_pass_scrub_spent_paused,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "issued_bytes_per_scan",
		    ps->pss_pass_issued, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "issued", ps->pss_issued,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		/*
		 * Only report error-scrub data when an error scrub has
		 * actually run more recently than the regular scrub.
		 */
		if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
		    ps->pss_error_scrub_start > ps->pss_start_time) {
			fnvlist_add_string(scan, "err_scrub_func",
			    pool_scan_func_str[ps->pss_error_scrub_func]);
			fnvlist_add_string(scan, "err_scrub_state",
			    pool_scan_state_str[ps->pss_error_scrub_state]);
			nice_num_str_nvlist(scan, "err_scrub_start_time",
			    ps->pss_error_scrub_start,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(scan, "err_scrub_end_time",
			    ps->pss_error_scrub_end,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(scan, "err_scrub_examined",
			    ps->pss_error_scrub_examined,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(scan, "err_scrub_to_examine",
			    ps->pss_error_scrub_to_be_examined,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(scan, "err_scrub_pause",
			    ps->pss_pass_error_scrub_pause,
			    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		}
	}

	/*
	 * Rebuild stats live on each top-level vdev rather than on the
	 * root, so walk the children and collect them per vdev name.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		vdev_rebuild_stat_t *vrs;
		uint_t i;
		char *name;
		nvlist_t *nv;
		nvlist_t *rebuild = fnvlist_alloc();
		uint64_t st;
		for (uint_t c = 0; c < children; c++) {
			if (nvlist_lookup_uint64_array(child[c],
			    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs,
			    &i) == 0) {
				if (vrs->vrs_state != VDEV_REBUILD_NONE) {
					nv = fnvlist_alloc();
					name = zpool_vdev_name(g_zfs, zhp,
					    child[c], VDEV_NAME_TYPE_ID);
					fill_vdev_info(nv, zhp, name, B_FALSE,
					    cb->cb_json_as_int);
					st = vrs->vrs_state;
					fnvlist_add_string(nv, "state",
					    vdev_rebuild_state_str[st]);
					nice_num_str_nvlist(nv, "start_time",
					    vrs->vrs_start_time, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					nice_num_str_nvlist(nv, "end_time",
					    vrs->vrs_end_time, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					/* ms -> ns for ZFS_NICENUM_TIME. */
					nice_num_str_nvlist(nv, "scan_time",
					    vrs->vrs_scan_time_ms * 1000000,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_TIME);
					nice_num_str_nvlist(nv, "scanned",
					    vrs->vrs_bytes_scanned,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "issued",
					    vrs->vrs_bytes_issued,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "rebuilt",
					    vrs->vrs_bytes_rebuilt,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "to_scan",
					    vrs->vrs_bytes_est, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "errors",
					    vrs->vrs_errors, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICENUM_1024);
					/* ms -> ns for ZFS_NICENUM_TIME. */
					nice_num_str_nvlist(nv, "pass_time",
					    vrs->vrs_pass_time_ms * 1000000,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_TIME);
					nice_num_str_nvlist(nv, "pass_scanned",
					    vrs->vrs_pass_bytes_scanned,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "pass_issued",
					    vrs->vrs_pass_bytes_issued,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "pass_skipped",
					    vrs->vrs_pass_bytes_skipped,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					fnvlist_add_nvlist(rebuild, name, nv);
					free(name);
				}
			}
		}
		if (!nvlist_empty(rebuild))
			fnvlist_add_nvlist(scan, "rebuild_stats", rebuild);
		fnvlist_free(rebuild);
	}

	if (!nvlist_empty(scan))
		fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan);
	fnvlist_free(scan);
}
10061
10062 /*
10063 * Print the scan status.
10064 */
10065 static void
print_scan_status(zpool_handle_t * zhp,nvlist_t * nvroot)10066 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
10067 {
10068 uint64_t rebuild_end_time = 0, resilver_end_time = 0;
10069 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
10070 boolean_t have_errorscrub = B_FALSE;
10071 boolean_t active_resilver = B_FALSE;
10072 pool_checkpoint_stat_t *pcs = NULL;
10073 pool_scan_stat_t *ps = NULL;
10074 uint_t c;
10075 time_t scrub_start = 0, errorscrub_start = 0;
10076
10077 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
10078 (uint64_t **)&ps, &c) == 0) {
10079 if (ps->pss_func == POOL_SCAN_RESILVER) {
10080 resilver_end_time = ps->pss_end_time;
10081 active_resilver = (ps->pss_state == DSS_SCANNING);
10082 }
10083
10084 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
10085 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
10086 scrub_start = ps->pss_start_time;
10087 if (c > offsetof(pool_scan_stat_t,
10088 pss_pass_error_scrub_pause) / 8) {
10089 have_errorscrub = (ps->pss_error_scrub_func ==
10090 POOL_SCAN_ERRORSCRUB);
10091 errorscrub_start = ps->pss_error_scrub_start;
10092 }
10093 }
10094
10095 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
10096 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
10097
10098 /* Always print the scrub status when available. */
10099 if (have_scrub && scrub_start > errorscrub_start)
10100 print_scan_scrub_resilver_status(ps);
10101 else if (have_errorscrub && errorscrub_start >= scrub_start)
10102 print_err_scrub_status(ps);
10103
10104 /*
10105 * When there is an active resilver or rebuild print its status.
10106 * Otherwise print the status of the last resilver or rebuild.
10107 */
10108 if (active_resilver || (!active_rebuild && have_resilver &&
10109 resilver_end_time && resilver_end_time > rebuild_end_time)) {
10110 print_scan_scrub_resilver_status(ps);
10111 } else if (active_rebuild || (!active_resilver && have_rebuild &&
10112 rebuild_end_time && rebuild_end_time > resilver_end_time)) {
10113 print_rebuild_status(zhp, nvroot);
10114 }
10115
10116 (void) nvlist_lookup_uint64_array(nvroot,
10117 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
10118 print_checkpoint_scan_warning(ps, pcs);
10119 }
10120
10121 /*
10122 * Print out detailed removal status.
10123 */
10124 static void
print_removal_status(zpool_handle_t * zhp,pool_removal_stat_t * prs)10125 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
10126 {
10127 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
10128 time_t start, end;
10129 nvlist_t *config, *nvroot;
10130 nvlist_t **child;
10131 uint_t children;
10132 char *vdev_name;
10133
10134 if (prs == NULL || prs->prs_state == DSS_NONE)
10135 return;
10136
10137 /*
10138 * Determine name of vdev.
10139 */
10140 config = zpool_get_config(zhp, NULL);
10141 nvroot = fnvlist_lookup_nvlist(config,
10142 ZPOOL_CONFIG_VDEV_TREE);
10143 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
10144 &child, &children) == 0);
10145 assert(prs->prs_removing_vdev < children);
10146 vdev_name = zpool_vdev_name(g_zfs, zhp,
10147 child[prs->prs_removing_vdev], B_TRUE);
10148
10149 (void) printf_color(ANSI_BOLD, gettext("remove: "));
10150
10151 start = prs->prs_start_time;
10152 end = prs->prs_end_time;
10153 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
10154
10155 /*
10156 * Removal is finished or canceled.
10157 */
10158 if (prs->prs_state == DSS_FINISHED) {
10159 uint64_t minutes_taken = (end - start) / 60;
10160
10161 (void) printf(gettext("Removal of vdev %llu copied %s "
10162 "in %lluh%um, completed on %s"),
10163 (longlong_t)prs->prs_removing_vdev,
10164 copied_buf,
10165 (u_longlong_t)(minutes_taken / 60),
10166 (uint_t)(minutes_taken % 60),
10167 ctime((time_t *)&end));
10168 } else if (prs->prs_state == DSS_CANCELED) {
10169 (void) printf(gettext("Removal of %s canceled on %s"),
10170 vdev_name, ctime(&end));
10171 } else {
10172 uint64_t copied, total, elapsed, rate, mins_left, hours_left;
10173 double fraction_done;
10174
10175 assert(prs->prs_state == DSS_SCANNING);
10176
10177 /*
10178 * Removal is in progress.
10179 */
10180 (void) printf(gettext(
10181 "Evacuation of %s in progress since %s"),
10182 vdev_name, ctime(&start));
10183
10184 copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
10185 total = prs->prs_to_copy;
10186 fraction_done = (double)copied / total;
10187
10188 /* elapsed time for this pass */
10189 elapsed = time(NULL) - prs->prs_start_time;
10190 elapsed = elapsed > 0 ? elapsed : 1;
10191 rate = copied / elapsed;
10192 rate = rate > 0 ? rate : 1;
10193 mins_left = ((total - copied) / rate) / 60;
10194 hours_left = mins_left / 60;
10195
10196 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
10197 zfs_nicenum(total, total_buf, sizeof (total_buf));
10198 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
10199
10200 /*
10201 * do not print estimated time if hours_left is more than
10202 * 30 days
10203 */
10204 (void) printf(gettext(
10205 "\t%s copied out of %s at %s/s, %.2f%% done"),
10206 examined_buf, total_buf, rate_buf, 100 * fraction_done);
10207 if (hours_left < (30 * 24)) {
10208 (void) printf(gettext(", %lluh%um to go\n"),
10209 (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
10210 } else {
10211 (void) printf(gettext(
10212 ", (copy is slow, no estimated time)\n"));
10213 }
10214 }
10215 free(vdev_name);
10216
10217 if (prs->prs_mapping_memory > 0) {
10218 char mem_buf[7];
10219 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
10220 (void) printf(gettext(
10221 "\t%s memory used for removed device mappings\n"),
10222 mem_buf);
10223 }
10224 }
10225
10226 /*
10227 * Print out detailed raidz expansion status.
10228 */
10229 static void
print_raidz_expand_status(zpool_handle_t * zhp,pool_raidz_expand_stat_t * pres)10230 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
10231 {
10232 char copied_buf[7];
10233
10234 if (pres == NULL || pres->pres_state == DSS_NONE)
10235 return;
10236
10237 /*
10238 * Determine name of vdev.
10239 */
10240 nvlist_t *config = zpool_get_config(zhp, NULL);
10241 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
10242 ZPOOL_CONFIG_VDEV_TREE);
10243 nvlist_t **child;
10244 uint_t children;
10245 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
10246 &child, &children) == 0);
10247 assert(pres->pres_expanding_vdev < children);
10248
10249 (void) printf_color(ANSI_BOLD, gettext("expand: "));
10250
10251 time_t start = pres->pres_start_time;
10252 time_t end = pres->pres_end_time;
10253 char *vname =
10254 zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
10255 zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));
10256
10257 /*
10258 * Expansion is finished or canceled.
10259 */
10260 if (pres->pres_state == DSS_FINISHED) {
10261 char time_buf[32];
10262 secs_to_dhms(end - start, time_buf);
10263
10264 (void) printf(gettext("expanded %s-%u copied %s in %s, "
10265 "on %s"), vname, (int)pres->pres_expanding_vdev,
10266 copied_buf, time_buf, ctime((time_t *)&end));
10267 } else {
10268 char examined_buf[7], total_buf[7], rate_buf[7];
10269 uint64_t copied, total, elapsed, rate, secs_left;
10270 double fraction_done;
10271
10272 assert(pres->pres_state == DSS_SCANNING);
10273
10274 /*
10275 * Expansion is in progress.
10276 */
10277 (void) printf(gettext(
10278 "expansion of %s-%u in progress since %s"),
10279 vname, (int)pres->pres_expanding_vdev, ctime(&start));
10280
10281 copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
10282 total = pres->pres_to_reflow;
10283 fraction_done = (double)copied / total;
10284
10285 /* elapsed time for this pass */
10286 elapsed = time(NULL) - pres->pres_start_time;
10287 elapsed = elapsed > 0 ? elapsed : 1;
10288 rate = copied / elapsed;
10289 rate = rate > 0 ? rate : 1;
10290 secs_left = (total - copied) / rate;
10291
10292 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
10293 zfs_nicenum(total, total_buf, sizeof (total_buf));
10294 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
10295
10296 /*
10297 * do not print estimated time if hours_left is more than
10298 * 30 days
10299 */
10300 (void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
10301 examined_buf, total_buf, rate_buf, 100 * fraction_done);
10302 if (pres->pres_waiting_for_resilver) {
10303 (void) printf(gettext(", paused for resilver or "
10304 "clear\n"));
10305 } else if (secs_left < (30 * 24 * 3600)) {
10306 char time_buf[32];
10307 secs_to_dhms(secs_left, time_buf);
10308 (void) printf(gettext(", %s to go\n"), time_buf);
10309 } else {
10310 (void) printf(gettext(
10311 ", (copy is slow, no estimated time)\n"));
10312 }
10313 }
10314 free(vname);
10315 }
10316 static void
print_checkpoint_status(pool_checkpoint_stat_t * pcs)10317 print_checkpoint_status(pool_checkpoint_stat_t *pcs)
10318 {
10319 time_t start;
10320 char space_buf[7];
10321
10322 if (pcs == NULL || pcs->pcs_state == CS_NONE)
10323 return;
10324
10325 (void) printf(gettext("checkpoint: "));
10326
10327 start = pcs->pcs_start_time;
10328 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
10329
10330 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
10331 char *date = ctime(&start);
10332
10333 /*
10334 * ctime() adds a newline at the end of the generated
10335 * string, thus the weird format specifier and the
10336 * strlen() call used to chop it off from the output.
10337 */
10338 (void) printf(gettext("created %.*s, consumes %s\n"),
10339 (int)(strlen(date) - 1), date, space_buf);
10340 return;
10341 }
10342
10343 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
10344
10345 (void) printf(gettext("discarding, %s remaining.\n"),
10346 space_buf);
10347 }
10348
10349 static void
print_error_log(zpool_handle_t * zhp)10350 print_error_log(zpool_handle_t *zhp)
10351 {
10352 nvlist_t *nverrlist = NULL;
10353 nvpair_t *elem;
10354 char *pathname;
10355 size_t len = MAXPATHLEN * 2;
10356
10357 if (zpool_get_errlog(zhp, &nverrlist) != 0)
10358 return;
10359
10360 (void) printf("errors: Permanent errors have been "
10361 "detected in the following files:\n\n");
10362
10363 pathname = safe_malloc(len);
10364 elem = NULL;
10365 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
10366 nvlist_t *nv;
10367 uint64_t dsobj, obj;
10368
10369 verify(nvpair_value_nvlist(elem, &nv) == 0);
10370 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
10371 &dsobj) == 0);
10372 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
10373 &obj) == 0);
10374 zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
10375 (void) printf("%7s %s\n", "", pathname);
10376 }
10377 free(pathname);
10378 nvlist_free(nverrlist);
10379 }
10380
10381 static void
print_spares(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t ** spares,uint_t nspares)10382 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
10383 uint_t nspares)
10384 {
10385 uint_t i;
10386 char *name;
10387
10388 if (nspares == 0)
10389 return;
10390
10391 (void) printf(gettext("\tspares\n"));
10392
10393 for (i = 0; i < nspares; i++) {
10394 name = zpool_vdev_name(g_zfs, zhp, spares[i],
10395 cb->cb_name_flags);
10396 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
10397 free(name);
10398 }
10399 }
10400
10401 static void
print_l2cache(zpool_handle_t * zhp,status_cbdata_t * cb,nvlist_t ** l2cache,uint_t nl2cache)10402 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
10403 uint_t nl2cache)
10404 {
10405 uint_t i;
10406 char *name;
10407
10408 if (nl2cache == 0)
10409 return;
10410
10411 (void) printf(gettext("\tcache\n"));
10412
10413 for (i = 0; i < nl2cache; i++) {
10414 name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
10415 cb->cb_name_flags);
10416 print_status_config(zhp, cb, name, l2cache[i], 2,
10417 B_FALSE, NULL);
10418 free(name);
10419 }
10420 }
10421
static void
print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal)
{
	/*
	 * Print the "dedup:" section of pool status: DDT entry count,
	 * on-disk/in-core/cached sizes, and the full DDT histogram.
	 */
	ddt_histogram_t *ddh;
	ddt_stat_t *dds;
	ddt_object_t *ddo;
	uint_t c;
	/* Extra space provided for literal display */
	char dspace[32], mspace[32], cspace[32];
	uint64_t cspace_prop;
	enum zfs_nicenum_format format;
	zprop_source_t src;

	/*
	 * If the pool was faulted then we may not have been able to
	 * obtain the config. Otherwise, if we have anything in the dedup
	 * table continue processing the stats.
	 */
	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
	    (uint64_t **)&ddo, &c) != 0)
		return;

	(void) printf("\n");
	(void) printf(gettext(" dedup: "));
	if (ddo->ddo_count == 0) {
		(void) printf(gettext("no DDT entries\n"));
		return;
	}

	/*
	 * Squash cached size into in-core size to handle race.
	 * Only include cached size if it is available.
	 */
	cspace_prop = zpool_get_prop_int(zhp, ZPOOL_PROP_DEDUPCACHED, &src);
	cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
	format = literal ? ZFS_NICENUM_RAW : ZFS_NICENUM_1024;
	zfs_nicenum_format(cspace_prop, cspace, sizeof (cspace), format);
	zfs_nicenum_format(ddo->ddo_dspace, dspace, sizeof (dspace), format);
	zfs_nicenum_format(ddo->ddo_mspace, mspace, sizeof (mspace), format);
	(void) printf("DDT entries %llu, size %s on disk, %s in core",
	    (u_longlong_t)ddo->ddo_count,
	    dspace,
	    mspace);
	/*
	 * src != ZPROP_SRC_DEFAULT indicates the kernel actually
	 * reported a cached-size value for this pool.
	 * NOTE(review): the percentage below divides by ddo_mspace;
	 * ddo_count != 0 here, which presumably implies a nonzero
	 * in-core size — confirm against the kernel's DDT accounting.
	 */
	if (src != ZPROP_SRC_DEFAULT) {
		(void) printf(", %s cached (%.02f%%)",
		    cspace,
		    (double)cspace_prop / (double)ddo->ddo_mspace * 100.0);
	}
	(void) printf("\n");

	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
	    (uint64_t **)&dds, &c) == 0);
	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
	    (uint64_t **)&ddh, &c) == 0);
	zpool_dump_ddt(dds, ddh);
}
10478
10479 #define ST_SIZE 4096
10480 #define AC_SIZE 2048
10481
10482 static void
print_status_reason(zpool_handle_t * zhp,status_cbdata_t * cbp,zpool_status_t reason,zpool_errata_t errata,nvlist_t * item)10483 print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
10484 zpool_status_t reason, zpool_errata_t errata, nvlist_t *item)
10485 {
10486 char status[ST_SIZE];
10487 char action[AC_SIZE];
10488 memset(status, 0, ST_SIZE);
10489 memset(action, 0, AC_SIZE);
10490
10491 switch (reason) {
10492 case ZPOOL_STATUS_MISSING_DEV_R:
10493 (void) snprintf(status, ST_SIZE,
10494 gettext("One or more devices could "
10495 "not be opened. Sufficient replicas exist for\n\tthe pool "
10496 "to continue functioning in a degraded state.\n"));
10497 (void) snprintf(action, AC_SIZE,
10498 gettext("Attach the missing device "
10499 "and online it using 'zpool online'.\n"));
10500 break;
10501
10502 case ZPOOL_STATUS_MISSING_DEV_NR:
10503 (void) snprintf(status, ST_SIZE,
10504 gettext("One or more devices could "
10505 "not be opened. There are insufficient\n\treplicas for the"
10506 " pool to continue functioning.\n"));
10507 (void) snprintf(action, AC_SIZE,
10508 gettext("Attach the missing device "
10509 "and online it using 'zpool online'.\n"));
10510 break;
10511
10512 case ZPOOL_STATUS_CORRUPT_LABEL_R:
10513 (void) snprintf(status, ST_SIZE,
10514 gettext("One or more devices could "
10515 "not be used because the label is missing or\n\tinvalid. "
10516 "Sufficient replicas exist for the pool to continue\n\t"
10517 "functioning in a degraded state.\n"));
10518 (void) snprintf(action, AC_SIZE,
10519 gettext("Replace the device using 'zpool replace'.\n"));
10520 break;
10521
10522 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
10523 (void) snprintf(status, ST_SIZE,
10524 gettext("One or more devices could "
10525 "not be used because the label is missing \n\tor invalid. "
10526 "There are insufficient replicas for the pool to "
10527 "continue\n\tfunctioning.\n"));
10528 zpool_explain_recover(zpool_get_handle(zhp),
10529 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
10530 action, AC_SIZE);
10531 break;
10532
10533 case ZPOOL_STATUS_FAILING_DEV:
10534 (void) snprintf(status, ST_SIZE,
10535 gettext("One or more devices has "
10536 "experienced an unrecoverable error. An\n\tattempt was "
10537 "made to correct the error. Applications are "
10538 "unaffected.\n"));
10539 (void) snprintf(action, AC_SIZE, gettext("Determine if the "
10540 "device needs to be replaced, and clear the errors\n\tusing"
10541 " 'zpool clear' or replace the device with 'zpool "
10542 "replace'.\n"));
10543 break;
10544
10545 case ZPOOL_STATUS_OFFLINE_DEV:
10546 (void) snprintf(status, ST_SIZE,
10547 gettext("One or more devices has "
10548 "been taken offline by the administrator.\n\tSufficient "
10549 "replicas exist for the pool to continue functioning in "
10550 "a\n\tdegraded state.\n"));
10551 (void) snprintf(action, AC_SIZE, gettext("Online the device "
10552 "using 'zpool online' or replace the device with\n\t'zpool "
10553 "replace'.\n"));
10554 break;
10555
10556 case ZPOOL_STATUS_REMOVED_DEV:
10557 (void) snprintf(status, ST_SIZE,
10558 gettext("One or more devices have "
10559 "been removed.\n\tSufficient replicas exist for the pool "
10560 "to continue functioning in a\n\tdegraded state.\n"));
10561 (void) snprintf(action, AC_SIZE, gettext("Online the device "
10562 "using zpool online' or replace the device with\n\t'zpool "
10563 "replace'.\n"));
10564 break;
10565
10566 case ZPOOL_STATUS_RESILVERING:
10567 case ZPOOL_STATUS_REBUILDING:
10568 (void) snprintf(status, ST_SIZE,
10569 gettext("One or more devices is "
10570 "currently being resilvered. The pool will\n\tcontinue "
10571 "to function, possibly in a degraded state.\n"));
10572 (void) snprintf(action, AC_SIZE,
10573 gettext("Wait for the resilver to complete.\n"));
10574 break;
10575
10576 case ZPOOL_STATUS_REBUILD_SCRUB:
10577 (void) snprintf(status, ST_SIZE,
10578 gettext("One or more devices have "
10579 "been sequentially resilvered, scrubbing\n\tthe pool "
10580 "is recommended.\n"));
10581 (void) snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
10582 "verify all data checksums.\n"));
10583 break;
10584
10585 case ZPOOL_STATUS_CORRUPT_DATA:
10586 (void) snprintf(status, ST_SIZE,
10587 gettext("One or more devices has "
10588 "experienced an error resulting in data\n\tcorruption. "
10589 "Applications may be affected.\n"));
10590 (void) snprintf(action, AC_SIZE,
10591 gettext("Restore the file in question"
10592 " if possible. Otherwise restore the\n\tentire pool from "
10593 "backup.\n"));
10594 break;
10595
10596 case ZPOOL_STATUS_CORRUPT_POOL:
10597 (void) snprintf(status, ST_SIZE, gettext("The pool metadata is "
10598 "corrupted and the pool cannot be opened.\n"));
10599 zpool_explain_recover(zpool_get_handle(zhp),
10600 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
10601 action, AC_SIZE);
10602 break;
10603
10604 case ZPOOL_STATUS_VERSION_OLDER:
10605 (void) snprintf(status, ST_SIZE,
10606 gettext("The pool is formatted using "
10607 "a legacy on-disk format. The pool can\n\tstill be used, "
10608 "but some features are unavailable.\n"));
10609 (void) snprintf(action, AC_SIZE,
10610 gettext("Upgrade the pool using "
10611 "'zpool upgrade'. Once this is done, the\n\tpool will no "
10612 "longer be accessible on software that does not support\n\t"
10613 "feature flags.\n"));
10614 break;
10615
10616 case ZPOOL_STATUS_VERSION_NEWER:
10617 (void) snprintf(status, ST_SIZE,
10618 gettext("The pool has been upgraded "
10619 "to a newer, incompatible on-disk version.\n\tThe pool "
10620 "cannot be accessed on this system.\n"));
10621 (void) snprintf(action, AC_SIZE,
10622 gettext("Access the pool from a "
10623 "system running more recent software, or\n\trestore the "
10624 "pool from backup.\n"));
10625 break;
10626
10627 case ZPOOL_STATUS_FEAT_DISABLED:
10628 (void) snprintf(status, ST_SIZE, gettext("Some supported and "
10629 "requested features are not enabled on the pool.\n\t"
10630 "The pool can still be used, but some features are "
10631 "unavailable.\n"));
10632 (void) snprintf(action, AC_SIZE,
10633 gettext("Enable all features using "
10634 "'zpool upgrade'. Once this is done,\n\tthe pool may no "
10635 "longer be accessible by software that does not support\n\t"
10636 "the features. See zpool-features(7) for details.\n"));
10637 break;
10638
10639 case ZPOOL_STATUS_COMPATIBILITY_ERR:
10640 (void) snprintf(status, ST_SIZE, gettext("This pool has a "
10641 "compatibility list specified, but it could not be\n\t"
10642 "read/parsed at this time. The pool can still be used, "
10643 "but this\n\tshould be investigated.\n"));
10644 (void) snprintf(action, AC_SIZE,
10645 gettext("Check the value of the "
10646 "'compatibility' property against the\n\t"
10647 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
10648 ZPOOL_DATA_COMPAT_D ".\n"));
10649 break;
10650
10651 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
10652 (void) snprintf(status, ST_SIZE, gettext("One or more features "
10653 "are enabled on the pool despite not being\n\t"
10654 "requested by the 'compatibility' property.\n"));
10655 (void) snprintf(action, AC_SIZE, gettext("Consider setting "
10656 "'compatibility' to an appropriate value, or\n\t"
10657 "adding needed features to the relevant file in\n\t"
10658 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
10659 break;
10660
10661 case ZPOOL_STATUS_UNSUP_FEAT_READ:
10662 (void) snprintf(status, ST_SIZE,
10663 gettext("The pool cannot be accessed "
10664 "on this system because it uses the\n\tfollowing feature(s)"
10665 " not supported on this system:\n"));
10666 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
10667 1024);
10668 (void) snprintf(action, AC_SIZE,
10669 gettext("Access the pool from a "
10670 "system that supports the required feature(s),\n\tor "
10671 "restore the pool from backup.\n"));
10672 break;
10673
10674 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
10675 (void) snprintf(status, ST_SIZE, gettext("The pool can only be "
10676 "accessed in read-only mode on this system. It\n\tcannot be"
10677 " accessed in read-write mode because it uses the "
10678 "following\n\tfeature(s) not supported on this system:\n"));
10679 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
10680 1024);
10681 (void) snprintf(action, AC_SIZE,
10682 gettext("The pool cannot be accessed "
10683 "in read-write mode. Import the pool with\n"
10684 "\t\"-o readonly=on\", access the pool from a system that "
10685 "supports the\n\trequired feature(s), or restore the "
10686 "pool from backup.\n"));
10687 break;
10688
10689 case ZPOOL_STATUS_FAULTED_DEV_R:
10690 (void) snprintf(status, ST_SIZE,
10691 gettext("One or more devices are "
10692 "faulted in response to persistent errors.\n\tSufficient "
10693 "replicas exist for the pool to continue functioning "
10694 "in a\n\tdegraded state.\n"));
10695 (void) snprintf(action, AC_SIZE,
10696 gettext("Replace the faulted device, "
10697 "or use 'zpool clear' to mark the device\n\trepaired.\n"));
10698 break;
10699
10700 case ZPOOL_STATUS_FAULTED_DEV_NR:
10701 (void) snprintf(status, ST_SIZE,
10702 gettext("One or more devices are "
10703 "faulted in response to persistent errors. There are "
10704 "insufficient replicas for the pool to\n\tcontinue "
10705 "functioning.\n"));
10706 (void) snprintf(action, AC_SIZE,
10707 gettext("Destroy and re-create the "
10708 "pool from a backup source. Manually marking the device\n"
10709 "\trepaired using 'zpool clear' may allow some data "
10710 "to be recovered.\n"));
10711 break;
10712
10713 case ZPOOL_STATUS_IO_FAILURE_MMP:
10714 (void) snprintf(status, ST_SIZE,
10715 gettext("The pool is suspended "
10716 "because multihost writes failed or were delayed;\n\t"
10717 "another system could import the pool undetected.\n"));
10718 (void) snprintf(action, AC_SIZE,
10719 gettext("Make sure the pool's devices"
10720 " are connected, then reboot your system and\n\timport the "
10721 "pool or run 'zpool clear' to resume the pool.\n"));
10722 break;
10723
10724 case ZPOOL_STATUS_IO_FAILURE_WAIT:
10725 case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
10726 (void) snprintf(status, ST_SIZE,
10727 gettext("One or more devices are "
10728 "faulted in response to IO failures.\n"));
10729 (void) snprintf(action, AC_SIZE,
10730 gettext("Make sure the affected "
10731 "devices are connected, then run 'zpool clear'.\n"));
10732 break;
10733
10734 case ZPOOL_STATUS_BAD_LOG:
10735 (void) snprintf(status, ST_SIZE, gettext("An intent log record "
10736 "could not be read.\n"
10737 "\tWaiting for administrator intervention to fix the "
10738 "faulted pool.\n"));
10739 (void) snprintf(action, AC_SIZE,
10740 gettext("Either restore the affected "
10741 "device(s) and run 'zpool online',\n"
10742 "\tor ignore the intent log records by running "
10743 "'zpool clear'.\n"));
10744 break;
10745
10746 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
10747 (void) snprintf(status, ST_SIZE,
10748 gettext("One or more devices are "
10749 "configured to use a non-native block size.\n"
10750 "\tExpect reduced performance.\n"));
10751 (void) snprintf(action, AC_SIZE,
10752 gettext("Replace affected devices "
10753 "with devices that support the\n\tconfigured block size, "
10754 "or migrate data to a properly configured\n\tpool.\n"));
10755 break;
10756
10757 case ZPOOL_STATUS_HOSTID_MISMATCH:
10758 (void) snprintf(status, ST_SIZE,
10759 gettext("Mismatch between pool hostid"
10760 " and system hostid on imported pool.\n\tThis pool was "
10761 "previously imported into a system with a different "
10762 "hostid,\n\tand then was verbatim imported into this "
10763 "system.\n"));
10764 (void) snprintf(action, AC_SIZE,
10765 gettext("Export this pool on all "
10766 "systems on which it is imported.\n"
10767 "\tThen import it to correct the mismatch.\n"));
10768 break;
10769
10770 case ZPOOL_STATUS_ERRATA:
10771 (void) snprintf(status, ST_SIZE,
10772 gettext("Errata #%d detected.\n"), errata);
10773 switch (errata) {
10774 case ZPOOL_ERRATA_NONE:
10775 break;
10776
10777 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
10778 (void) snprintf(action, AC_SIZE,
10779 gettext("To correct the issue run "
10780 "'zpool scrub'.\n"));
10781 break;
10782
10783 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
10784 (void) strlcat(status, gettext("\tExisting encrypted "
10785 "datasets contain an on-disk incompatibility\n\t "
10786 "which needs to be corrected.\n"), ST_SIZE);
10787 (void) snprintf(action, AC_SIZE,
10788 gettext("To correct the issue"
10789 " backup existing encrypted datasets to new\n\t"
10790 "encrypted datasets and destroy the old ones. "
10791 "'zfs mount -o ro' can\n\tbe used to temporarily "
10792 "mount existing encrypted datasets readonly.\n"));
10793 break;
10794
10795 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
10796 (void) strlcat(status, gettext("\tExisting encrypted "
10797 "snapshots and bookmarks contain an on-disk\n\t"
10798 "incompatibility. This may cause on-disk "
10799 "corruption if they are used\n\twith "
10800 "'zfs recv'.\n"), ST_SIZE);
10801 (void) snprintf(action, AC_SIZE,
10802 gettext("To correct the"
10803 "issue, enable the bookmark_v2 feature. No "
10804 "additional\n\taction is needed if there are no "
10805 "encrypted snapshots or bookmarks.\n\tIf preserving"
10806 "the encrypted snapshots and bookmarks is required,"
10807 " use\n\ta non-raw send to backup and restore them."
10808 " Alternately, they may be\n\tremoved to resolve "
10809 "the incompatibility.\n"));
10810 break;
10811
10812 default:
10813 /*
10814 * All errata which allow the pool to be imported
10815 * must contain an action message.
10816 */
10817 assert(0);
10818 }
10819 break;
10820
10821 default:
10822 /*
10823 * The remaining errors can't actually be generated, yet.
10824 */
10825 assert(reason == ZPOOL_STATUS_OK);
10826 }
10827
10828 if (status[0] != 0) {
10829 if (cbp->cb_json)
10830 fnvlist_add_string(item, "status", status);
10831 else {
10832 (void) printf_color(ANSI_BOLD, gettext("status: "));
10833 (void) printf_color(ANSI_YELLOW, status);
10834 }
10835 }
10836
10837 if (action[0] != 0) {
10838 if (cbp->cb_json)
10839 fnvlist_add_string(item, "action", action);
10840 else {
10841 (void) printf_color(ANSI_BOLD, gettext("action: "));
10842 (void) printf_color(ANSI_YELLOW, action);
10843 }
10844 }
10845 }
10846
10847 static int
status_callback_json(zpool_handle_t * zhp,void * data)10848 status_callback_json(zpool_handle_t *zhp, void *data)
10849 {
10850 status_cbdata_t *cbp = data;
10851 nvlist_t *config, *nvroot;
10852 const char *msgid;
10853 char pool_guid[256];
10854 char msgbuf[256];
10855 uint64_t guid;
10856 zpool_status_t reason;
10857 zpool_errata_t errata;
10858 uint_t c;
10859 vdev_stat_t *vs;
10860 nvlist_t *item, *d, *load_info, *vds;
10861
10862 /* If dedup stats were requested, also fetch dedupcached. */
10863 if (cbp->cb_dedup_stats > 1)
10864 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
10865 reason = zpool_get_status(zhp, &msgid, &errata);
10866 /*
10867 * If we were given 'zpool status -x', only report those pools with
10868 * problems.
10869 */
10870 if (cbp->cb_explain &&
10871 (reason == ZPOOL_STATUS_OK ||
10872 reason == ZPOOL_STATUS_VERSION_OLDER ||
10873 reason == ZPOOL_STATUS_FEAT_DISABLED ||
10874 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
10875 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
10876 return (0);
10877 }
10878
10879 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
10880 item = fnvlist_alloc();
10881 vds = fnvlist_alloc();
10882 fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int);
10883 config = zpool_get_config(zhp, NULL);
10884
10885 if (config != NULL) {
10886 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
10887 verify(nvlist_lookup_uint64_array(nvroot,
10888 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);
10889 if (cbp->cb_json_pool_key_guid) {
10890 guid = fnvlist_lookup_uint64(config,
10891 ZPOOL_CONFIG_POOL_GUID);
10892 (void) snprintf(pool_guid, 256, "%llu",
10893 (u_longlong_t)guid);
10894 }
10895 cbp->cb_count++;
10896
10897 print_status_reason(zhp, cbp, reason, errata, item);
10898 if (msgid != NULL) {
10899 (void) snprintf(msgbuf, 256,
10900 "https://openzfs.github.io/openzfs-docs/msg/%s",
10901 msgid);
10902 fnvlist_add_string(item, "msgid", msgid);
10903 fnvlist_add_string(item, "moreinfo", msgbuf);
10904 }
10905
10906 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
10907 &load_info) == 0) {
10908 fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO,
10909 load_info);
10910 }
10911
10912 scan_status_nvlist(zhp, cbp, nvroot, item);
10913 removal_status_nvlist(zhp, cbp, nvroot, item);
10914 checkpoint_status_nvlist(nvroot, cbp, item);
10915 raidz_expand_status_nvlist(zhp, cbp, nvroot, item);
10916 vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds);
10917 if (cbp->cb_flat_vdevs) {
10918 class_vdevs_nvlist(zhp, cbp, nvroot,
10919 VDEV_ALLOC_BIAS_DEDUP, vds);
10920 class_vdevs_nvlist(zhp, cbp, nvroot,
10921 VDEV_ALLOC_BIAS_SPECIAL, vds);
10922 class_vdevs_nvlist(zhp, cbp, nvroot,
10923 VDEV_ALLOC_CLASS_LOGS, vds);
10924 l2cache_nvlist(zhp, cbp, nvroot, vds);
10925 spares_nvlist(zhp, cbp, nvroot, vds);
10926
10927 fnvlist_add_nvlist(item, "vdevs", vds);
10928 fnvlist_free(vds);
10929 } else {
10930 fnvlist_add_nvlist(item, "vdevs", vds);
10931 fnvlist_free(vds);
10932
10933 class_vdevs_nvlist(zhp, cbp, nvroot,
10934 VDEV_ALLOC_BIAS_DEDUP, item);
10935 class_vdevs_nvlist(zhp, cbp, nvroot,
10936 VDEV_ALLOC_BIAS_SPECIAL, item);
10937 class_vdevs_nvlist(zhp, cbp, nvroot,
10938 VDEV_ALLOC_CLASS_LOGS, item);
10939 l2cache_nvlist(zhp, cbp, nvroot, item);
10940 spares_nvlist(zhp, cbp, nvroot, item);
10941 }
10942 dedup_stats_nvlist(zhp, cbp, item);
10943 errors_nvlist(zhp, cbp, item);
10944 }
10945 if (cbp->cb_json_pool_key_guid) {
10946 fnvlist_add_nvlist(d, pool_guid, item);
10947 } else {
10948 fnvlist_add_nvlist(d, zpool_get_name(zhp),
10949 item);
10950 }
10951 fnvlist_free(item);
10952 return (0);
10953 }
10954
10955 /*
10956 * Display a summary of pool status. Displays a summary such as:
10957 *
10958 * pool: tank
10959 * status: DEGRADED
10960 * reason: One or more devices ...
10961 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
10962 * config:
10963 * mirror DEGRADED
10964 * c1t0d0 OK
10965 * c2t0d0 UNAVAIL
10966 *
10967 * When given the '-v' option, we print out the complete config. If the '-e'
10968 * option is specified, then we print out error rate information as well.
10969 */
10970 static int
status_callback(zpool_handle_t * zhp,void * data)10971 status_callback(zpool_handle_t *zhp, void *data)
10972 {
10973 status_cbdata_t *cbp = data;
10974 nvlist_t *config, *nvroot;
10975 const char *msgid;
10976 zpool_status_t reason;
10977 zpool_errata_t errata;
10978 const char *health;
10979 uint_t c;
10980 vdev_stat_t *vs;
10981
10982 /* If dedup stats were requested, also fetch dedupcached. */
10983 if (cbp->cb_dedup_stats > 1)
10984 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
10985
10986 config = zpool_get_config(zhp, NULL);
10987 reason = zpool_get_status(zhp, &msgid, &errata);
10988
10989 cbp->cb_count++;
10990
10991 /*
10992 * If we were given 'zpool status -x', only report those pools with
10993 * problems.
10994 */
10995 if (cbp->cb_explain &&
10996 (reason == ZPOOL_STATUS_OK ||
10997 reason == ZPOOL_STATUS_VERSION_OLDER ||
10998 reason == ZPOOL_STATUS_FEAT_DISABLED ||
10999 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
11000 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
11001 if (!cbp->cb_allpools) {
11002 (void) printf(gettext("pool '%s' is healthy\n"),
11003 zpool_get_name(zhp));
11004 if (cbp->cb_first)
11005 cbp->cb_first = B_FALSE;
11006 }
11007 return (0);
11008 }
11009
11010 if (cbp->cb_first)
11011 cbp->cb_first = B_FALSE;
11012 else
11013 (void) printf("\n");
11014
11015 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
11016 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
11017 (uint64_t **)&vs, &c) == 0);
11018
11019 health = zpool_get_state_str(zhp);
11020
11021 printf(" ");
11022 (void) printf_color(ANSI_BOLD, gettext("pool:"));
11023 printf(" %s\n", zpool_get_name(zhp));
11024 (void) fputc(' ', stdout);
11025 (void) printf_color(ANSI_BOLD, gettext("state: "));
11026
11027 (void) printf_color(health_str_to_color(health), "%s", health);
11028
11029 (void) fputc('\n', stdout);
11030 print_status_reason(zhp, cbp, reason, errata, NULL);
11031
11032 if (msgid != NULL) {
11033 printf(" ");
11034 (void) printf_color(ANSI_BOLD, gettext("see:"));
11035 printf(gettext(
11036 " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
11037 msgid);
11038 }
11039
11040 if (config != NULL) {
11041 uint64_t nerr;
11042 nvlist_t **spares, **l2cache;
11043 uint_t nspares, nl2cache;
11044
11045 print_scan_status(zhp, nvroot);
11046
11047 pool_removal_stat_t *prs = NULL;
11048 (void) nvlist_lookup_uint64_array(nvroot,
11049 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
11050 print_removal_status(zhp, prs);
11051
11052 pool_checkpoint_stat_t *pcs = NULL;
11053 (void) nvlist_lookup_uint64_array(nvroot,
11054 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
11055 print_checkpoint_status(pcs);
11056
11057 pool_raidz_expand_stat_t *pres = NULL;
11058 (void) nvlist_lookup_uint64_array(nvroot,
11059 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
11060 print_raidz_expand_status(zhp, pres);
11061
11062 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
11063 cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
11064 if (cbp->cb_namewidth < 10)
11065 cbp->cb_namewidth = 10;
11066
11067 color_start(ANSI_BOLD);
11068 (void) printf(gettext("config:\n\n"));
11069 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
11070 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
11071 "CKSUM");
11072 color_end();
11073
11074 if (cbp->cb_print_slow_ios) {
11075 (void) printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
11076 }
11077
11078 if (cbp->cb_print_power) {
11079 (void) printf_color(ANSI_BOLD, " %5s",
11080 gettext("POWER"));
11081 }
11082
11083 if (cbp->cb_print_dio_verify) {
11084 (void) printf_color(ANSI_BOLD, " %5s", gettext("DIO"));
11085 }
11086
11087 if (cbp->vcdl != NULL)
11088 print_cmd_columns(cbp->vcdl, 0);
11089
11090 printf("\n");
11091
11092 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
11093 B_FALSE, NULL);
11094
11095 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
11096 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
11097 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
11098
11099 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
11100 &l2cache, &nl2cache) == 0)
11101 print_l2cache(zhp, cbp, l2cache, nl2cache);
11102
11103 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
11104 &spares, &nspares) == 0)
11105 print_spares(zhp, cbp, spares, nspares);
11106
11107 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
11108 &nerr) == 0) {
11109 (void) printf("\n");
11110 if (nerr == 0) {
11111 (void) printf(gettext(
11112 "errors: No known data errors\n"));
11113 } else if (!cbp->cb_verbose) {
11114 color_start(ANSI_RED);
11115 (void) printf(gettext("errors: %llu data "
11116 "errors, use '-v' for a list\n"),
11117 (u_longlong_t)nerr);
11118 color_end();
11119 } else {
11120 print_error_log(zhp);
11121 }
11122 }
11123
11124 if (cbp->cb_dedup_stats)
11125 print_dedup_stats(zhp, config, cbp->cb_literal);
11126 } else {
11127 (void) printf(gettext("config: The configuration cannot be "
11128 "determined.\n"));
11129 }
11130
11131 return (0);
11132 }
11133
11134 /*
11135 * zpool status [-dDegiLpPstvx] [-c [script1,script2,...]] ...
11136 * [-j|--json [--json-flat-vdevs] [--json-int] ...
11137 * [--json-pool-key-guid]] [--power] [-T d|u] ...
11138 * [pool] [interval [count]]
11139 *
11140 * -c CMD For each vdev, run command CMD
11141 * -D Display dedup status (undocumented)
11142 * -d Display Direct I/O write verify errors
11143 * -e Display only unhealthy vdevs
11144 * -g Display guid for individual vdev name.
11145 * -i Display vdev initialization status.
11146 * -j [...] Display output in JSON format
11147 * --json-flat-vdevs Display vdevs in flat hierarchy
11148 * --json-int Display numbers in integer format instead of string
11149 * --json-pool-key-guid Use pool GUID as key for pool objects
11150 * -L Follow links when resolving vdev path name.
11151 * -P Display full path for vdev name.
11152 * -p Display values in parsable (exact) format.
11153 * --power Display vdev enclosure slot power status
11154 * -s Display slow IOs column.
11155 * -T Display a timestamp in date(1) or Unix format
11156 * -t Display vdev TRIM status.
11157 * -v Display complete error logs
11158 * -x Display only pools with potential problems
11159 *
11160 * Describes the health status of all pools or some subset.
11161 */
int
zpool_do_status(int argc, char **argv)
{
	int c;
	int ret;
	float interval = 0;	/* 0 means run once, no repeat */
	unsigned long count = 0;	/* 0 means repeat forever */
	status_cbdata_t cb = { 0 };
	nvlist_t *data;
	char *cmd = NULL;	/* -c: per-vdev script(s) to run */

	struct option long_options[] = {
		{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
		{"json", no_argument, NULL, 'j'},
		{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
		{"json-flat-vdevs", no_argument, NULL,
		    ZPOOL_OPTION_JSON_FLAT_VDEVS},
		{"json-pool-key-guid", no_argument, NULL,
		    ZPOOL_OPTION_POOL_KEY_GUID},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "c:jdDegiLpPstT:vx", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'c':
			/* -c may only be given once */
			if (cmd != NULL) {
				fprintf(stderr,
				    gettext("Can't set -c flag twice\n"));
				exit(1);
			}

			/* Scripts can be disabled site-wide via env var. */
			if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
				fprintf(stderr, gettext(
				    "Can't run -c, disabled by "
				    "ZPOOL_SCRIPTS_ENABLED.\n"));
				exit(1);
			}

			/*
			 * NOTE(review): uid_t is unsigned on common
			 * platforms, so "<= 0" effectively tests for root
			 * (uid 0); running -c scripts as root requires an
			 * explicit opt-in via ZPOOL_SCRIPTS_AS_ROOT.
			 */
			if ((getuid() <= 0 || geteuid() <= 0) &&
			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
				fprintf(stderr, gettext(
				    "Can't run -c with root privileges "
				    "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
				exit(1);
			}
			cmd = optarg;
			break;
		case 'd':
			cb.cb_print_dio_verify = B_TRUE;
			break;
		case 'D':
			/* -DD additionally requests dedupcached; clamp at 2 */
			if (++cb.cb_dedup_stats > 2)
				cb.cb_dedup_stats = 2;
			break;
		case 'e':
			cb.cb_print_unhealthy = B_TRUE;
			break;
		case 'g':
			cb.cb_name_flags |= VDEV_NAME_GUID;
			break;
		case 'i':
			cb.cb_print_vdev_init = B_TRUE;
			break;
		case 'L':
			cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'p':
			cb.cb_literal = B_TRUE;
			break;
		case 'P':
			cb.cb_name_flags |= VDEV_NAME_PATH;
			break;
		case 's':
			cb.cb_print_slow_ios = B_TRUE;
			break;
		case 't':
			cb.cb_print_vdev_trim = B_TRUE;
			break;
		case 'T':
			/* sets the global timestamp_fmt (d or u) */
			get_timestamp_arg(*optarg);
			break;
		case 'v':
			cb.cb_verbose = B_TRUE;
			break;
		case 'j':
			cb.cb_json = B_TRUE;
			break;
		case 'x':
			cb.cb_explain = B_TRUE;
			break;
		case ZPOOL_OPTION_POWER:
			cb.cb_print_power = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_FLAT_VDEVS:
			cb.cb_flat_vdevs = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_NUMS_AS_INT:
			/* integer JSON output implies literal values */
			cb.cb_json_as_int = B_TRUE;
			cb.cb_literal = B_TRUE;
			break;
		case ZPOOL_OPTION_POOL_KEY_GUID:
			cb.cb_json_pool_key_guid = B_TRUE;
			break;
		case '?':
			/* "-c" with no argument lists available scripts */
			if (optopt == 'c') {
				print_zpool_script_list("status");
				exit(0);
			} else {
				fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* consume trailing [interval [count]] arguments, if present */
	get_interval_count(&argc, argv, &interval, &count);

	if (argc == 0)
		cb.cb_allpools = B_TRUE;

	cb.cb_first = B_TRUE;
	cb.cb_print_status = B_TRUE;

	/* The --json-* modifiers are only meaningful together with -j. */
	if (cb.cb_flat_vdevs && !cb.cb_json) {
		fprintf(stderr, gettext("'--json-flat-vdevs' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (cb.cb_json_as_int && !cb.cb_json) {
		(void) fprintf(stderr, gettext("'--json-int' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (!cb.cb_json && cb.cb_json_pool_key_guid) {
		(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
		    " works with '-j' option\n"));
		usage(B_FALSE);
	}

	/* Repeat until count/interval is exhausted (once if interval==0). */
	for (;;) {
		if (cb.cb_json) {
			/* fresh JSON schema + empty "pools" map per pass */
			cb.cb_jsobj = zpool_json_schema(0, 1);
			data = fnvlist_alloc();
			fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
			fnvlist_free(data);
		}

		if (timestamp_fmt != NODATE) {
			if (cb.cb_json) {
				if (cb.cb_json_as_int) {
					fnvlist_add_uint64(cb.cb_jsobj, "time",
					    time(NULL));
				} else {
					char ts[128];
					get_timestamp(timestamp_fmt, ts, 128);
					fnvlist_add_string(cb.cb_jsobj, "time",
					    ts);
				}
			} else
				print_timestamp(timestamp_fmt);
		}

		/* run -c scripts against every vdev before printing */
		if (cmd != NULL)
			cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
			    NULL, NULL, 0, 0);

		if (cb.cb_json) {
			ret = for_each_pool(argc, argv, B_TRUE, NULL,
			    ZFS_TYPE_POOL, cb.cb_literal,
			    status_callback_json, &cb);
		} else {
			ret = for_each_pool(argc, argv, B_TRUE, NULL,
			    ZFS_TYPE_POOL, cb.cb_literal,
			    status_callback, &cb);
		}

		if (cb.vcdl != NULL)
			free_vdev_cmd_data_list(cb.vcdl);

		if (cb.cb_json) {
			/* emit the JSON document, or discard it on error */
			if (ret == 0)
				zcmd_print_json(cb.cb_jsobj);
			else
				nvlist_free(cb.cb_jsobj);
		} else {
			if (argc == 0 && cb.cb_count == 0) {
				(void) fprintf(stderr, "%s",
				    gettext("no pools available\n"));
			} else if (cb.cb_explain && cb.cb_first &&
			    cb.cb_allpools) {
				/* -x with no problem pools found */
				(void) printf("%s",
				    gettext("all pools are healthy\n"));
			}
		}

		if (ret != 0)
			return (ret);

		if (interval == 0)
			break;

		if (count != 0 && --count == 0)
			break;

		(void) fflush(stdout);
		(void) fsleep(interval);
	}

	return (0);
}
11380
/*
 * Callback state shared by the 'zpool upgrade' pool-iteration callbacks
 * (upgrade_cb, upgrade_list_older_cb, upgrade_list_disabled_cb).
 */
typedef struct upgrade_cbdata {
	int cb_first;		/* true until a header/first pool is printed */
	int cb_argc;		/* saved command-line argc (use not visible here) */
	uint64_t cb_version;	/* target SPA version to upgrade pools to */
	char **cb_argv;		/* saved command-line argv (use not visible here) */
} upgrade_cbdata_t;
11387
11388 static int
check_unsupp_fs(zfs_handle_t * zhp,void * unsupp_fs)11389 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
11390 {
11391 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
11392 int *count = (int *)unsupp_fs;
11393
11394 if (zfs_version > ZPL_VERSION) {
11395 (void) printf(gettext("%s (v%d) is not supported by this "
11396 "implementation of ZFS.\n"),
11397 zfs_get_name(zhp), zfs_version);
11398 (*count)++;
11399 }
11400
11401 (void) zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
11402
11403 zfs_close(zhp);
11404
11405 return (0);
11406 }
11407
11408 static int
upgrade_version(zpool_handle_t * zhp,uint64_t version)11409 upgrade_version(zpool_handle_t *zhp, uint64_t version)
11410 {
11411 int ret;
11412 nvlist_t *config;
11413 uint64_t oldversion;
11414 int unsupp_fs = 0;
11415
11416 config = zpool_get_config(zhp, NULL);
11417 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11418 &oldversion) == 0);
11419
11420 char compat[ZFS_MAXPROPLEN];
11421 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
11422 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
11423 compat[0] = '\0';
11424
11425 assert(SPA_VERSION_IS_SUPPORTED(oldversion));
11426 assert(oldversion < version);
11427
11428 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
11429 if (ret != 0)
11430 return (ret);
11431
11432 if (unsupp_fs) {
11433 (void) fprintf(stderr, gettext("Upgrade not performed due "
11434 "to %d unsupported filesystems (max v%d).\n"),
11435 unsupp_fs, (int)ZPL_VERSION);
11436 return (1);
11437 }
11438
11439 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
11440 (void) fprintf(stderr, gettext("Upgrade not performed because "
11441 "'compatibility' property set to '"
11442 ZPOOL_COMPAT_LEGACY "'.\n"));
11443 return (1);
11444 }
11445
11446 ret = zpool_upgrade(zhp, version);
11447 if (ret != 0)
11448 return (ret);
11449
11450 if (version >= SPA_VERSION_FEATURES) {
11451 (void) printf(gettext("Successfully upgraded "
11452 "'%s' from version %llu to feature flags.\n"),
11453 zpool_get_name(zhp), (u_longlong_t)oldversion);
11454 } else {
11455 (void) printf(gettext("Successfully upgraded "
11456 "'%s' from version %llu to version %llu.\n"),
11457 zpool_get_name(zhp), (u_longlong_t)oldversion,
11458 (u_longlong_t)version);
11459 }
11460
11461 return (0);
11462 }
11463
11464 static int
upgrade_enable_all(zpool_handle_t * zhp,int * countp)11465 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
11466 {
11467 int i, ret, count;
11468 boolean_t firstff = B_TRUE;
11469 nvlist_t *enabled = zpool_get_features(zhp);
11470
11471 char compat[ZFS_MAXPROPLEN];
11472 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
11473 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
11474 compat[0] = '\0';
11475
11476 boolean_t requested_features[SPA_FEATURES];
11477 if (zpool_do_load_compat(compat, requested_features) !=
11478 ZPOOL_COMPATIBILITY_OK)
11479 return (-1);
11480
11481 count = 0;
11482 for (i = 0; i < SPA_FEATURES; i++) {
11483 const char *fname = spa_feature_table[i].fi_uname;
11484 const char *fguid = spa_feature_table[i].fi_guid;
11485
11486 if (!spa_feature_table[i].fi_zfs_mod_supported ||
11487 (spa_feature_table[i].fi_flags & ZFEATURE_FLAG_NO_UPGRADE))
11488 continue;
11489
11490 if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
11491 char *propname;
11492 verify(-1 != asprintf(&propname, "feature@%s", fname));
11493 ret = zpool_set_prop(zhp, propname,
11494 ZFS_FEATURE_ENABLED);
11495 if (ret != 0) {
11496 free(propname);
11497 return (ret);
11498 }
11499 count++;
11500
11501 if (firstff) {
11502 (void) printf(gettext("Enabled the "
11503 "following features on '%s':\n"),
11504 zpool_get_name(zhp));
11505 firstff = B_FALSE;
11506 }
11507 (void) printf(gettext(" %s\n"), fname);
11508 free(propname);
11509 }
11510 }
11511
11512 if (countp != NULL)
11513 *countp = count;
11514 return (0);
11515 }
11516
11517 static int
upgrade_cb(zpool_handle_t * zhp,void * arg)11518 upgrade_cb(zpool_handle_t *zhp, void *arg)
11519 {
11520 upgrade_cbdata_t *cbp = arg;
11521 nvlist_t *config;
11522 uint64_t version;
11523 boolean_t modified_pool = B_FALSE;
11524 int ret;
11525
11526 config = zpool_get_config(zhp, NULL);
11527 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11528 &version) == 0);
11529
11530 assert(SPA_VERSION_IS_SUPPORTED(version));
11531
11532 if (version < cbp->cb_version) {
11533 cbp->cb_first = B_FALSE;
11534 ret = upgrade_version(zhp, cbp->cb_version);
11535 if (ret != 0)
11536 return (ret);
11537 modified_pool = B_TRUE;
11538
11539 /*
11540 * If they did "zpool upgrade -a", then we could
11541 * be doing ioctls to different pools. We need
11542 * to log this history once to each pool, and bypass
11543 * the normal history logging that happens in main().
11544 */
11545 (void) zpool_log_history(g_zfs, history_str);
11546 log_history = B_FALSE;
11547 }
11548
11549 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
11550 int count;
11551 ret = upgrade_enable_all(zhp, &count);
11552 if (ret != 0)
11553 return (ret);
11554
11555 if (count > 0) {
11556 cbp->cb_first = B_FALSE;
11557 modified_pool = B_TRUE;
11558 }
11559 }
11560
11561 if (modified_pool) {
11562 (void) printf("\n");
11563 (void) after_zpool_upgrade(zhp);
11564 }
11565
11566 return (0);
11567 }
11568
11569 static int
upgrade_list_older_cb(zpool_handle_t * zhp,void * arg)11570 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
11571 {
11572 upgrade_cbdata_t *cbp = arg;
11573 nvlist_t *config;
11574 uint64_t version;
11575
11576 config = zpool_get_config(zhp, NULL);
11577 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11578 &version) == 0);
11579
11580 assert(SPA_VERSION_IS_SUPPORTED(version));
11581
11582 if (version < SPA_VERSION_FEATURES) {
11583 if (cbp->cb_first) {
11584 (void) printf(gettext("The following pools are "
11585 "formatted with legacy version numbers and can\n"
11586 "be upgraded to use feature flags. After "
11587 "being upgraded, these pools\nwill no "
11588 "longer be accessible by software that does not "
11589 "support feature\nflags.\n\n"
11590 "Note that setting a pool's 'compatibility' "
11591 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
11592 "inhibit upgrades.\n\n"));
11593 (void) printf(gettext("VER POOL\n"));
11594 (void) printf(gettext("--- ------------\n"));
11595 cbp->cb_first = B_FALSE;
11596 }
11597
11598 (void) printf("%2llu %s\n", (u_longlong_t)version,
11599 zpool_get_name(zhp));
11600 }
11601
11602 return (0);
11603 }
11604
/*
 * zpool_iter() callback for "zpool upgrade" with no arguments: for each
 * feature-flags pool, list every supported feature that is not yet enabled
 * on it.  cbp->cb_first gates a one-time explanatory banner across all
 * pools; poolfirst gates the per-pool name header.  Always returns 0.
 */
static int
upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
{
	upgrade_cbdata_t *cbp = arg;
	nvlist_t *config;
	uint64_t version;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);

	if (version >= SPA_VERSION_FEATURES) {
		int i;
		boolean_t poolfirst = B_TRUE;
		/* Features already enabled on this pool, keyed by guid. */
		nvlist_t *enabled = zpool_get_features(zhp);

		for (i = 0; i < SPA_FEATURES; i++) {
			const char *fguid = spa_feature_table[i].fi_guid;
			const char *fname = spa_feature_table[i].fi_uname;

			/* Skip features unknown to the loaded zfs module. */
			if (!spa_feature_table[i].fi_zfs_mod_supported)
				continue;

			if (!nvlist_exists(enabled, fguid)) {
				if (cbp->cb_first) {
					/* One-time banner for all pools. */
					(void) printf(gettext("\nSome "
					    "supported features are not "
					    "enabled on the following pools. "
					    "Once a\nfeature is enabled the "
					    "pool may become incompatible with "
					    "software\nthat does not support "
					    "the feature. See "
					    "zpool-features(7) for "
					    "details.\n\n"
					    "Note that the pool "
					    "'compatibility' feature can be "
					    "used to inhibit\nfeature "
					    "upgrades.\n\n"
					    "Features marked with (*) are not "
					    "applied automatically on upgrade, "
					    "and\nmust be applied explicitly "
					    "with zpool-set(7).\n\n"));
					(void) printf(gettext("POOL "
					    "FEATURE\n"));
					(void) printf(gettext("------"
					    "---------\n"));
					cbp->cb_first = B_FALSE;
				}

				if (poolfirst) {
					/* Pool name header, once per pool. */
					(void) printf(gettext("%s\n"),
					    zpool_get_name(zhp));
					poolfirst = B_FALSE;
				}

				(void) printf(gettext(" %s%s\n"), fname,
				    spa_feature_table[i].fi_flags &
				    ZFEATURE_FLAG_NO_UPGRADE ? "(*)" : "");
			}
			/*
			 * If they did "zpool upgrade -a", then we could
			 * be doing ioctls to different pools. We need
			 * to log this history once to each pool, and bypass
			 * the normal history logging that happens in main().
			 */
			/*
			 * NOTE(review): this executes once per feature-table
			 * entry (inside the for loop), not once per pool --
			 * confirm the repeated zpool_log_history() call is
			 * intentional.
			 */
			(void) zpool_log_history(g_zfs, history_str);
			log_history = B_FALSE;
		}
	}

	return (0);
}
11677
11678 static int
upgrade_one(zpool_handle_t * zhp,void * data)11679 upgrade_one(zpool_handle_t *zhp, void *data)
11680 {
11681 boolean_t modified_pool = B_FALSE;
11682 upgrade_cbdata_t *cbp = data;
11683 uint64_t cur_version;
11684 int ret;
11685
11686 if (strcmp("log", zpool_get_name(zhp)) == 0) {
11687 (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
11688 "Pool 'log' must be renamed using export and import"
11689 " to upgrade.\n"));
11690 return (1);
11691 }
11692
11693 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
11694 if (cur_version > cbp->cb_version) {
11695 (void) printf(gettext("Pool '%s' is already formatted "
11696 "using more current version '%llu'.\n\n"),
11697 zpool_get_name(zhp), (u_longlong_t)cur_version);
11698 return (0);
11699 }
11700
11701 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
11702 (void) printf(gettext("Pool '%s' is already formatted "
11703 "using version %llu.\n\n"), zpool_get_name(zhp),
11704 (u_longlong_t)cbp->cb_version);
11705 return (0);
11706 }
11707
11708 if (cur_version != cbp->cb_version) {
11709 modified_pool = B_TRUE;
11710 ret = upgrade_version(zhp, cbp->cb_version);
11711 if (ret != 0)
11712 return (ret);
11713 }
11714
11715 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
11716 int count = 0;
11717 ret = upgrade_enable_all(zhp, &count);
11718 if (ret != 0)
11719 return (ret);
11720
11721 if (count != 0) {
11722 modified_pool = B_TRUE;
11723 } else if (cur_version == SPA_VERSION) {
11724 (void) printf(gettext("Pool '%s' already has all "
11725 "supported and requested features enabled.\n"),
11726 zpool_get_name(zhp));
11727 }
11728 }
11729
11730 if (modified_pool) {
11731 (void) printf("\n");
11732 (void) after_zpool_upgrade(zhp);
11733 }
11734
11735 return (0);
11736 }
11737
11738 /*
11739 * zpool upgrade
11740 * zpool upgrade -v
11741 * zpool upgrade [-V version] <-a | pool ...>
11742 *
 * With no arguments, display downrev'd ZFS pools available for upgrade.
11744 * Individual pools can be upgraded by specifying the pool, and '-a' will
11745 * upgrade all pools.
11746 */
int
zpool_do_upgrade(int argc, char **argv)
{
	int c;
	upgrade_cbdata_t cb = { 0 };
	int ret = 0;
	boolean_t showversions = B_FALSE;	/* -v: list versions only */
	boolean_t upgradeall = B_FALSE;		/* -a: upgrade every pool */
	char *end;				/* strtoll() end pointer */

	/* check options */
	while ((c = getopt(argc, argv, ":avV:")) != -1) {
		switch (c) {
		case 'a':
			upgradeall = B_TRUE;
			break;
		case 'v':
			showversions = B_TRUE;
			break;
		case 'V':
			/* Explicit legacy target version. */
			cb.cb_version = strtoll(optarg, &end, 10);
			if (*end != '\0' ||
			    !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
				(void) fprintf(stderr,
				    gettext("invalid version '%s'\n"), optarg);
				usage(B_FALSE);
			}
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	cb.cb_argc = argc;
	cb.cb_argv = argv;
	argc -= optind;
	argv += optind;

	if (cb.cb_version == 0) {
		/* No -V given: target the newest version. */
		cb.cb_version = SPA_VERSION;
	} else if (!upgradeall && argc == 0) {
		(void) fprintf(stderr, gettext("-V option is "
		    "incompatible with other arguments\n"));
		usage(B_FALSE);
	}

	/* -v and -a are mutually exclusive with pool names (and each other). */
	if (showversions) {
		if (upgradeall || argc != 0) {
			(void) fprintf(stderr, gettext("-v option is "
			    "incompatible with other arguments\n"));
			usage(B_FALSE);
		}
	} else if (upgradeall) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("-a option should not "
			    "be used along with a pool name\n"));
			usage(B_FALSE);
		}
	}

	(void) printf("%s", gettext("This system supports ZFS pool feature "
	    "flags.\n\n"));
	if (showversions) {
		int i;

		/* -v: describe every feature this build supports. */
		(void) printf(gettext("The following features are "
		    "supported:\n\n"));
		(void) printf(gettext("FEAT DESCRIPTION\n"));
		(void) printf("----------------------------------------------"
		    "---------------\n");
		for (i = 0; i < SPA_FEATURES; i++) {
			zfeature_info_t *fi = &spa_feature_table[i];
			if (!fi->fi_zfs_mod_supported)
				continue;
			const char *ro =
			    (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
			    " (read-only compatible)" : "";

			(void) printf("%-37s%s\n", fi->fi_uname, ro);
			(void) printf(" %s\n", fi->fi_desc);
		}
		(void) printf("\n");

		/* Then the fixed table of legacy numbered versions. */
		(void) printf(gettext("The following legacy versions are also "
		    "supported:\n\n"));
		(void) printf(gettext("VER DESCRIPTION\n"));
		(void) printf("--- -----------------------------------------"
		    "---------------\n");
		(void) printf(gettext(" 1 Initial ZFS version\n"));
		(void) printf(gettext(" 2 Ditto blocks "
		    "(replicated metadata)\n"));
		(void) printf(gettext(" 3 Hot spares and double parity "
		    "RAID-Z\n"));
		(void) printf(gettext(" 4 zpool history\n"));
		(void) printf(gettext(" 5 Compression using the gzip "
		    "algorithm\n"));
		(void) printf(gettext(" 6 bootfs pool property\n"));
		(void) printf(gettext(" 7 Separate intent log devices\n"));
		(void) printf(gettext(" 8 Delegated administration\n"));
		(void) printf(gettext(" 9 refquota and refreservation "
		    "properties\n"));
		(void) printf(gettext(" 10 Cache devices\n"));
		(void) printf(gettext(" 11 Improved scrub performance\n"));
		(void) printf(gettext(" 12 Snapshot properties\n"));
		(void) printf(gettext(" 13 snapused property\n"));
		(void) printf(gettext(" 14 passthrough-x aclinherit\n"));
		(void) printf(gettext(" 15 user/group space accounting\n"));
		(void) printf(gettext(" 16 stmf property support\n"));
		(void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
		(void) printf(gettext(" 18 Snapshot user holds\n"));
		(void) printf(gettext(" 19 Log device removal\n"));
		(void) printf(gettext(" 20 Compression using zle "
		    "(zero-length encoding)\n"));
		(void) printf(gettext(" 21 Deduplication\n"));
		(void) printf(gettext(" 22 Received properties\n"));
		(void) printf(gettext(" 23 Slim ZIL\n"));
		(void) printf(gettext(" 24 System attributes\n"));
		(void) printf(gettext(" 25 Improved scrub stats\n"));
		(void) printf(gettext(" 26 Improved snapshot deletion "
		    "performance\n"));
		(void) printf(gettext(" 27 Improved snapshot creation "
		    "performance\n"));
		(void) printf(gettext(" 28 Multiple vdev replacements\n"));
		(void) printf(gettext("\nFor more information on a particular "
		    "version, including supported releases,\n"));
		(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
	} else if (argc == 0 && upgradeall) {
		/* -a: upgrade every imported pool via upgrade_cb. */
		cb.cb_first = B_TRUE;
		ret = zpool_iter(g_zfs, upgrade_cb, &cb);
		if (ret == 0 && cb.cb_first) {
			/* cb_first still set: no pool needed any change. */
			if (cb.cb_version == SPA_VERSION) {
				(void) printf(gettext("All pools are already "
				    "formatted using feature flags.\n\n"));
				(void) printf(gettext("Every feature flags "
				    "pool already has all supported and "
				    "requested features enabled.\n"));
			} else {
				(void) printf(gettext("All pools are already "
				    "formatted with version %llu or higher.\n"),
				    (u_longlong_t)cb.cb_version);
			}
		}
	} else if (argc == 0) {
		/* No arguments: report-only mode, two listing passes. */
		cb.cb_first = B_TRUE;
		ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
		assert(ret == 0);

		if (cb.cb_first) {
			(void) printf(gettext("All pools are formatted "
			    "using feature flags.\n\n"));
		} else {
			(void) printf(gettext("\nUse 'zpool upgrade -v' "
			    "for a list of available legacy versions.\n"));
		}

		cb.cb_first = B_TRUE;
		ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
		assert(ret == 0);

		if (cb.cb_first) {
			(void) printf(gettext("Every feature flags pool has "
			    "all supported and requested features enabled.\n"));
		} else {
			(void) printf(gettext("\n"));
		}
	} else {
		/* Explicit pool name(s): upgrade each one. */
		ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
		    B_FALSE, upgrade_one, &cb);
	}

	return (ret);
}
11927
/*
 * Per-invocation state for 'zpool history' output.
 */
typedef struct hist_cbdata {
	boolean_t first;	/* no pool's history printed yet */
	boolean_t longfmt;	/* -l: append [user/host/zone] details */
	boolean_t internal;	/* -i: include internal history events */
} hist_cbdata_t;
11933
/*
 * Print every record in one ZPOOL_HIST_RECORD nvlist chunk returned by
 * zpool_get_history().  Each record is one of: a user command, a legacy
 * internal event, a named internal event, or an ioctl record; internal
 * records are only shown with -i (cb->internal), and -l (cb->longfmt)
 * appends who/where details.
 */
static void
print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
{
	nvlist_t **records;
	uint_t numrecords;
	int i;

	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		nvlist_t *rec = records[i];
		char tbuf[64] = "";

		/* Format the record timestamp, when one was logged. */
		if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
			time_t tsec;
			struct tm t;

			tsec = fnvlist_lookup_uint64(records[i],
			    ZPOOL_HIST_TIME);
			(void) localtime_r(&tsec, &t);
			(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
		}

		/* Append the command's elapsed time in milliseconds. */
		if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
			/*
			 * NOTE(review): fetched with the int64 accessor but
			 * stored unsigned -- confirm the kernel logs this
			 * field as int64.
			 */
			uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
			    ZPOOL_HIST_ELAPSED_NS);
			(void) snprintf(tbuf + strlen(tbuf),
			    sizeof (tbuf) - strlen(tbuf),
			    " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
		}

		if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
			/* User-issued command line. */
			(void) printf("%s %s", tbuf,
			    fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
		} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
			/* Legacy numbered internal event (pre-named form). */
			int ievent =
			    fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
			if (!cb->internal)
				continue;
			if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
				(void) printf("%s unrecognized record:\n",
				    tbuf);
				dump_nvlist(rec, 4);
				continue;
			}
			(void) printf("%s [internal %s txg:%lld] %s", tbuf,
			    zfs_history_event_names[ievent],
			    (longlong_t)fnvlist_lookup_uint64(
			    rec, ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
		} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
			/* Named internal event, optionally with a dataset. */
			if (!cb->internal)
				continue;
			(void) printf("%s [txg:%lld] %s", tbuf,
			    (longlong_t)fnvlist_lookup_uint64(
			    rec, ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
			if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
				(void) printf(" %s (%llu)",
				    fnvlist_lookup_string(rec,
				    ZPOOL_HIST_DSNAME),
				    (u_longlong_t)fnvlist_lookup_uint64(rec,
				    ZPOOL_HIST_DSID));
			}
			(void) printf(" %s", fnvlist_lookup_string(rec,
			    ZPOOL_HIST_INT_STR));
		} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
			/* ioctl record: may carry input/output nvlists. */
			if (!cb->internal)
				continue;
			(void) printf("%s ioctl %s\n", tbuf,
			    fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
			if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
				(void) printf(" input:\n");
				dump_nvlist(fnvlist_lookup_nvlist(rec,
				    ZPOOL_HIST_INPUT_NVL), 8);
			}
			if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
				(void) printf(" output:\n");
				dump_nvlist(fnvlist_lookup_nvlist(rec,
				    ZPOOL_HIST_OUTPUT_NVL), 8);
			}
			if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
				/* Output was too large to log verbatim. */
				(void) printf(" output nvlist omitted; "
				    "original size: %lldKB\n",
				    (longlong_t)fnvlist_lookup_int64(rec,
				    ZPOOL_HIST_OUTPUT_SIZE) / 1024);
			}
			if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
				(void) printf(" errno: %lld\n",
				    (longlong_t)fnvlist_lookup_int64(rec,
				    ZPOOL_HIST_ERRNO));
			}
		} else {
			/* Record shape not recognized; dump it raw. */
			if (!cb->internal)
				continue;
			(void) printf("%s unrecognized record:\n", tbuf);
			dump_nvlist(rec, 4);
		}

		if (!cb->longfmt) {
			(void) printf("\n");
			continue;
		}
		/* -l: append [user N (name) on host:zone] details. */
		(void) printf(" [");
		if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
			uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
			struct passwd *pwd = getpwuid(who);
			(void) printf("user %d ", (int)who);
			if (pwd != NULL)
				(void) printf("(%s) ", pwd->pw_name);
		}
		if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
			(void) printf("on %s",
			    fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
		}
		if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
			(void) printf(":%s",
			    fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
		}

		(void) printf("]");
		(void) printf("\n");
	}
}
12058
12059 /*
12060 * Print out the command history for a specific pool.
12061 */
12062 static int
get_history_one(zpool_handle_t * zhp,void * data)12063 get_history_one(zpool_handle_t *zhp, void *data)
12064 {
12065 nvlist_t *nvhis;
12066 int ret;
12067 hist_cbdata_t *cb = (hist_cbdata_t *)data;
12068 uint64_t off = 0;
12069 boolean_t eof = B_FALSE;
12070
12071 cb->first = B_FALSE;
12072
12073 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
12074
12075 while (!eof) {
12076 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
12077 return (ret);
12078
12079 print_history_records(nvhis, cb);
12080 nvlist_free(nvhis);
12081 }
12082 (void) printf("\n");
12083
12084 return (ret);
12085 }
12086
12087 /*
12088 * zpool history <pool>
12089 *
12090 * Displays the history of commands that modified pools.
12091 */
12092 int
zpool_do_history(int argc,char ** argv)12093 zpool_do_history(int argc, char **argv)
12094 {
12095 hist_cbdata_t cbdata = { 0 };
12096 int ret;
12097 int c;
12098
12099 cbdata.first = B_TRUE;
12100 /* check options */
12101 while ((c = getopt(argc, argv, "li")) != -1) {
12102 switch (c) {
12103 case 'l':
12104 cbdata.longfmt = B_TRUE;
12105 break;
12106 case 'i':
12107 cbdata.internal = B_TRUE;
12108 break;
12109 case '?':
12110 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12111 optopt);
12112 usage(B_FALSE);
12113 }
12114 }
12115 argc -= optind;
12116 argv += optind;
12117
12118 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
12119 B_FALSE, get_history_one, &cbdata);
12120
12121 if (argc == 0 && cbdata.first == B_TRUE) {
12122 (void) fprintf(stderr, gettext("no pools available\n"));
12123 return (0);
12124 }
12125
12126 return (ret);
12127 }
12128
/*
 * Parsed command-line options for 'zpool events'.
 */
typedef struct ev_opts {
	int verbose;	/* -v: dump each event's full nvlist payload */
	int scripted;	/* -H: tab-delimited, machine-parsable output */
	int follow;	/* -f: block and stream new events */
	int clear;	/* -c: clear the kernel event buffer instead */
	char poolname[ZFS_MAX_DATASET_NAME_LEN];	/* optional pool filter */
} ev_opts_t;
12136
12137 static void
zpool_do_events_short(nvlist_t * nvl,ev_opts_t * opts)12138 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
12139 {
12140 char ctime_str[26], str[32];
12141 const char *ptr;
12142 int64_t *tv;
12143 uint_t n;
12144
12145 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
12146 memset(str, ' ', 32);
12147 (void) ctime_r((const time_t *)&tv[0], ctime_str);
12148 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
12149 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
12150 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
12151 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
12152 if (opts->scripted)
12153 (void) printf(gettext("%s\t"), str);
12154 else
12155 (void) printf(gettext("%s "), str);
12156
12157 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
12158 (void) printf(gettext("%s\n"), ptr);
12159 }
12160
/*
 * Recursively pretty-print an event nvlist at the given indentation depth
 * (used by 'zpool events -v').  Scalars print as hex; a few well-known
 * uint32/uint64 payload members (ZIO stage/type/priority/flags, vdev
 * states) are additionally decoded to symbolic strings.  Embedded nvlists
 * and nvlist arrays recurse with depth + 8.
 *
 * NOTE: flagstr is a static scratch buffer shared across calls (and
 * recursion); it is written and consumed immediately, but this function
 * is not thread-safe.
 */
static void
zpool_do_events_nvprint(nvlist_t *nvl, int depth)
{
	nvpair_t *nvp;
	static char flagstr[256];

	for (nvp = nvlist_next_nvpair(nvl, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

		data_type_t type = nvpair_type(nvp);
		const char *name = nvpair_name(nvp);

		boolean_t b;
		uint8_t i8;
		uint16_t i16;
		uint32_t i32;
		uint64_t i64;
		const char *str;
		nvlist_t *cnv;

		printf(gettext("%*s%s = "), depth, "", name);

		switch (type) {
		case DATA_TYPE_BOOLEAN:
			/* Presence-only boolean: existing means true. */
			printf(gettext("%s"), "1");
			break;

		case DATA_TYPE_BOOLEAN_VALUE:
			(void) nvpair_value_boolean_value(nvp, &b);
			printf(gettext("%s"), b ? "1" : "0");
			break;

		case DATA_TYPE_BYTE:
			(void) nvpair_value_byte(nvp, &i8);
			printf(gettext("0x%x"), i8);
			break;

		case DATA_TYPE_INT8:
			(void) nvpair_value_int8(nvp, (void *)&i8);
			printf(gettext("0x%x"), i8);
			break;

		case DATA_TYPE_UINT8:
			(void) nvpair_value_uint8(nvp, &i8);
			printf(gettext("0x%x"), i8);
			break;

		case DATA_TYPE_INT16:
			(void) nvpair_value_int16(nvp, (void *)&i16);
			printf(gettext("0x%x"), i16);
			break;

		case DATA_TYPE_UINT16:
			(void) nvpair_value_uint16(nvp, &i16);
			printf(gettext("0x%x"), i16);
			break;

		case DATA_TYPE_INT32:
			(void) nvpair_value_int32(nvp, (void *)&i32);
			printf(gettext("0x%x"), i32);
			break;

		case DATA_TYPE_UINT32:
			(void) nvpair_value_uint32(nvp, &i32);
			/* Decode known ZIO payload members symbolically. */
			if (strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 ||
			    strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) {
				(void) zfs_valstr_zio_stage(i32, flagstr,
				    sizeof (flagstr));
				printf(gettext("0x%x [%s]"), i32, flagstr);
			} else if (strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_TYPE) == 0) {
				(void) zfs_valstr_zio_type(i32, flagstr,
				    sizeof (flagstr));
				printf(gettext("0x%x [%s]"), i32, flagstr);
			} else if (strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
				(void) zfs_valstr_zio_priority(i32, flagstr,
				    sizeof (flagstr));
				printf(gettext("0x%x [%s]"), i32, flagstr);
			} else {
				printf(gettext("0x%x"), i32);
			}
			break;

		case DATA_TYPE_INT64:
			(void) nvpair_value_int64(nvp, (void *)&i64);
			printf(gettext("0x%llx"), (u_longlong_t)i64);
			break;

		case DATA_TYPE_UINT64:
			(void) nvpair_value_uint64(nvp, &i64);
			/*
			 * translate vdev state values to readable
			 * strings to aide zpool events consumers
			 */
			if (strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
			    strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
				printf(gettext("\"%s\" (0x%llx)"),
				    zpool_state_to_name(i64, VDEV_AUX_NONE),
				    (u_longlong_t)i64);
			} else if (strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) {
				(void) zfs_valstr_zio_flag(i64, flagstr,
				    sizeof (flagstr));
				printf(gettext("0x%llx [%s]"),
				    (u_longlong_t)i64, flagstr);
			} else {
				printf(gettext("0x%llx"), (u_longlong_t)i64);
			}
			break;

		case DATA_TYPE_HRTIME:
			(void) nvpair_value_hrtime(nvp, (void *)&i64);
			printf(gettext("0x%llx"), (u_longlong_t)i64);
			break;

		case DATA_TYPE_STRING:
			(void) nvpair_value_string(nvp, &str);
			printf(gettext("\"%s\""), str ? str : "<NULL>");
			break;

		case DATA_TYPE_NVLIST:
			/* Embedded nvlist: recurse one level deeper. */
			printf(gettext("(embedded nvlist)\n"));
			(void) nvpair_value_nvlist(nvp, &cnv);
			zpool_do_events_nvprint(cnv, depth + 8);
			printf(gettext("%*s(end %s)"), depth, "", name);
			break;

		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **val;
			uint_t i, nelem;

			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
			printf(gettext("(%d embedded nvlists)\n"), nelem);
			for (i = 0; i < nelem; i++) {
				printf(gettext("%*s%s[%d] = %s\n"),
				    depth, "", name, i, "(embedded nvlist)");
				zpool_do_events_nvprint(val[i], depth + 8);
				printf(gettext("%*s(end %s[%i])\n"),
				    depth, "", name, i);
			}
			printf(gettext("%*s(end %s)\n"), depth, "", name);
		}
			break;

		case DATA_TYPE_INT8_ARRAY: {
			int8_t *val;
			uint_t i, nelem;

			(void) nvpair_value_int8_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_UINT8_ARRAY: {
			uint8_t *val;
			uint_t i, nelem;

			(void) nvpair_value_uint8_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_INT16_ARRAY: {
			int16_t *val;
			uint_t i, nelem;

			(void) nvpair_value_int16_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_UINT16_ARRAY: {
			uint16_t *val;
			uint_t i, nelem;

			(void) nvpair_value_uint16_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_INT32_ARRAY: {
			int32_t *val;
			uint_t i, nelem;

			(void) nvpair_value_int32_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_UINT32_ARRAY: {
			uint32_t *val;
			uint_t i, nelem;

			(void) nvpair_value_uint32_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_INT64_ARRAY: {
			int64_t *val;
			uint_t i, nelem;

			(void) nvpair_value_int64_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%llx "),
				    (u_longlong_t)val[i]);

			break;
		}

		case DATA_TYPE_UINT64_ARRAY: {
			uint64_t *val;
			uint_t i, nelem;

			(void) nvpair_value_uint64_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%llx "),
				    (u_longlong_t)val[i]);

			break;
		}

		case DATA_TYPE_STRING_ARRAY: {
			const char **str;
			uint_t i, nelem;

			(void) nvpair_value_string_array(nvp, &str, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("\"%s\" "),
				    str[i] ? str[i] : "<NULL>");

			break;
		}

		/* Types with no printable representation here. */
		case DATA_TYPE_BOOLEAN_ARRAY:
		case DATA_TYPE_BYTE_ARRAY:
		case DATA_TYPE_DOUBLE:
		case DATA_TYPE_DONTCARE:
		case DATA_TYPE_UNKNOWN:
			printf(gettext("<unknown>"));
			break;
		}

		printf(gettext("\n"));
	}
}
12424
12425 static int
zpool_do_events_next(ev_opts_t * opts)12426 zpool_do_events_next(ev_opts_t *opts)
12427 {
12428 nvlist_t *nvl;
12429 int zevent_fd, ret, dropped;
12430 const char *pool;
12431
12432 zevent_fd = open(ZFS_DEV, O_RDWR);
12433 VERIFY(zevent_fd >= 0);
12434
12435 if (!opts->scripted)
12436 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
12437
12438 while (1) {
12439 ret = zpool_events_next(g_zfs, &nvl, &dropped,
12440 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
12441 if (ret || nvl == NULL)
12442 break;
12443
12444 if (dropped > 0)
12445 (void) printf(gettext("dropped %d events\n"), dropped);
12446
12447 if (strlen(opts->poolname) > 0 &&
12448 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
12449 strcmp(opts->poolname, pool) != 0)
12450 continue;
12451
12452 zpool_do_events_short(nvl, opts);
12453
12454 if (opts->verbose) {
12455 zpool_do_events_nvprint(nvl, 8);
12456 printf(gettext("\n"));
12457 }
12458 (void) fflush(stdout);
12459
12460 nvlist_free(nvl);
12461 }
12462
12463 VERIFY0(close(zevent_fd));
12464
12465 return (ret);
12466 }
12467
12468 static int
zpool_do_events_clear(void)12469 zpool_do_events_clear(void)
12470 {
12471 int count, ret;
12472
12473 ret = zpool_events_clear(g_zfs, &count);
12474 if (!ret)
12475 (void) printf(gettext("cleared %d events\n"), count);
12476
12477 return (ret);
12478 }
12479
12480 /*
12481 * zpool events [-vHf [pool] | -c]
12482 *
 * Displays event logs generated by ZFS.
12484 */
12485 int
zpool_do_events(int argc,char ** argv)12486 zpool_do_events(int argc, char **argv)
12487 {
12488 ev_opts_t opts = { 0 };
12489 int ret;
12490 int c;
12491
12492 /* check options */
12493 while ((c = getopt(argc, argv, "vHfc")) != -1) {
12494 switch (c) {
12495 case 'v':
12496 opts.verbose = 1;
12497 break;
12498 case 'H':
12499 opts.scripted = 1;
12500 break;
12501 case 'f':
12502 opts.follow = 1;
12503 break;
12504 case 'c':
12505 opts.clear = 1;
12506 break;
12507 case '?':
12508 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12509 optopt);
12510 usage(B_FALSE);
12511 }
12512 }
12513 argc -= optind;
12514 argv += optind;
12515
12516 if (argc > 1) {
12517 (void) fprintf(stderr, gettext("too many arguments\n"));
12518 usage(B_FALSE);
12519 } else if (argc == 1) {
12520 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
12521 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
12522 (void) fprintf(stderr,
12523 gettext("invalid pool name '%s'\n"), opts.poolname);
12524 usage(B_FALSE);
12525 }
12526 }
12527
12528 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
12529 opts.clear) {
12530 (void) fprintf(stderr,
12531 gettext("invalid options combined with -c\n"));
12532 usage(B_FALSE);
12533 }
12534
12535 if (opts.clear)
12536 ret = zpool_do_events_clear();
12537 else
12538 ret = zpool_do_events_next(&opts);
12539
12540 return (ret);
12541 }
12542
/*
 * Fetch and emit the properties in cbp->cb_proplist for one named vdev of
 * zhp ('zpool get' on a vdev).  In plain mode zprop_collect_property()
 * prints directly; in JSON mode matched properties accumulate into an
 * nvlist attached under the "vdevs" object of cbp->cb_jsobj.  Returns 0.
 */
static int
get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
{
	zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
	char value[ZFS_MAXPROPLEN];
	zprop_source_t srctype;
	nvlist_t *props, *item, *d;
	props = item = d = NULL;

	if (cbp->cb_json) {
		/* JSON output hangs off a pre-created "vdevs" object. */
		d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs");
		if (d == NULL) {
			fprintf(stderr, "vdevs obj not found.\n");
			exit(1);
		}
		props = fnvlist_alloc();
	}

	for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
	    pl = pl->pl_next) {
		char *prop_name;
		/*
		 * If the first property is pool name, it is a special
		 * placeholder that we can skip. This will also skip
		 * over the name property when 'all' is specified.
		 */
		if (pl->pl_prop == ZPOOL_PROP_NAME &&
		    pl == cbp->cb_proplist)
			continue;

		if (pl->pl_prop == ZPROP_INVAL) {
			/* User-defined property: identified by its name. */
			prop_name = pl->pl_user_prop;
		} else {
			prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
		}
		if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
		    prop_name, value, sizeof (value), &srctype,
		    cbp->cb_literal) == 0) {
			(void) zprop_collect_property(vdevname, cbp, prop_name,
			    value, srctype, NULL, NULL, props);
		}
	}

	if (cbp->cb_json) {
		/* Only add a vdev entry when at least one property matched. */
		if (!nvlist_empty(props)) {
			item = fnvlist_alloc();
			fill_vdev_info(item, zhp, vdevname, B_TRUE,
			    cbp->cb_json_as_int);
			fnvlist_add_nvlist(item, "properties", props);
			fnvlist_add_nvlist(d, vdevname, item);
			fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d);
			fnvlist_free(item);
		}
		fnvlist_free(props);
	}

	return (0);
}
12601
12602 static int
get_callback_vdev_cb(void * zhp_data,nvlist_t * nv,void * data)12603 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
12604 {
12605 zpool_handle_t *zhp = zhp_data;
12606 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12607 char *vdevname;
12608 const char *type;
12609 int ret;
12610
12611 /*
12612 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
12613 * pool name for display purposes, which is not desired. Fallback to
12614 * zpool_vdev_name() when not dealing with the root vdev.
12615 */
12616 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
12617 if (zhp != NULL && strcmp(type, "root") == 0)
12618 vdevname = strdup("root-0");
12619 else
12620 vdevname = zpool_vdev_name(g_zfs, zhp, nv,
12621 cbp->cb_vdevs.cb_name_flags);
12622
12623 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
12624
12625 ret = get_callback_vdev(zhp, vdevname, data);
12626
12627 free(vdevname);
12628
12629 return (ret);
12630 }
12631
12632 static int
get_callback(zpool_handle_t * zhp,void * data)12633 get_callback(zpool_handle_t *zhp, void *data)
12634 {
12635 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12636 char value[ZFS_MAXPROPLEN];
12637 zprop_source_t srctype;
12638 zprop_list_t *pl;
12639 int vid;
12640 int err = 0;
12641 nvlist_t *props, *item, *d;
12642 props = item = d = NULL;
12643
12644 if (cbp->cb_type == ZFS_TYPE_VDEV) {
12645 if (cbp->cb_json) {
12646 nvlist_t *pool = fnvlist_alloc();
12647 fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int);
12648 fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool);
12649 fnvlist_free(pool);
12650 }
12651
12652 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
12653 (void) for_each_vdev(zhp, get_callback_vdev_cb, data);
12654 } else {
12655 /* Adjust column widths for vdev properties */
12656 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
12657 vid++) {
12658 (void) vdev_expand_proplist(zhp,
12659 cbp->cb_vdevs.cb_names[vid],
12660 &cbp->cb_proplist);
12661 }
12662 /* Display the properties */
12663 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
12664 vid++) {
12665 (void) get_callback_vdev(zhp,
12666 cbp->cb_vdevs.cb_names[vid], data);
12667 }
12668 }
12669 } else {
12670 assert(cbp->cb_type == ZFS_TYPE_POOL);
12671 if (cbp->cb_json) {
12672 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
12673 if (d == NULL) {
12674 fprintf(stderr, "pools obj not found.\n");
12675 exit(1);
12676 }
12677 props = fnvlist_alloc();
12678 }
12679 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
12680 /*
12681 * Skip the special fake placeholder. This will also
12682 * skip over the name property when 'all' is specified.
12683 */
12684 if (pl->pl_prop == ZPOOL_PROP_NAME &&
12685 pl == cbp->cb_proplist)
12686 continue;
12687
12688 if (pl->pl_prop == ZPROP_INVAL &&
12689 zfs_prop_user(pl->pl_user_prop)) {
12690 srctype = ZPROP_SRC_LOCAL;
12691
12692 if (zpool_get_userprop(zhp, pl->pl_user_prop,
12693 value, sizeof (value), &srctype) != 0)
12694 continue;
12695
12696 err = zprop_collect_property(
12697 zpool_get_name(zhp), cbp, pl->pl_user_prop,
12698 value, srctype, NULL, NULL, props);
12699 } else if (pl->pl_prop == ZPROP_INVAL &&
12700 (zpool_prop_feature(pl->pl_user_prop) ||
12701 zpool_prop_unsupported(pl->pl_user_prop))) {
12702 srctype = ZPROP_SRC_LOCAL;
12703
12704 if (zpool_prop_get_feature(zhp,
12705 pl->pl_user_prop, value,
12706 sizeof (value)) == 0) {
12707 err = zprop_collect_property(
12708 zpool_get_name(zhp), cbp,
12709 pl->pl_user_prop, value, srctype,
12710 NULL, NULL, props);
12711 }
12712 } else {
12713 if (zpool_get_prop(zhp, pl->pl_prop, value,
12714 sizeof (value), &srctype,
12715 cbp->cb_literal) != 0)
12716 continue;
12717
12718 err = zprop_collect_property(
12719 zpool_get_name(zhp), cbp,
12720 zpool_prop_to_name(pl->pl_prop),
12721 value, srctype, NULL, NULL, props);
12722 }
12723 if (err != 0)
12724 return (err);
12725 }
12726
12727 if (cbp->cb_json) {
12728 if (!nvlist_empty(props)) {
12729 item = fnvlist_alloc();
12730 fill_pool_info(item, zhp, B_TRUE,
12731 cbp->cb_json_as_int);
12732 fnvlist_add_nvlist(item, "properties", props);
12733 if (cbp->cb_json_pool_key_guid) {
12734 char buf[256];
12735 uint64_t guid = fnvlist_lookup_uint64(
12736 zpool_get_config(zhp, NULL),
12737 ZPOOL_CONFIG_POOL_GUID);
12738 (void) snprintf(buf, 256, "%llu",
12739 (u_longlong_t)guid);
12740 fnvlist_add_nvlist(d, buf, item);
12741 } else {
12742 const char *name = zpool_get_name(zhp);
12743 fnvlist_add_nvlist(d, name, item);
12744 }
12745 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
12746 fnvlist_free(item);
12747 }
12748 fnvlist_free(props);
12749 }
12750 }
12751
12752 return (0);
12753 }
12754
12755 /*
12756 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
12757 *
12758 * -H Scripted mode. Don't display headers, and separate properties
12759 * by a single tab.
12760 * -o List of columns to display. Defaults to
12761 * "name,property,value,source".
12762 * -p Display values in parsable (exact) format.
12763 * -j Display output in JSON format.
12764 * --json-int Display numbers as integers instead of strings.
12765 * --json-pool-key-guid Set pool GUID as key for pool objects.
12766 *
12767 * Get properties of pools in the system. Output space statistics
12768 * for each one as well as other attributes.
12769 */
/*
 * Implements "zpool get": parse options/arguments, decide whether this is
 * a pool-property or vdev-property request, then iterate the pools with
 * get_callback().  See the usage comment above for the option summary.
 */
int
zpool_do_get(int argc, char **argv)
{
	zprop_get_cbdata_t cb = { 0 };
	zprop_list_t fake_name = { 0 };
	int ret;
	int c, i;
	char *propstr = NULL;
	char *vdev = NULL;	/* strdup'd vdev alias; freed before return */
	nvlist_t *data = NULL;

	cb.cb_first = B_TRUE;

	/*
	 * Set up default columns and sources.
	 */
	cb.cb_sources = ZPROP_SRC_ALL;
	cb.cb_columns[0] = GET_COL_NAME;
	cb.cb_columns[1] = GET_COL_PROPERTY;
	cb.cb_columns[2] = GET_COL_VALUE;
	cb.cb_columns[3] = GET_COL_SOURCE;
	cb.cb_type = ZFS_TYPE_POOL;
	cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
	current_prop_type = cb.cb_type;

	struct option long_options[] = {
		{"json", no_argument, NULL, 'j'},
		{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
		{"json-pool-key-guid", no_argument, NULL,
		    ZPOOL_OPTION_POOL_KEY_GUID},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, ":jHpo:", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'p':
			cb.cb_literal = B_TRUE;
			break;
		case 'H':
			cb.cb_scripted = B_TRUE;
			break;
		case 'j':
			cb.cb_json = B_TRUE;
			cb.cb_jsobj = zpool_json_schema(0, 1);
			data = fnvlist_alloc();
			break;
		case ZPOOL_OPTION_POOL_KEY_GUID:
			cb.cb_json_pool_key_guid = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_NUMS_AS_INT:
			cb.cb_json_as_int = B_TRUE;
			cb.cb_literal = B_TRUE;
			break;
		case 'o':
			/* Rebuild the column list from the -o argument. */
			memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
			i = 0;

			for (char *tok; (tok = strsep(&optarg, ",")); ) {
				static const char *const col_opts[] =
				    { "name", "property", "value", "source",
				    "all" };
				static const zfs_get_column_t col_cols[] =
				    { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
				    GET_COL_SOURCE };

				if (i == ZFS_GET_NCOLS - 1) {
					(void) fprintf(stderr, gettext("too "
					    "many fields given to -o "
					    "option\n"));
					usage(B_FALSE);
				}

				for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
					if (strcmp(tok, col_opts[c]) == 0)
						goto found;

				(void) fprintf(stderr,
				    gettext("invalid column name '%s'\n"), tok);
				usage(B_FALSE);

found:
				/* c == 4 means "all" was matched */
				if (c >= 4) {
					if (i > 0) {
						(void) fprintf(stderr,
						    gettext("\"all\" conflicts "
						    "with specific fields "
						    "given to -o option\n"));
						usage(B_FALSE);
					}

					memcpy(cb.cb_columns, col_cols,
					    sizeof (col_cols));
					i = ZFS_GET_NCOLS - 1;
				} else
					cb.cb_columns[i++] = col_cols[c];
			}
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* --json-int and --json-pool-key-guid only modify -j output. */
	if (!cb.cb_json && cb.cb_json_as_int) {
		(void) fprintf(stderr, gettext("'--json-int' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (!cb.cb_json && cb.cb_json_pool_key_guid) {
		(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
		    " works with '-j' option\n"));
		usage(B_FALSE);
	}

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing property "
		    "argument\n"));
		usage(B_FALSE);
	}

	/* Properties list is needed later by zprop_get_list() */
	propstr = argv[0];

	argc--;
	argv++;

	if (argc == 0) {
		/* No args, so just print the defaults. */
	} else if (are_all_pools(argc, argv)) {
		/* All the args are pool names */
	} else if (are_all_pools(1, argv)) {
		/* The first arg is a pool name */
		if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
		    (argc == 2 && strcmp(argv[1], "root") == 0) ||
		    are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
		    &cb.cb_vdevs)) {

			/* "root" is accepted as an alias for "root-0" */
			if (strcmp(argv[1], "root") == 0)
				vdev = strdup("root-0");

			/* ... and the rest are vdev names */
			if (vdev == NULL)
				cb.cb_vdevs.cb_names = argv + 1;
			else
				cb.cb_vdevs.cb_names = &vdev;

			cb.cb_vdevs.cb_names_count = argc - 1;
			cb.cb_type = ZFS_TYPE_VDEV;
			argc = 1; /* One pool to process */
		} else {
			if (cb.cb_json) {
				nvlist_free(cb.cb_jsobj);
				nvlist_free(data);
			}
			fprintf(stderr, gettext("Expected a list of vdevs in"
			    " \"%s\", but got:\n"), argv[0]);
			error_list_unresolved_vdevs(argc - 1, argv + 1,
			    argv[0], &cb.cb_vdevs);
			fprintf(stderr, "\n");
			usage(B_FALSE);
		}
	} else {
		if (cb.cb_json) {
			nvlist_free(cb.cb_jsobj);
			nvlist_free(data);
		}
		/*
		 * The first arg isn't the name of a valid pool.
		 */
		fprintf(stderr, gettext("Cannot get properties of %s: "
		    "no such pool available.\n"), argv[0]);
		return (1);
	}

	if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
	    cb.cb_type) != 0) {
		/* Use correct list of valid properties (pool or vdev) */
		current_prop_type = cb.cb_type;
		usage(B_FALSE);
	}

	/*
	 * Prepend the fake "name" placeholder so the name column width is
	 * included in the layout (skipped again inside get_callback()).
	 */
	if (cb.cb_proplist != NULL) {
		fake_name.pl_prop = ZPOOL_PROP_NAME;
		fake_name.pl_width = strlen(gettext("NAME"));
		fake_name.pl_next = cb.cb_proplist;
		cb.cb_proplist = &fake_name;
	}

	if (cb.cb_json) {
		if (cb.cb_type == ZFS_TYPE_VDEV)
			fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data);
		else
			fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
		fnvlist_free(data);
	}

	ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
	    cb.cb_literal, get_callback, &cb);

	/* Only emit the JSON document when every pool succeeded. */
	if (ret == 0 && cb.cb_json)
		zcmd_print_json(cb.cb_jsobj);
	else if (ret != 0 && cb.cb_json)
		nvlist_free(cb.cb_jsobj);

	if (cb.cb_proplist == &fake_name)
		zprop_free_list(fake_name.pl_next);
	else
		zprop_free_list(cb.cb_proplist);

	if (vdev != NULL)
		free(vdev);

	return (ret);
}
12991
/*
 * Callback state for "zpool set", shared by the pool-property and
 * vdev-property code paths.
 */
typedef struct set_cbdata {
	char *cb_propname;	/* "name" half of the name=value argument */
	char *cb_value;		/* "value" half of the name=value argument */
	zfs_type_t cb_type;	/* ZFS_TYPE_POOL or ZFS_TYPE_VDEV */
	vdev_cbdata_t cb_vdevs;	/* target vdev names (vdev properties only) */
	boolean_t cb_any_successful;	/* true if the last set succeeded */
} set_cbdata_t;
12999
13000 static int
set_pool_callback(zpool_handle_t * zhp,set_cbdata_t * cb)13001 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
13002 {
13003 int error;
13004
13005 /* Check if we have out-of-bounds features */
13006 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
13007 boolean_t features[SPA_FEATURES];
13008 if (zpool_do_load_compat(cb->cb_value, features) !=
13009 ZPOOL_COMPATIBILITY_OK)
13010 return (-1);
13011
13012 nvlist_t *enabled = zpool_get_features(zhp);
13013 spa_feature_t i;
13014 for (i = 0; i < SPA_FEATURES; i++) {
13015 const char *fguid = spa_feature_table[i].fi_guid;
13016 if (nvlist_exists(enabled, fguid) && !features[i])
13017 break;
13018 }
13019 if (i < SPA_FEATURES)
13020 (void) fprintf(stderr, gettext("Warning: one or "
13021 "more features already enabled on pool '%s'\n"
13022 "are not present in this compatibility set.\n"),
13023 zpool_get_name(zhp));
13024 }
13025
13026 /* if we're setting a feature, check it's in compatibility set */
13027 if (zpool_prop_feature(cb->cb_propname) &&
13028 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
13029 char *fname = strchr(cb->cb_propname, '@') + 1;
13030 spa_feature_t f;
13031
13032 if (zfeature_lookup_name(fname, &f) == 0) {
13033 char compat[ZFS_MAXPROPLEN];
13034 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
13035 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
13036 compat[0] = '\0';
13037
13038 boolean_t features[SPA_FEATURES];
13039 if (zpool_do_load_compat(compat, features) !=
13040 ZPOOL_COMPATIBILITY_OK) {
13041 (void) fprintf(stderr, gettext("Error: "
13042 "cannot enable feature '%s' on pool '%s'\n"
13043 "because the pool's 'compatibility' "
13044 "property cannot be parsed.\n"),
13045 fname, zpool_get_name(zhp));
13046 return (-1);
13047 }
13048
13049 if (!features[f]) {
13050 (void) fprintf(stderr, gettext("Error: "
13051 "cannot enable feature '%s' on pool '%s'\n"
13052 "as it is not specified in this pool's "
13053 "current compatibility set.\n"
13054 "Consider setting 'compatibility' to a "
13055 "less restrictive set, or to 'off'.\n"),
13056 fname, zpool_get_name(zhp));
13057 return (-1);
13058 }
13059 }
13060 }
13061
13062 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
13063
13064 return (error);
13065 }
13066
13067 static int
set_callback(zpool_handle_t * zhp,void * data)13068 set_callback(zpool_handle_t *zhp, void *data)
13069 {
13070 int error;
13071 set_cbdata_t *cb = (set_cbdata_t *)data;
13072
13073 if (cb->cb_type == ZFS_TYPE_VDEV) {
13074 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
13075 cb->cb_propname, cb->cb_value);
13076 } else {
13077 assert(cb->cb_type == ZFS_TYPE_POOL);
13078 error = set_pool_callback(zhp, cb);
13079 }
13080
13081 cb->cb_any_successful = !error;
13082 return (error);
13083 }
13084
/*
 * zpool set property=value <pool> [vdev]
 *
 * Sets a pool property or, when a vdev argument is supplied, a vdev
 * property.  Returns 0 on success, EINVAL for a bad pool/vdev name, or
 * the error from the set callback.
 */
int
zpool_do_set(int argc, char **argv)
{
	set_cbdata_t cb = { 0 };
	int error;
	char *vdev = NULL;	/* strdup'd vdev name; freed before return */

	current_prop_type = ZFS_TYPE_POOL;
	if (argc > 1 && argv[1][0] == '-') {
		(void) fprintf(stderr, gettext("invalid option '%c'\n"),
		    argv[1][1]);
		usage(B_FALSE);
	}

	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing property=value "
		    "argument\n"));
		usage(B_FALSE);
	}

	if (argc < 3) {
		(void) fprintf(stderr, gettext("missing pool name\n"));
		usage(B_FALSE);
	}

	if (argc > 4) {
		(void) fprintf(stderr, gettext("too many pool names\n"));
		usage(B_FALSE);
	}

	cb.cb_propname = argv[1];
	cb.cb_type = ZFS_TYPE_POOL;
	cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
	cb.cb_value = strchr(cb.cb_propname, '=');
	if (cb.cb_value == NULL) {
		(void) fprintf(stderr, gettext("missing value in "
		    "property=value argument\n"));
		usage(B_FALSE);
	}

	/* Split "property=value" in place at the '='. */
	*(cb.cb_value) = '\0';
	cb.cb_value++;
	argc -= 2;
	argv += 2;

	/* argv[0] is pool name */
	if (!is_pool(argv[0])) {
		(void) fprintf(stderr,
		    gettext("cannot open '%s': is not a pool\n"), argv[0]);
		return (EINVAL);
	}

	/* argv[1], when supplied, is vdev name */
	if (argc == 2) {

		/* "root" is accepted as an alias for the root vdev */
		if (strcmp(argv[1], "root") == 0)
			vdev = strdup("root-0");
		else
			vdev = strdup(argv[1]);

		if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
			(void) fprintf(stderr, gettext(
			    "cannot find '%s' in '%s': device not in pool\n"),
			    vdev, argv[0]);
			free(vdev);
			return (EINVAL);
		}
		cb.cb_vdevs.cb_names = &vdev;
		cb.cb_vdevs.cb_names_count = 1;
		cb.cb_type = ZFS_TYPE_VDEV;
	}

	error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, set_callback, &cb);

	if (vdev != NULL)
		free(vdev);

	return (error);
}
13165
13166 /* Add up the total number of bytes left to initialize/trim across all vdevs */
13167 static uint64_t
vdev_activity_remaining(nvlist_t * nv,zpool_wait_activity_t activity)13168 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
13169 {
13170 uint64_t bytes_remaining;
13171 nvlist_t **child;
13172 uint_t c, children;
13173 vdev_stat_t *vs;
13174
13175 assert(activity == ZPOOL_WAIT_INITIALIZE ||
13176 activity == ZPOOL_WAIT_TRIM);
13177
13178 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
13179 (uint64_t **)&vs, &c) == 0);
13180
13181 if (activity == ZPOOL_WAIT_INITIALIZE &&
13182 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
13183 bytes_remaining = vs->vs_initialize_bytes_est -
13184 vs->vs_initialize_bytes_done;
13185 else if (activity == ZPOOL_WAIT_TRIM &&
13186 vs->vs_trim_state == VDEV_TRIM_ACTIVE)
13187 bytes_remaining = vs->vs_trim_bytes_est -
13188 vs->vs_trim_bytes_done;
13189 else
13190 bytes_remaining = 0;
13191
13192 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13193 &child, &children) != 0)
13194 children = 0;
13195
13196 for (c = 0; c < children; c++)
13197 bytes_remaining += vdev_activity_remaining(child[c], activity);
13198
13199 return (bytes_remaining);
13200 }
13201
13202 /* Add up the total number of bytes left to rebuild across top-level vdevs */
13203 static uint64_t
vdev_activity_top_remaining(nvlist_t * nv)13204 vdev_activity_top_remaining(nvlist_t *nv)
13205 {
13206 uint64_t bytes_remaining = 0;
13207 nvlist_t **child;
13208 uint_t children;
13209 int error;
13210
13211 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13212 &child, &children) != 0)
13213 children = 0;
13214
13215 for (uint_t c = 0; c < children; c++) {
13216 vdev_rebuild_stat_t *vrs;
13217 uint_t i;
13218
13219 error = nvlist_lookup_uint64_array(child[c],
13220 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
13221 if (error == 0) {
13222 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
13223 bytes_remaining += (vrs->vrs_bytes_est -
13224 vrs->vrs_bytes_rebuilt);
13225 }
13226 }
13227 }
13228
13229 return (bytes_remaining);
13230 }
13231
13232 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
13233 static boolean_t
vdev_any_spare_replacing(nvlist_t * nv)13234 vdev_any_spare_replacing(nvlist_t *nv)
13235 {
13236 nvlist_t **child;
13237 uint_t c, children;
13238 const char *vdev_type;
13239
13240 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
13241
13242 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
13243 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
13244 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
13245 return (B_TRUE);
13246 }
13247
13248 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13249 &child, &children) != 0)
13250 children = 0;
13251
13252 for (c = 0; c < children; c++) {
13253 if (vdev_any_spare_replacing(child[c]))
13254 return (B_TRUE);
13255 }
13256
13257 return (B_FALSE);
13258 }
13259
/* State shared between zpool_do_wait() and its status-printing thread. */
typedef struct wait_data {
	char *wd_poolname;		/* pool to wait on */
	boolean_t wd_scripted;		/* -H: no headers, tab separators */
	boolean_t wd_exact;		/* -p: exact (parsable) byte counts */
	boolean_t wd_headers_once;	/* print column headers only once */
	boolean_t wd_should_exit;	/* set by main thread to stop printing */
	/* Which activities to wait for */
	boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
	float wd_interval;		/* seconds between status rows */
	pthread_cond_t wd_cv;		/* signaled when wd_should_exit set */
	pthread_mutex_t wd_mutex;	/* protects wd_should_exit / wd_cv */
} wait_data_t;
13272
13273 /*
13274 * Print to stdout a single line, containing one column for each activity that
13275 * we are waiting for specifying how many bytes of work are left for that
13276 * activity.
13277 */
13278 static void
print_wait_status_row(wait_data_t * wd,zpool_handle_t * zhp,int row)13279 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
13280 {
13281 nvlist_t *config, *nvroot;
13282 uint_t c;
13283 int i;
13284 pool_checkpoint_stat_t *pcs = NULL;
13285 pool_scan_stat_t *pss = NULL;
13286 pool_removal_stat_t *prs = NULL;
13287 pool_raidz_expand_stat_t *pres = NULL;
13288 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
13289 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
13290 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
13291
13292 /* Calculate the width of each column */
13293 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13294 /*
13295 * Make sure we have enough space in the col for pretty-printed
13296 * numbers and for the column header, and then leave a couple
13297 * spaces between cols for readability.
13298 */
13299 col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
13300 }
13301
13302 if (timestamp_fmt != NODATE)
13303 print_timestamp(timestamp_fmt);
13304
13305 /* Print header if appropriate */
13306 int term_height = terminal_height();
13307 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
13308 row % (term_height-1) == 0);
13309 if (!wd->wd_scripted && (row == 0 || reprint_header)) {
13310 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13311 if (wd->wd_enabled[i])
13312 (void) printf("%*s", col_widths[i], headers[i]);
13313 }
13314 (void) fputc('\n', stdout);
13315 }
13316
13317 /* Bytes of work remaining in each activity */
13318 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
13319
13320 bytes_rem[ZPOOL_WAIT_FREE] =
13321 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
13322
13323 config = zpool_get_config(zhp, NULL);
13324 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
13325
13326 (void) nvlist_lookup_uint64_array(nvroot,
13327 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
13328 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
13329 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
13330
13331 (void) nvlist_lookup_uint64_array(nvroot,
13332 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
13333 if (prs != NULL && prs->prs_state == DSS_SCANNING)
13334 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
13335 prs->prs_copied;
13336
13337 (void) nvlist_lookup_uint64_array(nvroot,
13338 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
13339 if (pss != NULL && pss->pss_state == DSS_SCANNING &&
13340 pss->pss_pass_scrub_pause == 0) {
13341 int64_t rem = pss->pss_to_examine - pss->pss_issued;
13342 if (pss->pss_func == POOL_SCAN_SCRUB)
13343 bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
13344 else
13345 bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
13346 } else if (check_rebuilding(nvroot, NULL)) {
13347 bytes_rem[ZPOOL_WAIT_RESILVER] =
13348 vdev_activity_top_remaining(nvroot);
13349 }
13350
13351 (void) nvlist_lookup_uint64_array(nvroot,
13352 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
13353 if (pres != NULL && pres->pres_state == DSS_SCANNING) {
13354 int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
13355 bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
13356 }
13357
13358 bytes_rem[ZPOOL_WAIT_INITIALIZE] =
13359 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
13360 bytes_rem[ZPOOL_WAIT_TRIM] =
13361 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
13362
13363 /*
13364 * A replace finishes after resilvering finishes, so the amount of work
13365 * left for a replace is the same as for resilvering.
13366 *
13367 * It isn't quite correct to say that if we have any 'spare' or
13368 * 'replacing' vdevs and a resilver is happening, then a replace is in
13369 * progress, like we do here. When a hot spare is used, the faulted vdev
13370 * is not removed after the hot spare is resilvered, so parent 'spare'
13371 * vdev is not removed either. So we could have a 'spare' vdev, but be
13372 * resilvering for a different reason. However, we use it as a heuristic
13373 * because we don't have access to the DTLs, which could tell us whether
13374 * or not we have really finished resilvering a hot spare.
13375 */
13376 if (vdev_any_spare_replacing(nvroot))
13377 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
13378
13379 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13380 char buf[64];
13381 if (!wd->wd_enabled[i])
13382 continue;
13383
13384 if (wd->wd_exact) {
13385 (void) snprintf(buf, sizeof (buf), "%" PRIi64,
13386 bytes_rem[i]);
13387 } else {
13388 zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
13389 }
13390
13391 if (wd->wd_scripted)
13392 (void) printf(i == 0 ? "%s" : "\t%s", buf);
13393 else
13394 (void) printf(" %*s", col_widths[i] - 1, buf);
13395 }
13396 (void) printf("\n");
13397 (void) fflush(stdout);
13398 }
13399
/*
 * Thread body for verbose "zpool wait": print one status row every
 * wd_interval seconds until signaled to exit via wd_cv/wd_should_exit, or
 * until the pool disappears or an error occurs.  The thread's return value
 * is 0 for a clean exit (including pool disappearance) and 1 on error.
 */
static void *
wait_status_thread(void *arg)
{
	wait_data_t *wd = (wait_data_t *)arg;
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
		return (void *)(1);

	for (int row = 0; ; row++) {
		boolean_t missing;
		struct timespec timeout;
		int ret = 0;
		(void) clock_gettime(CLOCK_REALTIME, &timeout);

		/* A pool that went away exits cleanly; a refresh error doesn't */
		if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
		    zpool_props_refresh(zhp) != 0) {
			zpool_close(zhp);
			return (void *)(uintptr_t)(missing ? 0 : 1);
		}

		print_wait_status_row(wd, zhp, row);

		/* Convert the fractional interval into an absolute deadline */
		timeout.tv_sec += floor(wd->wd_interval);
		long nanos = timeout.tv_nsec +
		    (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
		if (nanos >= NANOSEC) {
			timeout.tv_sec++;
			timeout.tv_nsec = nanos - NANOSEC;
		} else {
			timeout.tv_nsec = nanos;
		}
		(void) pthread_mutex_lock(&wd->wd_mutex);
		if (!wd->wd_should_exit)
			ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
			    &timeout);
		(void) pthread_mutex_unlock(&wd->wd_mutex);
		if (ret == 0) {
			break; /* signaled by main thread */
		} else if (ret != ETIMEDOUT) {
			(void) fprintf(stderr, gettext("pthread_cond_timedwait "
			    "failed: %s\n"), strerror(ret));
			zpool_close(zhp);
			return (void *)(uintptr_t)(1);
		}
	}

	zpool_close(zhp);
	return (void *)(0);
}
13450
13451 int
zpool_do_wait(int argc,char ** argv)13452 zpool_do_wait(int argc, char **argv)
13453 {
13454 boolean_t verbose = B_FALSE;
13455 int c, i;
13456 unsigned long count;
13457 pthread_t status_thr;
13458 int error = 0;
13459 zpool_handle_t *zhp;
13460
13461 wait_data_t wd;
13462 wd.wd_scripted = B_FALSE;
13463 wd.wd_exact = B_FALSE;
13464 wd.wd_headers_once = B_FALSE;
13465 wd.wd_should_exit = B_FALSE;
13466
13467 (void) pthread_mutex_init(&wd.wd_mutex, NULL);
13468 (void) pthread_cond_init(&wd.wd_cv, NULL);
13469
13470 /* By default, wait for all types of activity. */
13471 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
13472 wd.wd_enabled[i] = B_TRUE;
13473
13474 while ((c = getopt(argc, argv, "HpT:t:")) != -1) {
13475 switch (c) {
13476 case 'H':
13477 wd.wd_scripted = B_TRUE;
13478 break;
13479 case 'n':
13480 wd.wd_headers_once = B_TRUE;
13481 break;
13482 case 'p':
13483 wd.wd_exact = B_TRUE;
13484 break;
13485 case 'T':
13486 get_timestamp_arg(*optarg);
13487 break;
13488 case 't':
13489 /* Reset activities array */
13490 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
13491
13492 for (char *tok; (tok = strsep(&optarg, ",")); ) {
13493 static const char *const col_opts[] = {
13494 "discard", "free", "initialize", "replace",
13495 "remove", "resilver", "scrub", "trim",
13496 "raidz_expand" };
13497
13498 for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
13499 if (strcmp(tok, col_opts[i]) == 0) {
13500 wd.wd_enabled[i] = B_TRUE;
13501 goto found;
13502 }
13503
13504 (void) fprintf(stderr,
13505 gettext("invalid activity '%s'\n"), tok);
13506 usage(B_FALSE);
13507 found:;
13508 }
13509 break;
13510 case '?':
13511 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
13512 optopt);
13513 usage(B_FALSE);
13514 }
13515 }
13516
13517 argc -= optind;
13518 argv += optind;
13519
13520 get_interval_count(&argc, argv, &wd.wd_interval, &count);
13521 if (count != 0) {
13522 /* This subcmd only accepts an interval, not a count */
13523 (void) fprintf(stderr, gettext("too many arguments\n"));
13524 usage(B_FALSE);
13525 }
13526
13527 if (wd.wd_interval != 0)
13528 verbose = B_TRUE;
13529
13530 if (argc < 1) {
13531 (void) fprintf(stderr, gettext("missing 'pool' argument\n"));
13532 usage(B_FALSE);
13533 }
13534 if (argc > 1) {
13535 (void) fprintf(stderr, gettext("too many arguments\n"));
13536 usage(B_FALSE);
13537 }
13538
13539 wd.wd_poolname = argv[0];
13540
13541 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
13542 return (1);
13543
13544 if (verbose) {
13545 /*
13546 * We use a separate thread for printing status updates because
13547 * the main thread will call lzc_wait(), which blocks as long
13548 * as an activity is in progress, which can be a long time.
13549 */
13550 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
13551 != 0) {
13552 (void) fprintf(stderr, gettext("failed to create status"
13553 "thread: %s\n"), strerror(errno));
13554 zpool_close(zhp);
13555 return (1);
13556 }
13557 }
13558
13559 /*
13560 * Loop over all activities that we are supposed to wait for until none
13561 * of them are in progress. Note that this means we can end up waiting
13562 * for more activities to complete than just those that were in progress
13563 * when we began waiting; if an activity we are interested in begins
13564 * while we are waiting for another activity, we will wait for both to
13565 * complete before exiting.
13566 */
13567 for (;;) {
13568 boolean_t missing = B_FALSE;
13569 boolean_t any_waited = B_FALSE;
13570
13571 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13572 boolean_t waited;
13573
13574 if (!wd.wd_enabled[i])
13575 continue;
13576
13577 error = zpool_wait_status(zhp, i, &missing, &waited);
13578 if (error != 0 || missing)
13579 break;
13580
13581 any_waited = (any_waited || waited);
13582 }
13583
13584 if (error != 0 || missing || !any_waited)
13585 break;
13586 }
13587
13588 zpool_close(zhp);
13589
13590 if (verbose) {
13591 uintptr_t status;
13592 (void) pthread_mutex_lock(&wd.wd_mutex);
13593 wd.wd_should_exit = B_TRUE;
13594 (void) pthread_cond_signal(&wd.wd_cv);
13595 (void) pthread_mutex_unlock(&wd.wd_mutex);
13596 (void) pthread_join(status_thr, (void *)&status);
13597 if (status != 0)
13598 error = status;
13599 }
13600
13601 (void) pthread_mutex_destroy(&wd.wd_mutex);
13602 (void) pthread_cond_destroy(&wd.wd_cv);
13603 return (error);
13604 }
13605
13606 /*
13607 * zpool ddtprune -d|-p <amount> <pool>
13608 *
13609 * -d <days> Prune entries <days> old and older
13610 * -p <percent> Prune <percent> amount of entries
13611 *
13612 * Prune single reference entries from DDT to satisfy the amount specified.
13613 */
13614 int
zpool_do_ddt_prune(int argc,char ** argv)13615 zpool_do_ddt_prune(int argc, char **argv)
13616 {
13617 zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE;
13618 uint64_t amount = 0;
13619 zpool_handle_t *zhp;
13620 char *endptr;
13621 int c;
13622
13623 while ((c = getopt(argc, argv, "d:p:")) != -1) {
13624 switch (c) {
13625 case 'd':
13626 if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) {
13627 (void) fprintf(stderr, gettext("-d cannot be "
13628 "combined with -p option\n"));
13629 usage(B_FALSE);
13630 }
13631 errno = 0;
13632 amount = strtoull(optarg, &endptr, 0);
13633 if (errno != 0 || *endptr != '\0' || amount == 0) {
13634 (void) fprintf(stderr,
13635 gettext("invalid days value\n"));
13636 usage(B_FALSE);
13637 }
13638 amount *= 86400; /* convert days to seconds */
13639 unit = ZPOOL_DDT_PRUNE_AGE;
13640 break;
13641 case 'p':
13642 if (unit == ZPOOL_DDT_PRUNE_AGE) {
13643 (void) fprintf(stderr, gettext("-p cannot be "
13644 "combined with -d option\n"));
13645 usage(B_FALSE);
13646 }
13647 errno = 0;
13648 amount = strtoull(optarg, &endptr, 0);
13649 if (errno != 0 || *endptr != '\0' ||
13650 amount == 0 || amount > 100) {
13651 (void) fprintf(stderr,
13652 gettext("invalid percentage value\n"));
13653 usage(B_FALSE);
13654 }
13655 unit = ZPOOL_DDT_PRUNE_PERCENTAGE;
13656 break;
13657 case '?':
13658 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
13659 optopt);
13660 usage(B_FALSE);
13661 }
13662 }
13663 argc -= optind;
13664 argv += optind;
13665
13666 if (unit == ZPOOL_DDT_PRUNE_NONE) {
13667 (void) fprintf(stderr,
13668 gettext("missing amount option (-d|-p <value>)\n"));
13669 usage(B_FALSE);
13670 } else if (argc < 1) {
13671 (void) fprintf(stderr, gettext("missing pool argument\n"));
13672 usage(B_FALSE);
13673 } else if (argc > 1) {
13674 (void) fprintf(stderr, gettext("too many arguments\n"));
13675 usage(B_FALSE);
13676 }
13677 zhp = zpool_open(g_zfs, argv[0]);
13678 if (zhp == NULL)
13679 return (-1);
13680
13681 int error = zpool_ddt_prune(zhp, unit, amount);
13682
13683 zpool_close(zhp);
13684
13685 return (error);
13686 }
13687
13688 static int
find_command_idx(const char * command,int * idx)13689 find_command_idx(const char *command, int *idx)
13690 {
13691 for (int i = 0; i < NCOMMAND; ++i) {
13692 if (command_table[i].name == NULL)
13693 continue;
13694
13695 if (strcmp(command, command_table[i].name) == 0) {
13696 *idx = i;
13697 return (0);
13698 }
13699 }
13700 return (1);
13701 }
13702
13703 /*
13704 * Display version message
13705 */
13706 static int
zpool_do_version(int argc,char ** argv)13707 zpool_do_version(int argc, char **argv)
13708 {
13709 int c;
13710 nvlist_t *jsobj = NULL, *zfs_ver = NULL;
13711 boolean_t json = B_FALSE;
13712
13713 struct option long_options[] = {
13714 {"json", no_argument, NULL, 'j'},
13715 };
13716
13717 while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
13718 switch (c) {
13719 case 'j':
13720 json = B_TRUE;
13721 jsobj = zpool_json_schema(0, 1);
13722 break;
13723 case '?':
13724 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
13725 optopt);
13726 usage(B_FALSE);
13727 }
13728 }
13729
13730 argc -= optind;
13731 if (argc != 0) {
13732 (void) fprintf(stderr, "too many arguments\n");
13733 usage(B_FALSE);
13734 }
13735
13736 if (json) {
13737 zfs_ver = zfs_version_nvlist();
13738 if (zfs_ver) {
13739 fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);
13740 zcmd_print_json(jsobj);
13741 fnvlist_free(zfs_ver);
13742 return (0);
13743 } else
13744 return (-1);
13745 } else
13746 return (zfs_version_print() != 0);
13747 }
13748
13749 /* Display documentation */
13750 static int
zpool_do_help(int argc,char ** argv)13751 zpool_do_help(int argc, char **argv)
13752 {
13753 char page[MAXNAMELEN];
13754 if (argc < 3 || strcmp(argv[2], "zpool") == 0)
13755 (void) strcpy(page, "zpool");
13756 else if (strcmp(argv[2], "concepts") == 0 ||
13757 strcmp(argv[2], "props") == 0)
13758 (void) snprintf(page, sizeof (page), "zpool%s", argv[2]);
13759 else
13760 (void) snprintf(page, sizeof (page), "zpool-%s", argv[2]);
13761
13762 (void) execlp("man", "man", page, NULL);
13763
13764 fprintf(stderr, "couldn't run man program: %s", strerror(errno));
13765 return (-1);
13766 }
13767
13768 /*
13769 * Do zpool_load_compat() and print error message on failure
13770 */
13771 static zpool_compat_status_t
zpool_do_load_compat(const char * compat,boolean_t * list)13772 zpool_do_load_compat(const char *compat, boolean_t *list)
13773 {
13774 char report[1024];
13775
13776 zpool_compat_status_t ret;
13777
13778 ret = zpool_load_compat(compat, list, report, 1024);
13779 switch (ret) {
13780
13781 case ZPOOL_COMPATIBILITY_OK:
13782 break;
13783
13784 case ZPOOL_COMPATIBILITY_NOFILES:
13785 case ZPOOL_COMPATIBILITY_BADFILE:
13786 case ZPOOL_COMPATIBILITY_BADTOKEN:
13787 (void) fprintf(stderr, "Error: %s\n", report);
13788 break;
13789
13790 case ZPOOL_COMPATIBILITY_WARNTOKEN:
13791 (void) fprintf(stderr, "Warning: %s\n", report);
13792 ret = ZPOOL_COMPATIBILITY_OK;
13793 break;
13794 }
13795 return (ret);
13796 }
13797
/*
 * zpool entry point: set up locale/libzfs state, dispatch argv[1] to the
 * matching command_table[] handler (with special cases for help/version,
 * property shorthand, and the 'freeze' debug hook), then log the command
 * to pool history and clean up.
 */
int
main(int argc, char **argv)
{
	int ret = 0;
	int i = 0;
	char *cmdname;
	char **newargv;

	(void) setlocale(LC_ALL, "");
	/* force "C" numeric formatting so parsed/printed numbers are stable */
	(void) setlocale(LC_NUMERIC, "C");
	(void) textdomain(TEXT_DOMAIN);
	srand(time(NULL));

	/* suppress getopt's own diagnostics; commands print their own */
	opterr = 0;

	/*
	 * Make sure the user has specified some command.
	 */
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing command\n"));
		usage(B_FALSE);
	}

	cmdname = argv[1];

	/*
	 * Special case '-?'
	 */
	if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
		usage(B_TRUE);

	/*
	 * Special case '-V|--version'
	 */
	if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
		return (zfs_version_print() != 0);

	/*
	 * Special case 'help'
	 */
	if (strcmp(cmdname, "help") == 0)
		return (zpool_do_help(argc, argv));

	if ((g_zfs = libzfs_init()) == NULL) {
		(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
		return (1);
	}

	libzfs_print_on_error(g_zfs, B_TRUE);

	/* capture the full command line for pool history logging below */
	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));

	/*
	 * Many commands modify input strings for string parsing reasons.
	 * We create a copy to protect the original argv.
	 */
	newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
	for (i = 0; i < argc; i++)
		newargv[i] = strdup(argv[i]);
	newargv[argc] = NULL;

	/*
	 * Run the appropriate command.
	 */
	if (find_command_idx(cmdname, &i) == 0) {
		current_command = &command_table[i];
		/* handlers see argv starting at their own subcommand args */
		ret = command_table[i].func(argc - 1, newargv + 1);
	} else if (strchr(cmdname, '=')) {
		/* 'zpool name=value ...' shorthand dispatches to 'set' */
		verify(find_command_idx("set", &i) == 0);
		current_command = &command_table[i];
		ret = command_table[i].func(argc, newargv);
	} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
		/*
		 * 'freeze' is a vile debugging abomination, so we treat
		 * it as such.
		 */
		zfs_cmd_t zc = {"\0"};

		(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
		ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
		if (ret != 0) {
			(void) fprintf(stderr,
			    gettext("failed to freeze pool: %d\n"), errno);
			ret = 1;
		}

		/* don't record the debug hook in pool history */
		log_history = 0;
	} else {
		(void) fprintf(stderr, gettext("unrecognized "
		    "command '%s'\n"), cmdname);
		usage(B_FALSE);
	}

	for (i = 0; i < argc; i++)
		free(newargv[i]);
	free(newargv);

	/* only successful, history-worthy commands are logged */
	if (ret == 0 && log_history)
		(void) zpool_log_history(g_zfs, history_str);

	libzfs_fini(g_zfs);

	/*
	 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
	 * for the purposes of running ::findleaks.
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	return (ret);
}
13911