1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22
23 /*
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
27 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
28 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
29 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
30 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
31 * Copyright (c) 2017 Datto Inc.
32 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
33 * Copyright (c) 2017, Intel Corporation.
34 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
35 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
36 * Copyright (c) 2021, 2023, Klara Inc.
37 * Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.
38 */
39
40 #include <assert.h>
41 #include <ctype.h>
42 #include <dirent.h>
43 #include <errno.h>
44 #include <fcntl.h>
45 #include <getopt.h>
46 #include <libgen.h>
47 #include <libintl.h>
48 #include <libuutil.h>
49 #include <locale.h>
50 #include <pthread.h>
51 #include <stdio.h>
52 #include <stdlib.h>
53 #include <string.h>
54 #include <thread_pool.h>
55 #include <time.h>
56 #include <unistd.h>
57 #include <pwd.h>
58 #include <zone.h>
59 #include <sys/wait.h>
60 #include <zfs_prop.h>
61 #include <sys/fs/zfs.h>
62 #include <sys/stat.h>
63 #include <sys/systeminfo.h>
64 #include <sys/fm/fs/zfs.h>
65 #include <sys/fm/util.h>
66 #include <sys/fm/protocol.h>
67 #include <sys/zfs_ioctl.h>
68 #include <sys/mount.h>
69 #include <sys/sysmacros.h>
71 #include <math.h>
72
73 #include <libzfs.h>
74 #include <libzutil.h>
75
76 #include "zpool_util.h"
77 #include "zfs_comutil.h"
78 #include "zfeature_common.h"
79 #include "zfs_valstr.h"
80
81 #include "statcommon.h"
82
83 libzfs_handle_t *g_zfs;
84
85 static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */
86
87 static int zpool_do_create(int, char **);
88 static int zpool_do_destroy(int, char **);
89
90 static int zpool_do_add(int, char **);
91 static int zpool_do_remove(int, char **);
92 static int zpool_do_labelclear(int, char **);
93
94 static int zpool_do_checkpoint(int, char **);
95 static int zpool_do_prefetch(int, char **);
96
97 static int zpool_do_list(int, char **);
98 static int zpool_do_iostat(int, char **);
99 static int zpool_do_status(int, char **);
100
101 static int zpool_do_online(int, char **);
102 static int zpool_do_offline(int, char **);
103 static int zpool_do_clear(int, char **);
104 static int zpool_do_reopen(int, char **);
105
106 static int zpool_do_reguid(int, char **);
107
108 static int zpool_do_attach(int, char **);
109 static int zpool_do_detach(int, char **);
110 static int zpool_do_replace(int, char **);
111 static int zpool_do_split(int, char **);
112
113 static int zpool_do_initialize(int, char **);
114 static int zpool_do_scrub(int, char **);
115 static int zpool_do_resilver(int, char **);
116 static int zpool_do_trim(int, char **);
117
118 static int zpool_do_import(int, char **);
119 static int zpool_do_export(int, char **);
120
121 static int zpool_do_upgrade(int, char **);
122
123 static int zpool_do_history(int, char **);
124 static int zpool_do_events(int, char **);
125
126 static int zpool_do_get(int, char **);
127 static int zpool_do_set(int, char **);
128
129 static int zpool_do_sync(int, char **);
130
131 static int zpool_do_version(int, char **);
132
133 static int zpool_do_wait(int, char **);
134
135 static int zpool_do_ddt_prune(int, char **);
136
137 static int zpool_do_help(int argc, char **argv);
138
139 static zpool_compat_status_t zpool_do_load_compat(
140 const char *, boolean_t *);
141
142 enum zpool_options {
143 ZPOOL_OPTION_POWER = 1024,
144 ZPOOL_OPTION_ALLOW_INUSE,
145 ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
146 ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
147 ZPOOL_OPTION_POOL_KEY_GUID,
148 ZPOOL_OPTION_JSON_NUMS_AS_INT,
149 ZPOOL_OPTION_JSON_FLAT_VDEVS
150 };
151
152 /*
153 * These libumem hooks provide a reasonable set of defaults for the allocator's
154 * debugging facilities.
155 */
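/*
 * These hooks only supply program defaults; a user could request the same
 * behavior by hand (a sketch, not from this source) via the environment
 * variables libumem reads, e.g.:
 *
 *	UMEM_DEBUG=default,verbose UMEM_LOGGING=fail,contents zpool status
 */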
156
157 #ifdef DEBUG
158 const char *
159 _umem_debug_init(void)
160 {
161 return ("default,verbose"); /* $UMEM_DEBUG setting */
162 }
163
164 const char *
165 _umem_logging_init(void)
166 {
167 return ("fail,contents"); /* $UMEM_LOGGING setting */
168 }
169 #endif
170
171 typedef enum {
172 HELP_ADD,
173 HELP_ATTACH,
174 HELP_CLEAR,
175 HELP_CREATE,
176 HELP_CHECKPOINT,
177 HELP_DDT_PRUNE,
178 HELP_DESTROY,
179 HELP_DETACH,
180 HELP_EXPORT,
181 HELP_HISTORY,
182 HELP_IMPORT,
183 HELP_IOSTAT,
184 HELP_LABELCLEAR,
185 HELP_LIST,
186 HELP_OFFLINE,
187 HELP_ONLINE,
188 HELP_PREFETCH,
189 HELP_REPLACE,
190 HELP_REMOVE,
191 HELP_INITIALIZE,
192 HELP_SCRUB,
193 HELP_RESILVER,
194 HELP_TRIM,
195 HELP_STATUS,
196 HELP_UPGRADE,
197 HELP_EVENTS,
198 HELP_GET,
199 HELP_SET,
200 HELP_SPLIT,
201 HELP_SYNC,
202 HELP_REGUID,
203 HELP_REOPEN,
204 HELP_VERSION,
205 HELP_WAIT
206 } zpool_help_t;
207
208
209 /*
210  * Flags for stats to display with "zpool iostat"
211 */
212 enum iostat_type {
213 IOS_DEFAULT = 0,
214 IOS_LATENCY = 1,
215 IOS_QUEUES = 2,
216 IOS_L_HISTO = 3,
217 IOS_RQ_HISTO = 4,
218 IOS_COUNT, /* always last element */
219 };
220
221 /* iostat_type entries as bitmasks */
222 #define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
223 #define IOS_LATENCY_M (1ULL << IOS_LATENCY)
224 #define IOS_QUEUES_M (1ULL << IOS_QUEUES)
225 #define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
226 #define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
227
228 /* Mask of all the histo bits */
229 #define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
230
231 /*
232 * Lookup table for iostat flags to nvlist names. Basically a list
233 * of all the nvlists a flag requires. Also specifies the order in
234 * which data gets printed in zpool iostat.
235 */
236 static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
237 [IOS_L_HISTO] = {
238 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
239 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
240 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
241 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
242 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
243 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
244 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
245 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
246 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
247 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
248 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
249 NULL},
250 [IOS_LATENCY] = {
251 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
252 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
253 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
254 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
255 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
256 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
257 NULL},
258 [IOS_QUEUES] = {
259 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
260 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
261 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
262 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
263 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
264 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
265 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
266 NULL},
267 [IOS_RQ_HISTO] = {
268 ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
269 ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
270 ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
271 ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
272 ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
273 ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
274 ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
275 ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
276 ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
277 ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
278 ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
279 ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
280 ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
281 ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
282 NULL},
283 };
284
285 static const char *pool_scan_func_str[] = {
286 "NONE",
287 "SCRUB",
288 "RESILVER",
289 "ERRORSCRUB"
290 };
291
292 static const char *pool_scan_state_str[] = {
293 "NONE",
294 "SCANNING",
295 "FINISHED",
296 "CANCELED",
297 "ERRORSCRUBBING"
298 };
299
300 static const char *vdev_rebuild_state_str[] = {
301 "NONE",
302 "ACTIVE",
303 "CANCELED",
304 "COMPLETE"
305 };
306
307 static const char *checkpoint_state_str[] = {
308 "NONE",
309 "EXISTS",
310 "DISCARDING"
311 };
312
313 static const char *vdev_state_str[] = {
314 "UNKNOWN",
315 "CLOSED",
316 "OFFLINE",
317 "REMOVED",
318 "CANT_OPEN",
319 "FAULTED",
320 "DEGRADED",
321 "ONLINE"
322 };
323
324 static const char *vdev_aux_str[] = {
325 "NONE",
326 "OPEN_FAILED",
327 "CORRUPT_DATA",
328 "NO_REPLICAS",
329 "BAD_GUID_SUM",
330 "TOO_SMALL",
331 "BAD_LABEL",
332 "VERSION_NEWER",
333 "VERSION_OLDER",
334 "UNSUP_FEAT",
335 "SPARED",
336 "ERR_EXCEEDED",
337 "IO_FAILURE",
338 "BAD_LOG",
339 "EXTERNAL",
340 "SPLIT_POOL",
341 "BAD_ASHIFT",
342 "EXTERNAL_PERSIST",
343 "ACTIVE",
344 "CHILDREN_OFFLINE",
345 "ASHIFT_TOO_BIG"
346 };
347
348 static const char *vdev_init_state_str[] = {
349 "NONE",
350 "ACTIVE",
351 "CANCELED",
352 "SUSPENDED",
353 "COMPLETE"
354 };
355
356 static const char *vdev_trim_state_str[] = {
357 "NONE",
358 "ACTIVE",
359 "CANCELED",
360 "SUSPENDED",
361 "COMPLETE"
362 };
363
364 #define ZFS_NICE_TIMESTAMP 100
365
366 /*
367 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
368 * Right now, only one histo bit is ever set at one time, so we can
369 * just do a highbit64(a)
370 */
371 #define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
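
/*
 * Worked example of the macro above: with only IOS_L_HISTO_M set,
 * (a & IOS_ANYHISTO_M) == (1ULL << IOS_L_HISTO), highbit64() returns
 * IOS_L_HISTO + 1, and IOS_HISTO_IDX() yields IOS_L_HISTO (3), a valid
 * index into vsx_type_to_nvlist[].
 */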
372
373 typedef struct zpool_command {
374 const char *name;
375 int (*func)(int, char **);
376 zpool_help_t usage;
377 } zpool_command_t;
378
379 /*
380 * Master command table. Each ZFS command has a name, associated function, and
381 * usage message. The usage messages need to be internationalized, so we have
382 * to have a function to return the usage message based on a command index.
383 *
384 * These commands are organized according to how they are displayed in the usage
385 * message. An empty command (one with a NULL name) indicates an empty line in
386 * the generic usage message.
387 */
388 static zpool_command_t command_table[] = {
389 { "version", zpool_do_version, HELP_VERSION },
390 { NULL },
391 { "create", zpool_do_create, HELP_CREATE },
392 { "destroy", zpool_do_destroy, HELP_DESTROY },
393 { NULL },
394 { "add", zpool_do_add, HELP_ADD },
395 { "remove", zpool_do_remove, HELP_REMOVE },
396 { NULL },
397 { "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
398 { NULL },
399 { "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
400 { "prefetch", zpool_do_prefetch, HELP_PREFETCH },
401 { NULL },
402 { "list", zpool_do_list, HELP_LIST },
403 { "iostat", zpool_do_iostat, HELP_IOSTAT },
404 { "status", zpool_do_status, HELP_STATUS },
405 { NULL },
406 { "online", zpool_do_online, HELP_ONLINE },
407 { "offline", zpool_do_offline, HELP_OFFLINE },
408 { "clear", zpool_do_clear, HELP_CLEAR },
409 { "reopen", zpool_do_reopen, HELP_REOPEN },
410 { NULL },
411 { "attach", zpool_do_attach, HELP_ATTACH },
412 { "detach", zpool_do_detach, HELP_DETACH },
413 { "replace", zpool_do_replace, HELP_REPLACE },
414 { "split", zpool_do_split, HELP_SPLIT },
415 { NULL },
416 { "initialize", zpool_do_initialize, HELP_INITIALIZE },
417 { "resilver", zpool_do_resilver, HELP_RESILVER },
418 { "scrub", zpool_do_scrub, HELP_SCRUB },
419 { "trim", zpool_do_trim, HELP_TRIM },
420 { NULL },
421 { "import", zpool_do_import, HELP_IMPORT },
422 { "export", zpool_do_export, HELP_EXPORT },
423 { "upgrade", zpool_do_upgrade, HELP_UPGRADE },
424 { "reguid", zpool_do_reguid, HELP_REGUID },
425 { NULL },
426 { "history", zpool_do_history, HELP_HISTORY },
427 { "events", zpool_do_events, HELP_EVENTS },
428 { NULL },
429 { "get", zpool_do_get, HELP_GET },
430 { "set", zpool_do_set, HELP_SET },
431 { "sync", zpool_do_sync, HELP_SYNC },
432 { NULL },
433 { "wait", zpool_do_wait, HELP_WAIT },
434 { NULL },
435 { "ddtprune", zpool_do_ddt_prune, HELP_DDT_PRUNE },
436 };
437
438 #define NCOMMAND (ARRAY_SIZE(command_table))
439
440 #define VDEV_ALLOC_CLASS_LOGS "logs"
441
442 #define MAX_CMD_LEN 256
443
444 static zpool_command_t *current_command;
445 static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
446 static char history_str[HIS_MAX_RECORD_LEN];
447 static boolean_t log_history = B_TRUE;
448 static uint_t timestamp_fmt = NODATE;
449
450 static const char *
451 get_usage(zpool_help_t idx)
452 {
453 switch (idx) {
454 case HELP_ADD:
455 return (gettext("\tadd [-afgLnP] [-o property=value] "
456 "<pool> <vdev> ...\n"));
457 case HELP_ATTACH:
458 return (gettext("\tattach [-fsw] [-o property=value] "
459 "<pool> <device> <new-device>\n"));
460 case HELP_CLEAR:
461 return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
462 case HELP_CREATE:
463 return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
464 "\t [-O file-system-property=value] ... \n"
465 "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
466 case HELP_CHECKPOINT:
467 return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
468 case HELP_DESTROY:
469 return (gettext("\tdestroy [-f] <pool>\n"));
470 case HELP_DETACH:
471 return (gettext("\tdetach <pool> <device>\n"));
472 case HELP_EXPORT:
473 return (gettext("\texport [-af] <pool> ...\n"));
474 case HELP_HISTORY:
475 return (gettext("\thistory [-il] [<pool>] ...\n"));
476 case HELP_IMPORT:
477 return (gettext("\timport [-d dir] [-D]\n"
478 "\timport [-o mntopts] [-o property=value] ... \n"
479 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
480 "[-R root] [-F [-n]] -a\n"
481 "\timport [-o mntopts] [-o property=value] ... \n"
482 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
483 "[-R root] [-F [-n]]\n"
484 "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
485 case HELP_IOSTAT:
486 return (gettext("\tiostat [[[-c [script1,script2,...]"
487 "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
488 "\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
489 " [[-n] interval [count]]\n"));
490 case HELP_LABELCLEAR:
491 return (gettext("\tlabelclear [-f] <vdev>\n"));
492 case HELP_LIST:
493 return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "
494 "[--json-int, --json-pool-key-guid]] ...\n"
495 "\t [-T d|u] [pool] [interval [count]]\n"));
496 case HELP_PREFETCH:
497 return (gettext("\tprefetch -t <type> [<type opts>] <pool>\n"
498 "\t -t ddt <pool>\n"));
499 case HELP_OFFLINE:
500 return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
501 "<device> ...\n"));
502 case HELP_ONLINE:
503 return (gettext("\tonline [--power][-e] <pool> <device> "
504 "...\n"));
505 case HELP_REPLACE:
506 return (gettext("\treplace [-fsw] [-o property=value] "
507 "<pool> <device> [new-device]\n"));
508 case HELP_REMOVE:
509 return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
510 case HELP_REOPEN:
511 return (gettext("\treopen [-n] <pool>\n"));
512 case HELP_INITIALIZE:
513 return (gettext("\tinitialize [-c | -s | -u] [-w] <-a | <pool> "
514 "[<device> ...]>\n"));
515 case HELP_SCRUB:
516 return (gettext("\tscrub [-e | -s | -p | -C | -E | -S] [-w] "
517 "<-a | <pool> [<pool> ...]>\n"));
518 case HELP_RESILVER:
519 return (gettext("\tresilver <pool> ...\n"));
520 case HELP_TRIM:
521 return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] "
522 "<-a | <pool> [<device> ...]>\n"));
523 case HELP_STATUS:
524 return (gettext("\tstatus [-DdegiLPpstvx] "
525 "[-c script1[,script2,...]] ...\n"
526 "\t [-j|--json [--json-flat-vdevs] [--json-int] "
527 "[--json-pool-key-guid]] ...\n"
528 "\t [-T d|u] [--power] [pool] [interval [count]]\n"));
529 case HELP_UPGRADE:
530 return (gettext("\tupgrade\n"
531 "\tupgrade -v\n"
532 "\tupgrade [-V version] <-a | pool ...>\n"));
533 case HELP_EVENTS:
534 return (gettext("\tevents [-vHf [pool] | -c]\n"));
535 case HELP_GET:
536 return (gettext("\tget [-Hp] [-j [--json-int, "
537 "--json-pool-key-guid]] ...\n"
538 "\t [-o \"all\" | field[,...]] "
539 "<\"all\" | property[,...]> <pool> ...\n"));
540 case HELP_SET:
541 return (gettext("\tset <property=value> <pool>\n"
542 "\tset <vdev_property=value> <pool> <vdev>\n"));
543 case HELP_SPLIT:
544 return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
545 "\t [-o property=value] <pool> <newpool> "
546 "[<device> ...]\n"));
547 case HELP_REGUID:
548 return (gettext("\treguid [-g guid] <pool>\n"));
549 case HELP_SYNC:
550 return (gettext("\tsync [pool] ...\n"));
551 case HELP_VERSION:
552 return (gettext("\tversion [-j]\n"));
553 case HELP_WAIT:
554 return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
555 "<pool> [interval]\n"));
556 case HELP_DDT_PRUNE:
557 return (gettext("\tddtprune -d|-p <amount> <pool>\n"));
558 default:
559 __builtin_unreachable();
560 }
561 }
562
563 /*
564 * Callback routine that will print out a pool property value.
565 */
566 static int
567 print_pool_prop_cb(int prop, void *cb)
568 {
569 FILE *fp = cb;
570
571 (void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
572
573 if (zpool_prop_readonly(prop))
574 (void) fprintf(fp, " NO ");
575 else
576 (void) fprintf(fp, " YES ");
577
578 if (zpool_prop_values(prop) == NULL)
579 (void) fprintf(fp, "-\n");
580 else
581 (void) fprintf(fp, "%s\n", zpool_prop_values(prop));
582
583 return (ZPROP_CONT);
584 }
585
586 /*
587 * Callback routine that will print out a vdev property value.
588 */
589 static int
590 print_vdev_prop_cb(int prop, void *cb)
591 {
592 FILE *fp = cb;
593
594 (void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
595
596 if (vdev_prop_readonly(prop))
597 (void) fprintf(fp, " NO ");
598 else
599 (void) fprintf(fp, " YES ");
600
601 if (vdev_prop_values(prop) == NULL)
602 (void) fprintf(fp, "-\n");
603 else
604 (void) fprintf(fp, "%s\n", vdev_prop_values(prop));
605
606 return (ZPROP_CONT);
607 }
608
609 /*
610 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
611 * '/dev/disk/by-vdev/L5'.
612 */
613 static const char *
614 vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
615 {
616 nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
617 if (vdev_nv == NULL) {
618 return (NULL);
619 }
620 return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
621 }
622
623 static int
624 zpool_power_on(zpool_handle_t *zhp, char *vdev)
625 {
626 return (zpool_power(zhp, vdev, B_TRUE));
627 }
628
629 static int
630 zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
631 {
632 int rc;
633
634 rc = zpool_power_on(zhp, vdev);
635 if (rc != 0)
636 return (rc);
637
638 zpool_disk_wait(vdev_name_to_path(zhp, vdev));
639
640 return (0);
641 }
642
643 static int
644 zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
645 {
646 nvlist_t *nv;
647 const char *path = NULL;
648 int rc;
649
650 /* Power up all the devices first */
651 FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
652 path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
653 if (path != NULL) {
654 rc = zpool_power_on(zhp, (char *)path);
655 if (rc != 0) {
656 return (rc);
657 }
658 }
659 }
660
661 /*
662 * Wait for their devices to show up. Since we powered them on
663 * at roughly the same time, they should all come online around
664 * the same time.
665 */
666 FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
667 path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
668 zpool_disk_wait(path);
669 }
670
671 return (0);
672 }
673
674 static int
675 zpool_power_off(zpool_handle_t *zhp, char *vdev)
676 {
677 return (zpool_power(zhp, vdev, B_FALSE));
678 }
679
680 /*
681 * Display usage message. If we're inside a command, display only the usage for
682 * that command. Otherwise, iterate over the entire command table and display
683 * a complete usage message.
684 */
685 static __attribute__((noreturn)) void
686 usage(boolean_t requested)
687 {
688 FILE *fp = requested ? stdout : stderr;
689
690 if (current_command == NULL) {
691 int i;
692
693 (void) fprintf(fp, gettext("usage: zpool command args ...\n"));
694 (void) fprintf(fp,
695 gettext("where 'command' is one of the following:\n\n"));
696
697 for (i = 0; i < NCOMMAND; i++) {
698 if (command_table[i].name == NULL)
699 (void) fprintf(fp, "\n");
700 else
701 (void) fprintf(fp, "%s",
702 get_usage(command_table[i].usage));
703 }
704
705 (void) fprintf(fp,
706 gettext("\nFor further help on a command or topic, "
707 "run: %s\n"), "zpool help [<topic>]");
708 } else {
709 (void) fprintf(fp, gettext("usage:\n"));
710 (void) fprintf(fp, "%s", get_usage(current_command->usage));
711 }
712
713 if (current_command != NULL &&
714 current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
715 ((strcmp(current_command->name, "set") == 0) ||
716 (strcmp(current_command->name, "get") == 0) ||
717 (strcmp(current_command->name, "list") == 0))) {
718
719 (void) fprintf(fp, "%s",
720 gettext("\nthe following properties are supported:\n"));
721
722 (void) fprintf(fp, "\n\t%-19s %s %s\n\n",
723 "PROPERTY", "EDIT", "VALUES");
724
725 /* Iterate over all properties */
726 if (current_prop_type == ZFS_TYPE_POOL) {
727 (void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
728 B_TRUE, current_prop_type);
729
730 (void) fprintf(fp, "\t%-19s ", "feature@...");
731 (void) fprintf(fp, "YES "
732 "disabled | enabled | active\n");
733
734 (void) fprintf(fp, gettext("\nThe feature@ properties "
735 "must be appended with a feature name.\n"
736 "See zpool-features(7).\n"));
737 } else if (current_prop_type == ZFS_TYPE_VDEV) {
738 (void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
739 B_TRUE, current_prop_type);
740 }
741 }
742
743 /*
744 * See comments at end of main().
745 */
746 if (getenv("ZFS_ABORT") != NULL) {
747 (void) printf("dumping core by request\n");
748 abort();
749 }
750
751 exit(requested ? 0 : 2);
752 }
753
754 /*
755 * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
756 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
757 * if none specified.
758 *
759 * -c Cancel. Ends active initializing.
760 * -s Suspend. Initializing can then be restarted with no flags.
761 * -u Uninitialize. Clears initialization state.
762 * -w Wait. Blocks until initializing has completed.
763 */
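/*
 * Usage sketch (pool and device names are hypothetical):
 *
 *	zpool initialize tank		start initializing all of 'tank'
 *	zpool initialize -w tank sda	initialize 'sda' and block until done
 *	zpool initialize -s tank	suspend active initializing
 */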
764 int
765 zpool_do_initialize(int argc, char **argv)
766 {
767 int c;
768 char *poolname;
769 zpool_handle_t *zhp;
770 int err = 0;
771 boolean_t wait = B_FALSE;
772 boolean_t initialize_all = B_FALSE;
773
774 struct option long_options[] = {
775 {"cancel", no_argument, NULL, 'c'},
776 {"suspend", no_argument, NULL, 's'},
777 {"uninit", no_argument, NULL, 'u'},
778 {"wait", no_argument, NULL, 'w'},
779 {"all", no_argument, NULL, 'a'},
780 {0, 0, 0, 0}
781 };
782
783 pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
784 while ((c = getopt_long(argc, argv, "acsuw", long_options,
785 NULL)) != -1) {
786 switch (c) {
787 case 'a':
788 initialize_all = B_TRUE;
789 break;
790 case 'c':
791 if (cmd_type != POOL_INITIALIZE_START &&
792 cmd_type != POOL_INITIALIZE_CANCEL) {
793 (void) fprintf(stderr, gettext("-c cannot be "
794 "combined with other options\n"));
795 usage(B_FALSE);
796 }
797 cmd_type = POOL_INITIALIZE_CANCEL;
798 break;
799 case 's':
800 if (cmd_type != POOL_INITIALIZE_START &&
801 cmd_type != POOL_INITIALIZE_SUSPEND) {
802 (void) fprintf(stderr, gettext("-s cannot be "
803 "combined with other options\n"));
804 usage(B_FALSE);
805 }
806 cmd_type = POOL_INITIALIZE_SUSPEND;
807 break;
808 case 'u':
809 if (cmd_type != POOL_INITIALIZE_START &&
810 cmd_type != POOL_INITIALIZE_UNINIT) {
811 (void) fprintf(stderr, gettext("-u cannot be "
812 "combined with other options\n"));
813 usage(B_FALSE);
814 }
815 cmd_type = POOL_INITIALIZE_UNINIT;
816 break;
817 case 'w':
818 wait = B_TRUE;
819 break;
820 case '?':
821 if (optopt != 0) {
822 (void) fprintf(stderr,
823 gettext("invalid option '%c'\n"), optopt);
824 } else {
825 (void) fprintf(stderr,
826 gettext("invalid option '%s'\n"),
827 argv[optind - 1]);
828 }
829 usage(B_FALSE);
830 }
831 }
832
833 argc -= optind;
834 argv += optind;
835
836 initialize_cbdata_t cbdata = {
837 .wait = wait,
838 .cmd_type = cmd_type
839 };
840
841 if (initialize_all && argc > 0) {
842 (void) fprintf(stderr, gettext("-a cannot be combined with "
843 "individual pools or vdevs\n"));
844 usage(B_FALSE);
845 }
846
847 if (argc < 1 && !initialize_all) {
848 (void) fprintf(stderr, gettext("missing pool name argument\n"));
849 usage(B_FALSE);
850 return (-1);
851 }
852
853 if (wait && (cmd_type != POOL_INITIALIZE_START)) {
854 (void) fprintf(stderr, gettext("-w cannot be used with -c, -s"
855 "or -u\n"));
856 usage(B_FALSE);
857 }
858
859 if (argc == 0 && initialize_all) {
860 /* Initialize each pool */
861 err = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
862 B_FALSE, zpool_initialize_one, &cbdata);
863 return (err);
864 } else if (argc == 1) {
865 /* no individual leaf vdevs specified, initialize the pool */
866 poolname = argv[0];
867 zhp = zpool_open(g_zfs, poolname);
868 if (zhp == NULL)
869 return (-1);
870 err = zpool_initialize_one(zhp, &cbdata);
871 } else {
872 /* individual leaf vdevs specified, initialize them */
873 poolname = argv[0];
874 zhp = zpool_open(g_zfs, poolname);
875 if (zhp == NULL)
876 return (-1);
877 nvlist_t *vdevs = fnvlist_alloc();
878 for (int i = 1; i < argc; i++) {
879 fnvlist_add_boolean(vdevs, argv[i]);
880 }
881 if (wait)
882 err = zpool_initialize_wait(zhp, cmd_type, vdevs);
883 else
884 err = zpool_initialize(zhp, cmd_type, vdevs);
885 fnvlist_free(vdevs);
886 }
887
888 zpool_close(zhp);
889
890 return (err);
891 }
892
893 /*
894 * print a pool vdev config for dry runs
895 */
896 static void
897 print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
898 const char *match, int name_flags)
899 {
900 nvlist_t **child;
901 uint_t c, children;
902 char *vname;
903 boolean_t printed = B_FALSE;
904
905 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
906 &child, &children) != 0) {
907 if (name != NULL)
908 (void) printf("\t%*s%s\n", indent, "", name);
909 return;
910 }
911
912 for (c = 0; c < children; c++) {
913 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
914 const char *class = "";
915
916 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
917 &is_hole);
918
919 if (is_hole == B_TRUE) {
920 continue;
921 }
922
923 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
924 &is_log);
925 if (is_log)
926 class = VDEV_ALLOC_BIAS_LOG;
927 (void) nvlist_lookup_string(child[c],
928 ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
929 if (strcmp(match, class) != 0)
930 continue;
931
932 if (!printed && name != NULL) {
933 (void) printf("\t%*s%s\n", indent, "", name);
934 printed = B_TRUE;
935 }
936 vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
937 print_vdev_tree(zhp, vname, child[c], indent + 2, "",
938 name_flags);
939 free(vname);
940 }
941 }
942
943 /*
944 * Print the list of l2cache devices for dry runs.
945 */
946 static void
947 print_cache_list(nvlist_t *nv, int indent)
948 {
949 nvlist_t **child;
950 uint_t c, children;
951
952 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
953 &child, &children) == 0 && children > 0) {
954 (void) printf("\t%*s%s\n", indent, "", "cache");
955 } else {
956 return;
957 }
958 for (c = 0; c < children; c++) {
959 char *vname;
960
961 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
962 (void) printf("\t%*s%s\n", indent + 2, "", vname);
963 free(vname);
964 }
965 }
966
967 /*
968 * Print the list of spares for dry runs.
969 */
970 static void
971 print_spare_list(nvlist_t *nv, int indent)
972 {
973 nvlist_t **child;
974 uint_t c, children;
975
976 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
977 &child, &children) == 0 && children > 0) {
978 (void) printf("\t%*s%s\n", indent, "", "spares");
979 } else {
980 return;
981 }
982 for (c = 0; c < children; c++) {
983 char *vname;
984
985 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
986 (void) printf("\t%*s%s\n", indent + 2, "", vname);
987 free(vname);
988 }
989 }
990
991 typedef struct spare_cbdata {
992 uint64_t cb_guid;
993 zpool_handle_t *cb_zhp;
994 } spare_cbdata_t;
995
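/*
 * Recursively search the vdev tree rooted at 'nv' for a vdev whose guid
 * matches 'search'; returns B_TRUE if one is found.
 */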
996 static boolean_t
997 find_vdev(nvlist_t *nv, uint64_t search)
998 {
999 uint64_t guid;
1000 nvlist_t **child;
1001 uint_t c, children;
1002
1003 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
1004 search == guid)
1005 return (B_TRUE);
1006
1007 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1008 &child, &children) == 0) {
1009 for (c = 0; c < children; c++)
1010 if (find_vdev(child[c], search))
1011 return (B_TRUE);
1012 }
1013
1014 return (B_FALSE);
1015 }
1016
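/*
 * zpool_iter() callback: returns 1 (stop iterating) at the first pool whose
 * vdev tree contains the guid in the passed spare_cbdata_t, leaving that
 * pool's handle open in cb_zhp; all other handles are closed.
 */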
1017 static int
1018 find_spare(zpool_handle_t *zhp, void *data)
1019 {
1020 spare_cbdata_t *cbp = data;
1021 nvlist_t *config, *nvroot;
1022
1023 config = zpool_get_config(zhp, NULL);
1024 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1025 &nvroot) == 0);
1026
1027 if (find_vdev(nvroot, cbp->cb_guid)) {
1028 cbp->cb_zhp = zhp;
1029 return (1);
1030 }
1031
1032 zpool_close(zhp);
1033 return (0);
1034 }
1035
1036 static void
1037 nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
1038 boolean_t literal, boolean_t as_int, int format)
1039 {
1040 char buf[256];
1041 if (literal) {
1042 if (!as_int)
1043 snprintf(buf, 256, "%llu", (u_longlong_t)value);
1044 } else {
1045 switch (format) {
1046 case ZFS_NICENUM_1024:
1047 zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
1048 break;
1049 case ZFS_NICENUM_BYTES:
1050 zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
1051 break;
1052 case ZFS_NICENUM_TIME:
1053 zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
1054 break;
1055 case ZFS_NICE_TIMESTAMP:
1056 format_timestamp(value, buf, 256);
1057 break;
1058 default:
1059 fprintf(stderr, "Invalid number format\n");
1060 exit(1);
1061 }
1062 }
1063 if (as_int)
1064 fnvlist_add_uint64(item, key, value);
1065 else
1066 fnvlist_add_string(item, key, buf);
1067 }
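
/*
 * Illustrative call (values assumed, not taken from this source): with
 * literal == B_FALSE, as_int == B_FALSE and format == ZFS_NICENUM_BYTES,
 * a value of 1536 is added under 'key' as a string along the lines of
 * "1.50K"; with as_int == B_TRUE the raw uint64 1536 is added instead.
 */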
1068
1069 /*
1070  * Generates an nvlist with the output version for every command, based on
1071  * params. Its purpose is to version the JSON output, since the schema
1072  * format might be updated for each command in the future.
1073 *
1074 * Schema:
1075 *
1076 * "output_version": {
1077 * "command": string,
1078 * "vers_major": integer,
1079 * "vers_minor": integer,
1080 * }
1081 */
1082 static nvlist_t *
1083 zpool_json_schema(int maj_v, int min_v)
1084 {
1085 char cmd[MAX_CMD_LEN];
1086 nvlist_t *sch = fnvlist_alloc();
1087 nvlist_t *ov = fnvlist_alloc();
1088
1089 snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
1090 fnvlist_add_string(ov, "command", cmd);
1091 fnvlist_add_uint32(ov, "vers_major", maj_v);
1092 fnvlist_add_uint32(ov, "vers_minor", min_v);
1093 fnvlist_add_nvlist(sch, "output_version", ov);
1094 fnvlist_free(ov);
1095 return (sch);
1096 }
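
/*
 * For instance, zpool_json_schema(0, 1) called from 'zpool status' produces
 * an nvlist that renders as JSON along the lines of:
 *
 *	"output_version": {
 *	    "command": "zpool status",
 *	    "vers_major": 0,
 *	    "vers_minor": 1
 *	}
 */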
1097
1098 static void
1099 fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
1100 boolean_t as_int)
1101 {
1102 nvlist_t *config = zpool_get_config(zhp, NULL);
1103 uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
1104 uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);
1105
1106 fnvlist_add_string(list, "name", zpool_get_name(zhp));
1107 if (addtype)
1108 fnvlist_add_string(list, "type", "POOL");
1109 fnvlist_add_string(list, "state", zpool_get_state_str(zhp));
1110 if (as_int) {
1111 if (guid)
1112 fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);
1113 if (txg)
1114 fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);
1115 fnvlist_add_uint64(list, "spa_version", SPA_VERSION);
1116 fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);
1117 } else {
1118 char value[ZFS_MAXPROPLEN];
1119 if (guid) {
1120 snprintf(value, ZFS_MAXPROPLEN, "%llu",
1121 (u_longlong_t)guid);
1122 fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
1123 }
1124 if (txg) {
1125 snprintf(value, ZFS_MAXPROPLEN, "%llu",
1126 (u_longlong_t)txg);
1127 fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
1128 }
1129 fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);
1130 fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);
1131 }
1132 }
1133
1134 static void
1135 used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
1136 {
1137 spare_cbdata_t spare_cb;
1138 verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
1139 &spare_cb.cb_guid) == 0);
1140 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
1141 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
1142 zpool_get_name(zhp)) != 0) {
1143 fnvlist_add_string(list, "used_by",
1144 zpool_get_name(spare_cb.cb_zhp));
1145 }
1146 zpool_close(spare_cb.cb_zhp);
1147 }
1148 }
1149
1150 static void
1151 fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
1152 boolean_t addtype, boolean_t as_int)
1153 {
1154 boolean_t l2c = B_FALSE;
1155 const char *path, *phys, *devid, *bias = NULL;
1156 uint64_t hole = 0, log = 0, spare = 0;
1157 vdev_stat_t *vs;
1158 uint_t c;
1159 nvlist_t *nvdev;
1160 nvlist_t *nvdev_parent = NULL;
1161 char *_name;
1162
1163 if (strcmp(name, zpool_get_name(zhp)) != 0)
1164 _name = name;
1165 else
1166 _name = (char *)"root-0";
1167
1168 nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);
1169
1170 fnvlist_add_string(list, "name", name);
1171 if (addtype)
1172 fnvlist_add_string(list, "type", "VDEV");
1173 if (nvdev) {
1174 const char *type = fnvlist_lookup_string(nvdev,
1175 ZPOOL_CONFIG_TYPE);
1176 if (type)
1177 fnvlist_add_string(list, "vdev_type", type);
1178 uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);
1179 if (guid) {
1180 if (as_int) {
1181 fnvlist_add_uint64(list, "guid", guid);
1182 } else {
1183 char buf[ZFS_MAXPROPLEN];
1184 snprintf(buf, ZFS_MAXPROPLEN, "%llu",
1185 (u_longlong_t)guid);
1186 fnvlist_add_string(list, "guid", buf);
1187 }
1188 }
1189 if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
1190 fnvlist_add_string(list, "path", path);
1191 if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
1192 &phys) == 0)
1193 fnvlist_add_string(list, "phys_path", phys);
1194 if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
1195 &devid) == 0)
1196 fnvlist_add_string(list, "devid", devid);
1197 (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
1198 (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
1199 &spare);
1200 (void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
1201 if (hole)
1202 fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
1203 else if (l2c)
1204 fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
1205 else if (spare)
1206 fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
1207 else if (log)
1208 fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
1209 else {
1210 (void) nvlist_lookup_string(nvdev,
1211 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
1212 if (bias != NULL)
1213 fnvlist_add_string(list, "class", bias);
1214 else {
1215 nvdev_parent = NULL;
1216 nvdev_parent = zpool_find_parent_vdev(zhp,
1217 _name, NULL, NULL, NULL);
1218
1219 /*
1220 * With a mirrored special device, the parent
1221 * "mirror" vdev will have
1222  * ZPOOL_CONFIG_ALLOCATION_BIAS set to "special",
1223  * not the leaf vdevs. If we're a leaf vdev in
1224  * that case, we need to look at our parent to
1225  * see if it's "special" to know whether we
1226  * are "special" too.
1227 */
1228 if (nvdev_parent) {
1229 (void) nvlist_lookup_string(
1230 nvdev_parent,
1231 ZPOOL_CONFIG_ALLOCATION_BIAS,
1232 &bias);
1233 }
1234 if (bias != NULL)
1235 fnvlist_add_string(list, "class", bias);
1236 else
1237 fnvlist_add_string(list, "class",
1238 "normal");
1239 }
1240 }
1241 if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
1242 (uint64_t **)&vs, &c) == 0) {
1243 fnvlist_add_string(list, "state",
1244 vdev_state_str[vs->vs_state]);
1245 }
1246 }
1247 }
1248
1249 static boolean_t
1250 prop_list_contains_feature(nvlist_t *proplist)
1251 {
1252 nvpair_t *nvp;
1253 for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
1254 nvp = nvlist_next_nvpair(proplist, nvp)) {
1255 if (zpool_prop_feature(nvpair_name(nvp)))
1256 return (B_TRUE);
1257 }
1258 return (B_FALSE);
1259 }
1260
1261 /*
1262 * Add a property pair (name, string-value) into a property nvlist.
1263 */
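/*
 * For example (a sketch), add_prop_list("ashift", "12", &props, B_TRUE)
 * validates 'ashift' as a pool property, allocates *props if it is still
 * NULL, and stores the normalized property name with the string value "12".
 */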
1264 static int
1265 add_prop_list(const char *propname, const char *propval, nvlist_t **props,
1266 boolean_t poolprop)
1267 {
1268 zpool_prop_t prop = ZPOOL_PROP_INVAL;
1269 nvlist_t *proplist;
1270 const char *normnm;
1271 const char *strval;
1272
1273 if (*props == NULL &&
1274 nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
1275 (void) fprintf(stderr,
1276 gettext("internal error: out of memory\n"));
1277 return (1);
1278 }
1279
1280 proplist = *props;
1281
1282 if (poolprop) {
1283 const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
1284 const char *cname =
1285 zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
1286
1287 if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
1288 (!zpool_prop_feature(propname) &&
1289 !zpool_prop_vdev(propname))) {
1290 (void) fprintf(stderr, gettext("property '%s' is "
1291 "not a valid pool or vdev property\n"), propname);
1292 return (2);
1293 }
1294
1295 /*
1296 * feature@ properties and version should not be specified
1297 * at the same time.
1298 */
1299 if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
1300 nvlist_exists(proplist, vname)) ||
1301 (prop == ZPOOL_PROP_VERSION &&
1302 prop_list_contains_feature(proplist))) {
1303 (void) fprintf(stderr, gettext("'feature@' and "
1304 "'version' properties cannot be specified "
1305 "together\n"));
1306 return (2);
1307 }
1308
1309 /*
1310 * if version is specified, only "legacy" compatibility
1311 * may be requested
1312 */
1313 if ((prop == ZPOOL_PROP_COMPATIBILITY &&
1314 strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
1315 nvlist_exists(proplist, vname)) ||
1316 (prop == ZPOOL_PROP_VERSION &&
1317 nvlist_exists(proplist, cname) &&
1318 strcmp(fnvlist_lookup_string(proplist, cname),
1319 ZPOOL_COMPAT_LEGACY) != 0)) {
1320 (void) fprintf(stderr, gettext("when 'version' is "
1321 "specified, the 'compatibility' feature may only "
1322 "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
1323 return (2);
1324 }
1325
1326 if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
1327 normnm = propname;
1328 else
1329 normnm = zpool_prop_to_name(prop);
1330 } else {
1331 zfs_prop_t fsprop = zfs_name_to_prop(propname);
1332
1333 if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
1334 B_FALSE)) {
1335 normnm = zfs_prop_to_name(fsprop);
1336 } else if (zfs_prop_user(propname) ||
1337 zfs_prop_userquota(propname)) {
1338 normnm = propname;
1339 } else {
1340 (void) fprintf(stderr, gettext("property '%s' is "
1341 "not a valid filesystem property\n"), propname);
1342 return (2);
1343 }
1344 }
1345
1346 if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
1347 prop != ZPOOL_PROP_CACHEFILE) {
1348 (void) fprintf(stderr, gettext("property '%s' "
1349 "specified multiple times\n"), propname);
1350 return (2);
1351 }
1352
1353 if (nvlist_add_string(proplist, normnm, propval) != 0) {
1354 (void) fprintf(stderr, gettext("internal "
1355 "error: out of memory\n"));
1356 return (1);
1357 }
1358
1359 return (0);
1360 }
1361
1362 /*
1363 * Set a default property pair (name, string-value) in a property nvlist
1364 */
1365 static int
1366 add_prop_list_default(const char *propname, const char *propval,
1367 nvlist_t **props)
1368 {
1369 const char *pval;
1370
1371 if (nvlist_lookup_string(*props, propname, &pval) == 0)
1372 return (0);
1373
1374 return (add_prop_list(propname, propval, props, B_TRUE));
1375 }
1376
1377 /*
1378 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
1379 *
1380 * -a Disable the ashift validation checks
1381 * -f Force addition of devices, even if they appear in use
1382 * -g Display guid for individual vdev name.
1383 * -L Follow links when resolving vdev path name.
1384 * -n Do not add the devices, but display the resulting layout if
1385 * they were to be added.
1386 * -o Set property=value.
1387 * -P Display full path for vdev name.
1388 *
1389 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
1390 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
1391 * libzfs.
1392 */
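/*
 * Usage sketch (hypothetical names): 'zpool add -n tank mirror sdc sdd'
 * prints the layout that would result from adding a new mirror to 'tank'
 * without modifying the pool.
 */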
1393 int
1394 zpool_do_add(int argc, char **argv)
1395 {
1396 boolean_t check_replication = B_TRUE;
1397 boolean_t check_inuse = B_TRUE;
1398 boolean_t dryrun = B_FALSE;
1399 boolean_t check_ashift = B_TRUE;
1400 boolean_t force = B_FALSE;
1401 int name_flags = 0;
1402 int c;
1403 nvlist_t *nvroot;
1404 char *poolname;
1405 int ret;
1406 zpool_handle_t *zhp;
1407 nvlist_t *config;
1408 nvlist_t *props = NULL;
1409 char *propval;
1410
1411 struct option long_options[] = {
1412 {"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
1413 {"allow-replication-mismatch", no_argument, NULL,
1414 ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
1415 {"allow-ashift-mismatch", no_argument, NULL,
1416 ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
1417 {0, 0, 0, 0}
1418 };
1419
1420 /* check options */
1421 while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
1422 != -1) {
1423 switch (c) {
1424 case 'f':
1425 force = B_TRUE;
1426 break;
1427 case 'g':
1428 name_flags |= VDEV_NAME_GUID;
1429 break;
1430 case 'L':
1431 name_flags |= VDEV_NAME_FOLLOW_LINKS;
1432 break;
1433 case 'n':
1434 dryrun = B_TRUE;
1435 break;
1436 case 'o':
1437 if ((propval = strchr(optarg, '=')) == NULL) {
1438 (void) fprintf(stderr, gettext("missing "
1439 "'=' for -o option\n"));
1440 usage(B_FALSE);
1441 }
1442 *propval = '\0';
1443 propval++;
1444
1445 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
1446 (add_prop_list(optarg, propval, &props, B_TRUE)))
1447 usage(B_FALSE);
1448 break;
1449 case 'P':
1450 name_flags |= VDEV_NAME_PATH;
1451 break;
1452 case ZPOOL_OPTION_ALLOW_INUSE:
1453 check_inuse = B_FALSE;
1454 break;
1455 case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
1456 check_replication = B_FALSE;
1457 break;
1458 case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
1459 check_ashift = B_FALSE;
1460 break;
1461 case '?':
1462 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1463 optopt);
1464 usage(B_FALSE);
1465 }
1466 }
1467
1468 argc -= optind;
1469 argv += optind;
1470
1471 /* get pool name and check number of arguments */
1472 if (argc < 1) {
1473 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1474 usage(B_FALSE);
1475 }
1476 if (argc < 2) {
1477 (void) fprintf(stderr, gettext("missing vdev specification\n"));
1478 usage(B_FALSE);
1479 }
1480
1481 if (force) {
1482 if (!check_inuse || !check_replication || !check_ashift) {
1483 (void) fprintf(stderr, gettext("'-f' option is not "
1484 "allowed with '--allow-replication-mismatch', "
1485 "'--allow-ashift-mismatch', or "
1486 "'--allow-in-use'\n"));
1487 usage(B_FALSE);
1488 }
1489 check_inuse = B_FALSE;
1490 check_replication = B_FALSE;
1491 check_ashift = B_FALSE;
1492 }
1493
1494 poolname = argv[0];
1495
1496 argc--;
1497 argv++;
1498
1499 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1500 return (1);
1501
1502 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
1503 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
1504 poolname);
1505 zpool_close(zhp);
1506 return (1);
1507 }
1508
1509 /* unless manually specified use "ashift" pool property (if set) */
1510 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
1511 int intval;
1512 zprop_source_t src;
1513 char strval[ZPOOL_MAXPROPLEN];
1514
1515 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
1516 if (src != ZPROP_SRC_DEFAULT) {
1517 (void) sprintf(strval, "%" PRId32, intval);
1518 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
1519 &props, B_TRUE) == 0);
1520 }
1521 }
1522
1523 /* pass off to make_root_vdev for processing */
1524 nvroot = make_root_vdev(zhp, props, !check_inuse,
1525 check_replication, B_FALSE, dryrun, argc, argv);
1526 if (nvroot == NULL) {
1527 zpool_close(zhp);
1528 return (1);
1529 }
1530
1531 if (dryrun) {
1532 nvlist_t *poolnvroot;
1533 nvlist_t **l2child, **sparechild;
1534 uint_t l2children, sparechildren, c;
1535 char *vname;
1536 boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
1537
1538 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1539 &poolnvroot) == 0);
1540
1541 (void) printf(gettext("would update '%s' to the following "
1542 "configuration:\n\n"), zpool_get_name(zhp));
1543
1544 /* print original main pool and new tree */
1545 print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
1546 name_flags | VDEV_NAME_TYPE_ID);
1547 print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
1548
1549 /* print other classes: 'dedup', 'special', and 'log' */
1550 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1551 print_vdev_tree(zhp, "dedup", poolnvroot, 0,
1552 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1553 print_vdev_tree(zhp, NULL, nvroot, 0,
1554 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1555 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1556 print_vdev_tree(zhp, "dedup", nvroot, 0,
1557 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1558 }
1559
1560 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1561 print_vdev_tree(zhp, "special", poolnvroot, 0,
1562 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1563 print_vdev_tree(zhp, NULL, nvroot, 0,
1564 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1565 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1566 print_vdev_tree(zhp, "special", nvroot, 0,
1567 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1568 }
1569
1570 if (num_logs(poolnvroot) > 0) {
1571 print_vdev_tree(zhp, "logs", poolnvroot, 0,
1572 VDEV_ALLOC_BIAS_LOG, name_flags);
1573 print_vdev_tree(zhp, NULL, nvroot, 0,
1574 VDEV_ALLOC_BIAS_LOG, name_flags);
1575 } else if (num_logs(nvroot) > 0) {
1576 print_vdev_tree(zhp, "logs", nvroot, 0,
1577 VDEV_ALLOC_BIAS_LOG, name_flags);
1578 }
1579
1580 /* Do the same for the caches */
1581 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
1582 &l2child, &l2children) == 0 && l2children) {
1583 hadcache = B_TRUE;
1584 (void) printf(gettext("\tcache\n"));
1585 for (c = 0; c < l2children; c++) {
1586 vname = zpool_vdev_name(g_zfs, NULL,
1587 l2child[c], name_flags);
1588 (void) printf("\t %s\n", vname);
1589 free(vname);
1590 }
1591 }
1592 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1593 &l2child, &l2children) == 0 && l2children) {
1594 if (!hadcache)
1595 (void) printf(gettext("\tcache\n"));
1596 for (c = 0; c < l2children; c++) {
1597 vname = zpool_vdev_name(g_zfs, NULL,
1598 l2child[c], name_flags);
1599 (void) printf("\t %s\n", vname);
1600 free(vname);
1601 }
1602 }
1603 /* And finally the spares */
1604 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
1605 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1606 hadspare = B_TRUE;
1607 (void) printf(gettext("\tspares\n"));
1608 for (c = 0; c < sparechildren; c++) {
1609 vname = zpool_vdev_name(g_zfs, NULL,
1610 sparechild[c], name_flags);
1611 (void) printf("\t %s\n", vname);
1612 free(vname);
1613 }
1614 }
1615 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1616 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1617 if (!hadspare)
1618 (void) printf(gettext("\tspares\n"));
1619 for (c = 0; c < sparechildren; c++) {
1620 vname = zpool_vdev_name(g_zfs, NULL,
1621 sparechild[c], name_flags);
1622 (void) printf("\t %s\n", vname);
1623 free(vname);
1624 }
1625 }
1626
1627 ret = 0;
1628 } else {
1629 ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
1630 }
1631
1632 nvlist_free(props);
1633 nvlist_free(nvroot);
1634 zpool_close(zhp);
1635
1636 return (ret);
1637 }
1638
1639 /*
1640 * zpool remove [-npsw] <pool> <vdev> ...
1641 *
1642 * Removes the given vdev from the pool.
1643 */
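/*
 * Usage sketch (hypothetical names): 'zpool remove -n tank mirror-1' reports
 * the memory the indirect mapping would need after removal; 'zpool remove -w
 * tank mirror-1' performs the removal and waits for evacuation to complete.
 */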
1644 int
1645 zpool_do_remove(int argc, char **argv)
1646 {
1647 char *poolname;
1648 int i, ret = 0;
1649 zpool_handle_t *zhp = NULL;
1650 boolean_t stop = B_FALSE;
1651 int c;
1652 boolean_t noop = B_FALSE;
1653 boolean_t parsable = B_FALSE;
1654 boolean_t wait = B_FALSE;
1655
1656 /* check options */
1657 while ((c = getopt(argc, argv, "npsw")) != -1) {
1658 switch (c) {
1659 case 'n':
1660 noop = B_TRUE;
1661 break;
1662 case 'p':
1663 parsable = B_TRUE;
1664 break;
1665 case 's':
1666 stop = B_TRUE;
1667 break;
1668 case 'w':
1669 wait = B_TRUE;
1670 break;
1671 case '?':
1672 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1673 optopt);
1674 usage(B_FALSE);
1675 }
1676 }
1677
1678 argc -= optind;
1679 argv += optind;
1680
1681 /* get pool name and check number of arguments */
1682 if (argc < 1) {
1683 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1684 usage(B_FALSE);
1685 }
1686
1687 poolname = argv[0];
1688
1689 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1690 return (1);
1691
1692 if (stop && noop) {
1693 zpool_close(zhp);
1694 (void) fprintf(stderr, gettext("stop request ignored\n"));
1695 return (0);
1696 }
1697
1698 if (stop) {
1699 if (argc > 1) {
1700 (void) fprintf(stderr, gettext("too many arguments\n"));
1701 usage(B_FALSE);
1702 }
1703 if (zpool_vdev_remove_cancel(zhp) != 0)
1704 ret = 1;
1705 if (wait) {
1706 (void) fprintf(stderr, gettext("invalid option "
1707 "combination: -w cannot be used with -s\n"));
1708 usage(B_FALSE);
1709 }
1710 } else {
1711 if (argc < 2) {
1712 (void) fprintf(stderr, gettext("missing device\n"));
1713 usage(B_FALSE);
1714 }
1715
1716 for (i = 1; i < argc; i++) {
1717 if (noop) {
1718 uint64_t size;
1719
1720 if (zpool_vdev_indirect_size(zhp, argv[i],
1721 &size) != 0) {
1722 ret = 1;
1723 break;
1724 }
1725 if (parsable) {
1726 (void) printf("%s %llu\n",
1727 argv[i], (unsigned long long)size);
1728 } else {
1729 char valstr[32];
1730 zfs_nicenum(size, valstr,
1731 sizeof (valstr));
1732 (void) printf("Memory that will be "
1733 "used after removing %s: %s\n",
1734 argv[i], valstr);
1735 }
1736 } else {
1737 if (zpool_vdev_remove(zhp, argv[i]) != 0)
1738 ret = 1;
1739 }
1740 }
1741
1742 if (ret == 0 && wait)
1743 ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
1744 }
1745 zpool_close(zhp);
1746
1747 return (ret);
1748 }
1749
1750 /*
1751 * Return 1 if a vdev is active (being used in a pool)
1752 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
1753 *
1754 * This is useful for checking if a disk in an active pool is offlined or
1755 * faulted.
1756 */
1757 static int
1758 vdev_is_active(char *vdev_path)
1759 {
1760 int fd;
1761 fd = open(vdev_path, O_EXCL);
1762 if (fd < 0) {
1763 return (1); /* can't open O_EXCL - disk is active */
1764 }
1765
1766 close(fd);
1767 return (0); /* disk is inactive in the pool */
1768 }
1769
1770 /*
1771 * zpool labelclear [-f] <vdev>
1772 *
1773 * -f Force clearing the label for the vdevs which are members of
1774 * the exported or foreign pools.
1775 *
1776 * Verifies that the vdev is not active and zeros out the label information
1777 * on the device.
1778 */
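/*
 * Usage sketch (hypothetical device): 'zpool labelclear -f /dev/sdc1' wipes
 * the ZFS label from a partition that was a member of an exported pool.
 */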
1779 int
1780 zpool_do_labelclear(int argc, char **argv)
1781 {
1782 char vdev[MAXPATHLEN];
1783 char *name = NULL;
1784 int c, fd, ret = 0;
1785 nvlist_t *config;
1786 pool_state_t state;
1787 boolean_t inuse = B_FALSE;
1788 boolean_t force = B_FALSE;
1789
1790 /* check options */
1791 while ((c = getopt(argc, argv, "f")) != -1) {
1792 switch (c) {
1793 case 'f':
1794 force = B_TRUE;
1795 break;
1796 default:
1797 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1798 optopt);
1799 usage(B_FALSE);
1800 }
1801 }
1802
1803 argc -= optind;
1804 argv += optind;
1805
1806 /* get vdev name */
1807 if (argc < 1) {
1808 (void) fprintf(stderr, gettext("missing vdev name\n"));
1809 usage(B_FALSE);
1810 }
1811 if (argc > 1) {
1812 (void) fprintf(stderr, gettext("too many arguments\n"));
1813 usage(B_FALSE);
1814 }
1815
1816 (void) strlcpy(vdev, argv[0], sizeof (vdev));
1817
1818 /*
1819 * If we cannot open an absolute path, we quit.
1820  * Otherwise, if the provided vdev name doesn't point to a file,
1821 * try prepending expected disk paths and partition numbers.
1822 */
1823 if ((fd = open(vdev, O_RDWR)) < 0) {
1824 int error;
1825 if (vdev[0] == '/') {
1826 (void) fprintf(stderr, gettext("failed to open "
1827 "%s: %s\n"), vdev, strerror(errno));
1828 return (1);
1829 }
1830
1831 error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
1832 if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
1833 if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
1834 error = ENOENT;
1835 }
1836
1837 if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
1838 if (errno == ENOENT) {
1839 (void) fprintf(stderr, gettext(
1840 "failed to find device %s, try "
1841 "specifying absolute path instead\n"),
1842 argv[0]);
1843 return (1);
1844 }
1845
1846 (void) fprintf(stderr, gettext("failed to open %s:"
1847 " %s\n"), vdev, strerror(errno));
1848 return (1);
1849 }
1850 }
1851
1852 /*
1853 * Flush all dirty pages for the block device. This should not be
1854 * fatal when the device does not support BLKFLSBUF as would be the
1855 * case for a file vdev.
1856 */
1857 if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
1858 (void) fprintf(stderr, gettext("failed to invalidate "
1859 "cache for %s: %s\n"), vdev, strerror(errno));
1860
1861 if (zpool_read_label(fd, &config, NULL) != 0) {
1862 (void) fprintf(stderr,
1863 gettext("failed to read label from %s\n"), vdev);
1864 ret = 1;
1865 goto errout;
1866 }
1867 nvlist_free(config);
1868
1869 ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
1870 if (ret != 0) {
1871 (void) fprintf(stderr,
1872 gettext("failed to check state for %s\n"), vdev);
1873 ret = 1;
1874 goto errout;
1875 }
1876
1877 if (!inuse)
1878 goto wipe_label;
1879
1880 switch (state) {
1881 default:
1882 case POOL_STATE_ACTIVE:
1883 case POOL_STATE_SPARE:
1884 case POOL_STATE_L2CACHE:
1885 /*
1886  * We allow the user to call 'zpool labelclear -f'
1887 * on an offlined disk in an active pool. We can check if
1888 * the disk is online by calling vdev_is_active().
1889 */
1890 if (force && !vdev_is_active(vdev))
1891 break;
1892
1893 (void) fprintf(stderr, gettext(
1894 "%s is a member (%s) of pool \"%s\""),
1895 vdev, zpool_pool_state_to_name(state), name);
1896
1897 if (force) {
1898 (void) fprintf(stderr, gettext(
1899 ". Offline the disk first to clear its label."));
1900 }
1901 printf("\n");
1902 ret = 1;
1903 goto errout;
1904
1905 case POOL_STATE_EXPORTED:
1906 if (force)
1907 break;
1908 (void) fprintf(stderr, gettext(
1909 "use '-f' to override the following error:\n"
1910 "%s is a member of exported pool \"%s\"\n"),
1911 vdev, name);
1912 ret = 1;
1913 goto errout;
1914
1915 case POOL_STATE_POTENTIALLY_ACTIVE:
1916 if (force)
1917 break;
1918 (void) fprintf(stderr, gettext(
1919 "use '-f' to override the following error:\n"
1920 "%s is a member of potentially active pool \"%s\"\n"),
1921 vdev, name);
1922 ret = 1;
1923 goto errout;
1924
1925 case POOL_STATE_DESTROYED:
1926 /* inuse should never be set for a destroyed pool */
1927 assert(0);
1928 break;
1929 }
1930
1931 wipe_label:
1932 ret = zpool_clear_label(fd);
1933 if (ret != 0) {
1934 (void) fprintf(stderr,
1935 gettext("failed to clear label for %s\n"), vdev);
1936 }
1937
1938 errout:
1939 free(name);
1940 (void) close(fd);
1941
1942 return (ret);
1943 }
1944
1945 /*
1946 * zpool create [-fnd] [-o property=value] ...
1947 * [-O file-system-property=value] ...
1948 * [-R root] [-m mountpoint] <pool> <dev> ...
1949 *
1950 * -f Force creation, even if devices appear in use
1951 * -n Do not create the pool, but display the resulting layout if it
1952 * were to be created.
1953 * -R Create a pool under an alternate root
1954 * -m Set default mountpoint for the root dataset. By default it's
1955 * '/<pool>'
1956 * -o Set property=value.
1957 * -o Set feature@feature=enabled|disabled.
1958 * -d Don't automatically enable all supported pool features
1959 * (individual features can be enabled with -o).
1960 * -O Set fsproperty=value in the pool's root file system
1961 *
1962 * Creates the named pool according to the given vdev specification. The
1963 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1964 * Once we get the nvlist back from make_root_vdev(), we either print out the
1965 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
1966 */
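/*
 * Typical invocations (hypothetical pool and device names):
 *
 *	zpool create tank mirror sda sdb
 *	zpool create -n -o ashift=12 -O compression=on tank raidz sda sdb sdc
 */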
1967 int
1968 zpool_do_create(int argc, char **argv)
1969 {
1970 boolean_t force = B_FALSE;
1971 boolean_t dryrun = B_FALSE;
1972 boolean_t enable_pool_features = B_TRUE;
1973
1974 int c;
1975 nvlist_t *nvroot = NULL;
1976 char *poolname;
1977 char *tname = NULL;
1978 int ret = 1;
1979 char *altroot = NULL;
1980 char *compat = NULL;
1981 char *mountpoint = NULL;
1982 nvlist_t *fsprops = NULL;
1983 nvlist_t *props = NULL;
1984 char *propval;
1985
1986 /* check options */
1987 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1988 switch (c) {
1989 case 'f':
1990 force = B_TRUE;
1991 break;
1992 case 'n':
1993 dryrun = B_TRUE;
1994 break;
1995 case 'd':
1996 enable_pool_features = B_FALSE;
1997 break;
1998 case 'R':
1999 altroot = optarg;
2000 if (add_prop_list(zpool_prop_to_name(
2001 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
2002 goto errout;
2003 if (add_prop_list_default(zpool_prop_to_name(
2004 ZPOOL_PROP_CACHEFILE), "none", &props))
2005 goto errout;
2006 break;
2007 case 'm':
2008 /* Equivalent to -O mountpoint=optarg */
2009 mountpoint = optarg;
2010 break;
2011 case 'o':
2012 if ((propval = strchr(optarg, '=')) == NULL) {
2013 (void) fprintf(stderr, gettext("missing "
2014 "'=' for -o option\n"));
2015 goto errout;
2016 }
2017 *propval = '\0';
2018 propval++;
2019
2020 if (add_prop_list(optarg, propval, &props, B_TRUE))
2021 goto errout;
2022
2023 /*
2024 * If the user is creating a pool that doesn't support
2025 * feature flags, don't enable any features.
2026 */
2027 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
2028 char *end;
2029 u_longlong_t ver;
2030
2031 ver = strtoull(propval, &end, 0);
2032 if (*end == '\0' &&
2033 ver < SPA_VERSION_FEATURES) {
2034 enable_pool_features = B_FALSE;
2035 }
2036 }
2037 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
2038 altroot = propval;
2039 if (zpool_name_to_prop(optarg) ==
2040 ZPOOL_PROP_COMPATIBILITY)
2041 compat = propval;
2042 break;
2043 case 'O':
2044 if ((propval = strchr(optarg, '=')) == NULL) {
2045 (void) fprintf(stderr, gettext("missing "
2046 "'=' for -O option\n"));
2047 goto errout;
2048 }
2049 *propval = '\0';
2050 propval++;
2051
2052 /*
2053 * Mountpoints are checked and then added later.
2054 * Uniquely among properties, they can be specified
2055 * more than once, to avoid conflict with -m.
2056 */
2057 if (0 == strcmp(optarg,
2058 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
2059 mountpoint = propval;
2060 } else if (add_prop_list(optarg, propval, &fsprops,
2061 B_FALSE)) {
2062 goto errout;
2063 }
2064 break;
2065 case 't':
2066 /*
2067 * Sanity check temporary pool name.
2068 */
2069 if (strchr(optarg, '/') != NULL) {
2070 (void) fprintf(stderr, gettext("cannot create "
2071 "'%s': invalid character '/' in temporary "
2072 "name\n"), optarg);
2073 (void) fprintf(stderr, gettext("use 'zfs "
2074 "create' to create a dataset\n"));
2075 goto errout;
2076 }
2077
2078 if (add_prop_list(zpool_prop_to_name(
2079 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
2080 goto errout;
2081 if (add_prop_list_default(zpool_prop_to_name(
2082 ZPOOL_PROP_CACHEFILE), "none", &props))
2083 goto errout;
2084 tname = optarg;
2085 break;
2086 case ':':
2087 (void) fprintf(stderr, gettext("missing argument for "
2088 "'%c' option\n"), optopt);
2089 goto badusage;
2090 case '?':
2091 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2092 optopt);
2093 goto badusage;
2094 }
2095 }
2096
2097 argc -= optind;
2098 argv += optind;
2099
2100 /* get pool name and check number of arguments */
2101 if (argc < 1) {
2102 (void) fprintf(stderr, gettext("missing pool name argument\n"));
2103 goto badusage;
2104 }
2105 if (argc < 2) {
2106 (void) fprintf(stderr, gettext("missing vdev specification\n"));
2107 goto badusage;
2108 }
2109
2110 poolname = argv[0];
2111
2112 /*
2113 * As a special case, check for use of '/' in the name, and direct the
2114 * user to use 'zfs create' instead.
2115 */
2116 if (strchr(poolname, '/') != NULL) {
2117 (void) fprintf(stderr, gettext("cannot create '%s': invalid "
2118 "character '/' in pool name\n"), poolname);
2119 (void) fprintf(stderr, gettext("use 'zfs create' to "
2120 "create a dataset\n"));
2121 goto errout;
2122 }
2123
2124 /* pass off to make_root_vdev for bulk processing */
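/* note: 'force' is also passed (negated) as make_root_vdev()'s
 * 'check_rep' argument, so -f disables replication-level checks too */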
2125 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
2126 argc - 1, argv + 1);
2127 if (nvroot == NULL)
2128 goto errout;
2129
2130 /* make_root_vdev() allows 0 toplevel children if there are spares */
2131 if (!zfs_allocatable_devs(nvroot)) {
2132 (void) fprintf(stderr, gettext("invalid vdev "
2133 "specification: at least one toplevel vdev must be "
2134 "specified\n"));
2135 goto errout;
2136 }
2137
2138 if (altroot != NULL && altroot[0] != '/') {
2139 (void) fprintf(stderr, gettext("invalid alternate root '%s': "
2140 "must be an absolute path\n"), altroot);
2141 goto errout;
2142 }
2143
2144 /*
2145 * Check the validity of the mountpoint and direct the user to use the
2146 * '-m' mountpoint option if it looks like it's in use.
2147 */
2148 if (mountpoint == NULL ||
2149 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
2150 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
2151 char buf[MAXPATHLEN];
2152 DIR *dirp;
2153
2154 if (mountpoint && mountpoint[0] != '/') {
2155 (void) fprintf(stderr, gettext("invalid mountpoint "
2156 "'%s': must be an absolute path, 'legacy', or "
2157 "'none'\n"), mountpoint);
2158 goto errout;
2159 }
2160
2161 if (mountpoint == NULL) {
2162 if (altroot != NULL)
2163 (void) snprintf(buf, sizeof (buf), "%s/%s",
2164 altroot, poolname);
2165 else
2166 (void) snprintf(buf, sizeof (buf), "/%s",
2167 poolname);
2168 } else {
2169 if (altroot != NULL)
2170 (void) snprintf(buf, sizeof (buf), "%s%s",
2171 altroot, mountpoint);
2172 else
2173 (void) snprintf(buf, sizeof (buf), "%s",
2174 mountpoint);
2175 }
2176
2177 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
2178 (void) fprintf(stderr, gettext("mountpoint '%s' : "
2179 "%s\n"), buf, strerror(errno));
2180 (void) fprintf(stderr, gettext("use '-m' "
2181 "option to provide a different default\n"));
2182 goto errout;
2183 } else if (dirp) {
2184 int count = 0;
2185
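/* "." and ".." always exist; a third entry means the directory is not empty */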
2186 while (count < 3 && readdir(dirp) != NULL)
2187 count++;
2188 (void) closedir(dirp);
2189
2190 if (count > 2) {
2191 (void) fprintf(stderr, gettext("mountpoint "
2192 "'%s' exists and is not empty\n"), buf);
2193 (void) fprintf(stderr, gettext("use '-m' "
2194 "option to provide a "
2195 "different default\n"));
2196 goto errout;
2197 }
2198 }
2199 }
2200
2201 /*
2202 * Now that the mountpoint's validity has been checked, ensure that
2203 * the property is set appropriately prior to creating the pool.
2204 */
2205 if (mountpoint != NULL) {
2206 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
2207 mountpoint, &fsprops, B_FALSE);
2208 if (ret != 0)
2209 goto errout;
2210 }
2211
2212 ret = 1;
2213 if (dryrun) {
2214 /*
2215 * For a dry run invocation, print out a basic message and run
2216 * through all the vdevs in the list and print out in an
2217 * appropriate hierarchy.
2218 */
2219 (void) printf(gettext("would create '%s' with the "
2220 "following layout:\n\n"), poolname);
2221
2222 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
2223 print_vdev_tree(NULL, "dedup", nvroot, 0,
2224 VDEV_ALLOC_BIAS_DEDUP, 0);
2225 print_vdev_tree(NULL, "special", nvroot, 0,
2226 VDEV_ALLOC_BIAS_SPECIAL, 0);
2227 print_vdev_tree(NULL, "logs", nvroot, 0,
2228 VDEV_ALLOC_BIAS_LOG, 0);
2229 print_cache_list(nvroot, 0);
2230 print_spare_list(nvroot, 0);
2231
2232 ret = 0;
2233 } else {
2234 /*
2235 * Load in feature set.
2236 * Note: if the compatibility property was not given, compat is
2237 * NULL, which means 'all features'.
2238 */
2239 boolean_t requested_features[SPA_FEATURES];
2240 if (zpool_do_load_compat(compat, requested_features) !=
2241 ZPOOL_COMPATIBILITY_OK)
2242 goto errout;
2243
2244 /*
2245 * props contains list of features to enable.
2246 * For each feature:
2247 * - remove it if feature@name=disabled
2248 * - leave it there if feature@name=enabled
2249 * - add it if:
2250 * - enable_pool_features (i.e. no '-d' or '-o version')
2251 * - it's supported by the kernel module
2252 * - it's in the requested feature set
2253 * - warn if it's enabled but not in compat
2254 */
2255 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2256 char propname[MAXPATHLEN];
2257 const char *propval;
2258 zfeature_info_t *feat = &spa_feature_table[i];
2259
2260 (void) snprintf(propname, sizeof (propname),
2261 "feature@%s", feat->fi_uname);
2262
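/*
 * nvlist_lookup_string() returns 0 on success, so this branch
 * handles features the user named explicitly on the command line.
 */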
2263 if (!nvlist_lookup_string(props, propname, &propval)) {
2264 if (strcmp(propval,
2265 ZFS_FEATURE_DISABLED) == 0) {
2266 (void) nvlist_remove_all(props,
2267 propname);
2268 } else if (strcmp(propval,
2269 ZFS_FEATURE_ENABLED) == 0 &&
2270 !requested_features[i]) {
2271 (void) fprintf(stderr, gettext(
2272 "Warning: feature \"%s\" enabled "
2273 "but is not in specified "
2274 "'compatibility' feature set.\n"),
2275 feat->fi_uname);
2276 }
2277 } else if (
2278 enable_pool_features &&
2279 feat->fi_zfs_mod_supported &&
2280 requested_features[i]) {
2281 ret = add_prop_list(propname,
2282 ZFS_FEATURE_ENABLED, &props, B_TRUE);
2283 if (ret != 0)
2284 goto errout;
2285 }
2286 }
2287
2288 ret = 1;
2289 if (zpool_create(g_zfs, poolname,
2290 nvroot, props, fsprops) == 0) {
2291 zfs_handle_t *pool = zfs_open(g_zfs,
2292 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
2293 if (pool != NULL) {
2294 if (zfs_mount(pool, NULL, 0) == 0) {
2295 ret = zfs_share(pool, NULL);
2296 zfs_commit_shares(NULL);
2297 }
2298 zfs_close(pool);
2299 }
2300 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
2301 (void) fprintf(stderr, gettext("pool name may have "
2302 "been omitted\n"));
2303 }
2304 }
2305
2306 errout:
2307 nvlist_free(nvroot);
2308 nvlist_free(fsprops);
2309 nvlist_free(props);
2310 return (ret);
2311 badusage:
2312 nvlist_free(fsprops);
2313 nvlist_free(props);
2314 usage(B_FALSE);
2315 return (2);
2316 }
2317
2318 /*
2319 * zpool destroy <pool>
2320 *
2321 * -f Forcefully unmount any datasets
2322 *
2323 * Destroy the given pool. Automatically unmounts any datasets in the pool.
2324 */
2325 int
2326 zpool_do_destroy(int argc, char **argv)
2327 {
2328 boolean_t force = B_FALSE;
2329 int c;
2330 char *pool;
2331 zpool_handle_t *zhp;
2332 int ret;
2333
2334 /* check options */
2335 while ((c = getopt(argc, argv, "f")) != -1) {
2336 switch (c) {
2337 case 'f':
2338 force = B_TRUE;
2339 break;
2340 case '?':
2341 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2342 optopt);
2343 usage(B_FALSE);
2344 }
2345 }
2346
2347 argc -= optind;
2348 argv += optind;
2349
2350 /* check arguments */
2351 if (argc < 1) {
2352 (void) fprintf(stderr, gettext("missing pool argument\n"));
2353 usage(B_FALSE);
2354 }
2355 if (argc > 1) {
2356 (void) fprintf(stderr, gettext("too many arguments\n"));
2357 usage(B_FALSE);
2358 }
2359
2360 pool = argv[0];
2361
2362 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
2363 /*
2364 * As a special case, check for use of '/' in the name, and
2365 * direct the user to use 'zfs destroy' instead.
2366 */
2367 if (strchr(pool, '/') != NULL)
2368 (void) fprintf(stderr, gettext("use 'zfs destroy' to "
2369 "destroy a dataset\n"));
2370 return (1);
2371 }
2372
2373 if (zpool_disable_datasets(zhp, force) != 0) {
2374 (void) fprintf(stderr, gettext("could not destroy '%s': "
2375 "could not unmount datasets\n"), zpool_get_name(zhp));
2376 zpool_close(zhp);
2377 return (1);
2378 }
2379
2380 /* The history must be logged as part of the export */
2381 log_history = B_FALSE;
2382
2383 ret = (zpool_destroy(zhp, history_str) != 0);
2384
2385 zpool_close(zhp);
2386
2387 return (ret);
2388 }
2389
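/* Shared state for 'zpool export', including the thread pool used by -a */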
2390 typedef struct export_cbdata {
2391 tpool_t *tpool;
2392 pthread_mutex_t mnttab_lock;
2393 boolean_t force;
2394 boolean_t hardforce;
2395 int retval;
2396 } export_cbdata_t;
2397
2398
2399 typedef struct {
2400 char *aea_poolname;
2401 export_cbdata_t *aea_cbdata;
2402 } async_export_args_t;
2403
2404 /*
2405 * Export one pool
2406 */
2407 static int
2408 zpool_export_one(zpool_handle_t *zhp, void *data)
2409 {
2410 export_cbdata_t *cb = data;
2411
2412 /*
2413 * zpool_disable_datasets() is not thread-safe for mnttab access.
2414 * So we serialize access here for the 'zpool export -a' parallel case.
2415 */
2416 if (cb->tpool != NULL)
2417 pthread_mutex_lock(&cb->mnttab_lock);
2418
2419 int retval = zpool_disable_datasets(zhp, cb->force);
2420
2421 if (cb->tpool != NULL)
2422 pthread_mutex_unlock(&cb->mnttab_lock);
2423
2424 if (retval)
2425 return (1);
2426
2427 if (cb->hardforce) {
2428 if (zpool_export_force(zhp, history_str) != 0)
2429 return (1);
2430 } else if (zpool_export(zhp, cb->force, history_str) != 0) {
2431 return (1);
2432 }
2433
2434 return (0);
2435 }
2436
2437 /*
2438 * Asynchronous export request
2439 */
2440 static void
2441 zpool_export_task(void *arg)
2442 {
2443 async_export_args_t *aea = arg;
2444
2445 zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname);
2446 if (zhp != NULL) {
2447 int ret = zpool_export_one(zhp, aea->aea_cbdata);
2448 if (ret != 0)
2449 aea->aea_cbdata->retval = ret;
2450 zpool_close(zhp);
2451 } else {
2452 aea->aea_cbdata->retval = 1;
2453 }
2454
2455 free(aea->aea_poolname);
2456 free(aea);
2457 }
2458
2459 /*
2460 * Process an export request in parallel
2461 */
2462 static int
2463 zpool_export_one_async(zpool_handle_t *zhp, void *data)
2464 {
2465 tpool_t *tpool = ((export_cbdata_t *)data)->tpool;
2466 async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t));
2467
2468 /* save pool name since zhp will go out of scope */
2469 aea->aea_poolname = strdup(zpool_get_name(zhp));
2470 aea->aea_cbdata = data;
2471
2472 /* ship off actual export to another thread */
2473 if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0)
2474 return (errno); /* unlikely */
2475 else
2476 return (0);
2477 }
2478
2479 /*
2480 * zpool export [-afF] <pool> ...
2481 *
2482 * -a Export all pools
2483 * -f Forcefully unmount datasets
2484 * -F Hard force: unmount forcefully and skip recording the exported
 *    state in the device labels
 *
2485 * Export the given pools. By default, the command will attempt to cleanly
2486 * unmount any active datasets within the pool. If the '-f' flag is specified,
2487 * then the datasets will be forcefully unmounted.
2488 */
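/*
 * Typical invocations (hypothetical pool name):
 *
 *	zpool export tank
 *	zpool export -a
 */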
2489 int
2490 zpool_do_export(int argc, char **argv)
2491 {
2492 export_cbdata_t cb;
2493 boolean_t do_all = B_FALSE;
2494 boolean_t force = B_FALSE;
2495 boolean_t hardforce = B_FALSE;
2496 int c, ret;
2497
2498 /* check options */
2499 while ((c = getopt(argc, argv, "afF")) != -1) {
2500 switch (c) {
2501 case 'a':
2502 do_all = B_TRUE;
2503 break;
2504 case 'f':
2505 force = B_TRUE;
2506 break;
2507 case 'F':
2508 hardforce = B_TRUE;
2509 break;
2510 case '?':
2511 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2512 optopt);
2513 usage(B_FALSE);
2514 }
2515 }
2516
2517 cb.force = force;
2518 cb.hardforce = hardforce;
2519 cb.tpool = NULL;
2520 cb.retval = 0;
2521 argc -= optind;
2522 argv += optind;
2523
2524 /* The history will be logged as part of the export itself */
2525 log_history = B_FALSE;
2526
2527 if (do_all) {
2528 if (argc != 0) {
2529 (void) fprintf(stderr, gettext("too many arguments\n"));
2530 usage(B_FALSE);
2531 }
2532
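/* size the thread pool from the CPU count so pools export concurrently */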
2533 cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
2534 0, NULL);
2535 pthread_mutex_init(&cb.mnttab_lock, NULL);
2536
2537 /* Asynchronously call zpool_export_one using thread pool */
2538 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2539 B_FALSE, zpool_export_one_async, &cb);
2540
2541 tpool_wait(cb.tpool);
2542 tpool_destroy(cb.tpool);
2543 (void) pthread_mutex_destroy(&cb.mnttab_lock);
2544
2545 return (ret | cb.retval);
2546 }
2547
2548 /* check arguments */
2549 if (argc < 1) {
2550 (void) fprintf(stderr, gettext("missing pool argument\n"));
2551 usage(B_FALSE);
2552 }
2553
2554 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2555 B_FALSE, zpool_export_one, &cb);
2556
2557 return (ret);
2558 }
2559
2560 /*
2561 * Given a vdev configuration, determine the maximum width needed for the device
2562 * name column.
2563 */
2564 static int
2565 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
2566 int name_flags)
2567 {
2568 static const char *const subtypes[] =
2569 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
2570
2571 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
2572 max = MAX(strlen(name) + depth, max);
2573 free(name);
2574
2575 nvlist_t **child;
2576 uint_t children;
2577 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
2578 if (nvlist_lookup_nvlist_array(nv, subtypes[i],
2579 &child, &children) == 0)
2580 for (uint_t c = 0; c < children; ++c)
2581 max = MAX(max_width(zhp, child[c], depth + 2,
2582 max, name_flags), max);
2583
2584 return (max);
2585 }
2586
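/* Callback state shared by the 'zpool status' and import printing paths */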
2587 typedef struct status_cbdata {
2588 int cb_count;
2589 int cb_name_flags;
2590 int cb_namewidth;
2591 boolean_t cb_allpools;
2592 boolean_t cb_verbose;
2593 boolean_t cb_literal;
2594 boolean_t cb_explain;
2595 boolean_t cb_first;
2596 boolean_t cb_dedup_stats;
2597 boolean_t cb_print_unhealthy;
2598 boolean_t cb_print_status;
2599 boolean_t cb_print_slow_ios;
2600 boolean_t cb_print_dio_verify;
2601 boolean_t cb_print_vdev_init;
2602 boolean_t cb_print_vdev_trim;
2603 vdev_cmd_data_list_t *vcdl;
2604 boolean_t cb_print_power;
2605 boolean_t cb_json;
2606 boolean_t cb_flat_vdevs;
2607 nvlist_t *cb_jsobj;
2608 boolean_t cb_json_as_int;
2609 boolean_t cb_json_pool_key_guid;
2610 } status_cbdata_t;
2611
2612 /* Return B_TRUE if string is NULL, empty, or whitespace; B_FALSE otherwise. */
2613 static boolean_t
2614 is_blank_str(const char *str)
2615 {
2616 for (; str != NULL && *str != '\0'; ++str)
2617 if (!isblank(*str))
2618 return (B_FALSE);
2619 return (B_TRUE);
2620 }
2621
2622 static void
2623 zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,
2624 nvlist_t *item)
2625 {
2626 vdev_cmd_data_t *data;
2627 int i, j, k = 1;
2628 char tmp[256];
2629 const char *val;
2630
2631 for (i = 0; i < vcdl->count; i++) {
2632 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2633 (strcmp(vcdl->data[i].pool, pool) != 0))
2634 continue;
2635
2636 data = &vcdl->data[i];
2637 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2638 val = NULL;
2639 for (int k = 0; k < data->cols_cnt; k++) {
2640 if (strcmp(data->cols[k],
2641 vcdl->uniq_cols[j]) == 0) {
2642 val = data->lines[k];
2643 break;
2644 }
2645 }
2646 if (val == NULL || is_blank_str(val))
2647 val = "-";
2648 fnvlist_add_string(item, vcdl->uniq_cols[j], val);
2649 }
2650
2651 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2652 if (data->lines[j]) {
2653 snprintf(tmp, 256, "extra_%d", k++);
2654 fnvlist_add_string(item, tmp,
2655 data->lines[j]);
2656 }
2657 }
2658 break;
2659 }
2660 }
2661
2662 /* Print command output lines for a specific vdev in a specific pool */
2663 static void
2664 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
2665 {
2666 vdev_cmd_data_t *data;
2667 int i, j;
2668 const char *val;
2669
2670 for (i = 0; i < vcdl->count; i++) {
2671 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2672 (strcmp(vcdl->data[i].pool, pool) != 0)) {
2673 /* Not the vdev we're looking for */
2674 continue;
2675 }
2676
2677 data = &vcdl->data[i];
2678 /* Print out all the output values for this vdev */
2679 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2680 val = NULL;
2681 /* Does this vdev have values for this column? */
2682 for (int k = 0; k < data->cols_cnt; k++) {
2683 if (strcmp(data->cols[k],
2684 vcdl->uniq_cols[j]) == 0) {
2685 /* yes it does, record the value */
2686 val = data->lines[k];
2687 break;
2688 }
2689 }
2690 /*
2691 * Mark empty values with dashes to make output
2692 * awk-able.
2693 */
2694 if (val == NULL || is_blank_str(val))
2695 val = "-";
2696
2697 printf("%*s", vcdl->uniq_cols_width[j], val);
2698 if (j < vcdl->uniq_cols_cnt - 1)
2699 fputs(" ", stdout);
2700 }
2701
2702 /* Print out any values that aren't in a column at the end */
2703 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2704 /* Did we have any columns? If so print a spacer. */
2705 if (vcdl->uniq_cols_cnt > 0)
2706 fputs(" ", stdout);
2707
2708 val = data->lines[j];
2709 fputs(val ?: "", stdout);
2710 }
2711 break;
2712 }
2713 }
2714
2715 /*
2716 * Print vdev initialization status for leaves
2717 */
2718 static void
2719 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2720 {
2721 if (verbose) {
2722 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2723 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2724 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2725 !vs->vs_scan_removing) {
2726 char zbuf[1024];
2727 char tbuf[256];
2728
2729 time_t t = vs->vs_initialize_action_time;
2730 int initialize_pct = 100;
2731 if (vs->vs_initialize_state !=
2732 VDEV_INITIALIZE_COMPLETE) {
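/* the +1 guards against division by zero while the estimate is still 0 */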
2733 initialize_pct = (vs->vs_initialize_bytes_done *
2734 100 / (vs->vs_initialize_bytes_est + 1));
2735 }
2736
2737 (void) ctime_r(&t, tbuf);
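/* ctime_r() output ends with a newline; truncate to the first 24 chars */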
2738 tbuf[24] = 0;
2739
2740 switch (vs->vs_initialize_state) {
2741 case VDEV_INITIALIZE_SUSPENDED:
2742 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2743 gettext("suspended, started at"), tbuf);
2744 break;
2745 case VDEV_INITIALIZE_ACTIVE:
2746 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2747 gettext("started at"), tbuf);
2748 break;
2749 case VDEV_INITIALIZE_COMPLETE:
2750 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2751 gettext("completed at"), tbuf);
2752 break;
2753 }
2754
2755 (void) printf(gettext(" (%d%% initialized%s)"),
2756 initialize_pct, zbuf);
2757 } else {
2758 (void) printf(gettext(" (uninitialized)"));
2759 }
2760 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2761 (void) printf(gettext(" (initializing)"));
2762 }
2763 }
2764
2765 /*
2766 * Print vdev TRIM status for leaves
2767 */
2768 static void
2769 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2770 {
2771 if (verbose) {
2772 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2773 vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2774 vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2775 !vs->vs_scan_removing) {
2776 char zbuf[1024];
2777 char tbuf[256];
2778
2779 time_t t = vs->vs_trim_action_time;
2780 int trim_pct = 100;
2781 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
2782 trim_pct = (vs->vs_trim_bytes_done *
2783 100 / (vs->vs_trim_bytes_est + 1));
2784 }
2785
2786 (void) ctime_r(&t, tbuf);
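/* as above, strip the trailing newline from the ctime_r() timestamp */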
2787 tbuf[24] = 0;
2788
2789 switch (vs->vs_trim_state) {
2790 case VDEV_TRIM_SUSPENDED:
2791 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2792 gettext("suspended, started at"), tbuf);
2793 break;
2794 case VDEV_TRIM_ACTIVE:
2795 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2796 gettext("started at"), tbuf);
2797 break;
2798 case VDEV_TRIM_COMPLETE:
2799 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2800 gettext("completed at"), tbuf);
2801 break;
2802 }
2803
2804 (void) printf(gettext(" (%d%% trimmed%s)"),
2805 trim_pct, zbuf);
2806 } else if (vs->vs_trim_notsup) {
2807 (void) printf(gettext(" (trim unsupported)"));
2808 } else {
2809 (void) printf(gettext(" (untrimmed)"));
2810 }
2811 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2812 (void) printf(gettext(" (trimming)"));
2813 }
2814 }
2815
2816 /*
2817 * Return the color associated with a health string. This includes returning
2818 * NULL for no color change.
2819 */
2820 static const char *
2821 health_str_to_color(const char *health)
2822 {
2823 if (strcmp(health, gettext("FAULTED")) == 0 ||
2824 strcmp(health, gettext("SUSPENDED")) == 0 ||
2825 strcmp(health, gettext("UNAVAIL")) == 0) {
2826 return (ANSI_RED);
2827 }
2828
2829 if (strcmp(health, gettext("OFFLINE")) == 0 ||
2830 strcmp(health, gettext("DEGRADED")) == 0 ||
2831 strcmp(health, gettext("REMOVED")) == 0) {
2832 return (ANSI_YELLOW);
2833 }
2834
2835 return (NULL);
2836 }
2837
2838 /*
2839 * Called for each leaf vdev. Returns 0 if the vdev is healthy.
2840 * A vdev is unhealthy if any of the following are true:
2841 * 1) there are read, write, or checksum errors,
2842 * 2) its state is not ONLINE, or
2843 * 3) slow IO reporting was requested (-s) and there are slow IOs.
2844 */
2845 static int
2846 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
2847 {
2848 status_cbdata_t *cb = data;
2849 vdev_stat_t *vs;
2850 uint_t vsc;
2851 (void) hdl_data;
2852
2853 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2854 (uint64_t **)&vs, &vsc) != 0)
2855 return (1);
2856
2857 if (vs->vs_checksum_errors || vs->vs_read_errors ||
2858 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
2859 return (1);
2860
2861 if (cb->cb_print_slow_ios && vs->vs_slow_ios)
2862 return (1);
2863
2864 return (0);
2865 }
2866
2867 /*
2868 * Print out configuration state as requested by status_callback.
2869 */
2870 static void
2871 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
2872 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
2873 {
2874 nvlist_t **child, *root;
2875 uint_t c, i, vsc, children;
2876 pool_scan_stat_t *ps = NULL;
2877 vdev_stat_t *vs;
2878 char rbuf[6], wbuf[6], cbuf[6], dbuf[6];
2879 char *vname;
2880 uint64_t notpresent;
2881 spare_cbdata_t spare_cb;
2882 const char *state;
2883 const char *type;
2884 const char *path = NULL;
2885 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
2886 *scolor = NULL;
2887
2888 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2889 &child, &children) != 0)
2890 children = 0;
2891
2892 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2893 (uint64_t **)&vs, &vsc) == 0);
2894
2895 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2896
2897 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2898 return;
2899
2900 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2901
2902 if (isspare) {
2903 /*
2904 * For hot spares, we use the terms 'INUSE' and 'AVAIL' for
2905 * online drives.
2906 */
2907 if (vs->vs_aux == VDEV_AUX_SPARED)
2908 state = gettext("INUSE");
2909 else if (vs->vs_state == VDEV_STATE_HEALTHY)
2910 state = gettext("AVAIL");
2911 }
2912
2913 /*
2914 * If '-e' is specified then top-level vdevs and their children
2915 * can be pruned if all of their leaves are healthy.
2916 */
2917 if (cb->cb_print_unhealthy && depth > 0 &&
2918 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
2919 return;
2920 }
2921
2922 printf_color(health_str_to_color(state),
2923 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
2924 name, state);
2925
2926 if (!isspare) {
2927 if (vs->vs_read_errors)
2928 rcolor = ANSI_RED;
2929
2930 if (vs->vs_write_errors)
2931 wcolor = ANSI_RED;
2932
2933 if (vs->vs_checksum_errors)
2934 ccolor = ANSI_RED;
2935
2936 if (vs->vs_slow_ios)
2937 scolor = ANSI_BLUE;
2938
2939 if (cb->cb_literal) {
2940 fputc(' ', stdout);
2941 printf_color(rcolor, "%5llu",
2942 (u_longlong_t)vs->vs_read_errors);
2943 fputc(' ', stdout);
2944 printf_color(wcolor, "%5llu",
2945 (u_longlong_t)vs->vs_write_errors);
2946 fputc(' ', stdout);
2947 printf_color(ccolor, "%5llu",
2948 (u_longlong_t)vs->vs_checksum_errors);
2949 } else {
2950 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
2951 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
2952 zfs_nicenum(vs->vs_checksum_errors, cbuf,
2953 sizeof (cbuf));
2954 fputc(' ', stdout);
2955 printf_color(rcolor, "%5s", rbuf);
2956 fputc(' ', stdout);
2957 printf_color(wcolor, "%5s", wbuf);
2958 fputc(' ', stdout);
2959 printf_color(ccolor, "%5s", cbuf);
2960 }
2961 if (cb->cb_print_slow_ios) {
2962 if (children == 0) {
2963 /* Only leaf vdevs have slow I/Os */
2964 zfs_nicenum(vs->vs_slow_ios, rbuf,
2965 sizeof (rbuf));
2966 } else {
2967 snprintf(rbuf, sizeof (rbuf), "-");
2968 }
2969
2970 if (cb->cb_literal)
2971 printf_color(scolor, " %5llu",
2972 (u_longlong_t)vs->vs_slow_ios);
2973 else
2974 printf_color(scolor, " %5s", rbuf);
2975 }
2976 if (cb->cb_print_power) {
2977 if (children == 0) {
2978 /* Only leaf vdevs have physical slots */
2979 switch (zpool_power_current_state(zhp, (char *)
2980 fnvlist_lookup_string(nv,
2981 ZPOOL_CONFIG_PATH))) {
2982 case 0:
2983 printf_color(ANSI_RED, " %5s",
2984 gettext("off"));
2985 break;
2986 case 1:
2987 printf(" %5s", gettext("on"));
2988 break;
2989 default:
2990 printf(" %5s", "-");
2991 }
2992 } else {
2993 printf(" %5s", "-");
2994 }
2995 }
2996 if (VDEV_STAT_VALID(vs_dio_verify_errors, vsc) &&
2997 cb->cb_print_dio_verify) {
2998 zfs_nicenum(vs->vs_dio_verify_errors, dbuf,
2999 sizeof (dbuf));
3000
3001 if (cb->cb_literal)
3002 printf(" %5llu",
3003 (u_longlong_t)vs->vs_dio_verify_errors);
3004 else
3005 printf(" %5s", dbuf);
3006 }
3007 }
3008
3009 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3010 &notpresent) == 0) {
3011 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
3012 (void) printf(" %s %s", gettext("was"), path);
3013 } else if (vs->vs_aux != 0) {
3014 (void) printf(" ");
3015 color_start(ANSI_RED);
3016 switch (vs->vs_aux) {
3017 case VDEV_AUX_OPEN_FAILED:
3018 (void) printf(gettext("cannot open"));
3019 break;
3020
3021 case VDEV_AUX_BAD_GUID_SUM:
3022 (void) printf(gettext("missing device"));
3023 break;
3024
3025 case VDEV_AUX_NO_REPLICAS:
3026 (void) printf(gettext("insufficient replicas"));
3027 break;
3028
3029 case VDEV_AUX_VERSION_NEWER:
3030 (void) printf(gettext("newer version"));
3031 break;
3032
3033 case VDEV_AUX_UNSUP_FEAT:
3034 (void) printf(gettext("unsupported feature(s)"));
3035 break;
3036
3037 case VDEV_AUX_ASHIFT_TOO_BIG:
3038 (void) printf(gettext("unsupported minimum blocksize"));
3039 break;
3040
3041 case VDEV_AUX_SPARED:
3042 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3043 &spare_cb.cb_guid) == 0);
3044 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
3045 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
3046 zpool_get_name(zhp)) == 0)
3047 (void) printf(gettext("currently in "
3048 "use"));
3049 else
3050 (void) printf(gettext("in use by "
3051 "pool '%s'"),
3052 zpool_get_name(spare_cb.cb_zhp));
3053 zpool_close(spare_cb.cb_zhp);
3054 } else {
3055 (void) printf(gettext("currently in use"));
3056 }
3057 break;
3058
3059 case VDEV_AUX_ERR_EXCEEDED:
3060 if (vs->vs_read_errors + vs->vs_write_errors +
3061 vs->vs_checksum_errors == 0 && children == 0 &&
3062 vs->vs_slow_ios > 0) {
3063 (void) printf(gettext("too many slow I/Os"));
3064 } else {
3065 (void) printf(gettext("too many errors"));
3066 }
3067 break;
3068
3069 case VDEV_AUX_IO_FAILURE:
3070 (void) printf(gettext("experienced I/O failures"));
3071 break;
3072
3073 case VDEV_AUX_BAD_LOG:
3074 (void) printf(gettext("bad intent log"));
3075 break;
3076
3077 case VDEV_AUX_EXTERNAL:
3078 (void) printf(gettext("external device fault"));
3079 break;
3080
3081 case VDEV_AUX_SPLIT_POOL:
3082 (void) printf(gettext("split into new pool"));
3083 break;
3084
3085 case VDEV_AUX_ACTIVE:
3086 (void) printf(gettext("currently in use"));
3087 break;
3088
3089 case VDEV_AUX_CHILDREN_OFFLINE:
3090 (void) printf(gettext("all children offline"));
3091 break;
3092
3093 case VDEV_AUX_BAD_LABEL:
3094 (void) printf(gettext("invalid label"));
3095 break;
3096
3097 default:
3098 (void) printf(gettext("corrupted data"));
3099 break;
3100 }
3101 color_end();
3102 } else if (children == 0 && !isspare &&
3103 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
3104 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
3105 vs->vs_configured_ashift < vs->vs_physical_ashift) {
3106 (void) printf(
3107 gettext(" block size: %dB configured, %dB native"),
3108 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
3109 }
3110
3111 if (vs->vs_scan_removing != 0) {
3112 (void) printf(gettext(" (removing)"));
3113 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
3114 (void) printf(gettext(" (non-allocating)"));
3115 }
3116
3117 /* The root vdev has the scrub/resilver stats */
3118 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3119 ZPOOL_CONFIG_VDEV_TREE);
3120 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
3121 (uint64_t **)&ps, &c);
3122
3123 /*
3124 * If you force fault a drive that's resilvering, its scan stats can
3125 * get frozen in time, giving the false impression that it's
3126 * being resilvered. That's why we check the state to see if the vdev
3127 * is healthy before reporting "resilvering" or "repairing".
3128 */
3129 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
3130 vs->vs_state == VDEV_STATE_HEALTHY) {
3131 if (vs->vs_scan_processed != 0) {
3132 (void) printf(gettext(" (%s)"),
3133 (ps->pss_func == POOL_SCAN_RESILVER) ?
3134 "resilvering" : "repairing");
3135 } else if (vs->vs_resilver_deferred) {
3136 (void) printf(gettext(" (awaiting resilver)"));
3137 }
3138 }
3139
3140 /* The top-level vdevs have the rebuild stats */
3141 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
3142 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
3143 if (vs->vs_rebuild_processed != 0) {
3144 (void) printf(gettext(" (resilvering)"));
3145 }
3146 }
3147
3148 if (cb->vcdl != NULL) {
3149 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3150 printf(" ");
3151 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
3152 }
3153 }
3154
3155 /* Display vdev initialization and trim status for leaves. */
3156 if (children == 0) {
3157 print_status_initialize(vs, cb->cb_print_vdev_init);
3158 print_status_trim(vs, cb->cb_print_vdev_trim);
3159 }
3160
3161 (void) printf("\n");
3162
3163 for (c = 0; c < children; c++) {
3164 uint64_t islog = B_FALSE, ishole = B_FALSE;
3165
3166 /* Don't print logs or holes here */
3167 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3168 &islog);
3169 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3170 &ishole);
3171 if (islog || ishole)
3172 continue;
3173 /* Only print normal classes here */
3174 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
3175 continue;
3176
3177 /* Provide vdev_rebuild_stats to children if available */
3178 if (vrs == NULL) {
3179 (void) nvlist_lookup_uint64_array(nv,
3180 ZPOOL_CONFIG_REBUILD_STATS,
3181 (uint64_t **)&vrs, &i);
3182 }
3183
3184 vname = zpool_vdev_name(g_zfs, zhp, child[c],
3185 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3186 print_status_config(zhp, cb, vname, child[c], depth + 2,
3187 isspare, vrs);
3188 free(vname);
3189 }
3190 }
3191
3192 /*
3193 * Print the configuration of an exported pool. Iterate over all vdevs in the
3194 * pool, printing out the name and status for each one.
3195 */
3196 static void
3197 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
3198 int depth)
3199 {
3200 nvlist_t **child;
3201 uint_t c, children;
3202 vdev_stat_t *vs;
3203 const char *type;
3204 char *vname;
3205
3206 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3207 if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
3208 strcmp(type, VDEV_TYPE_HOLE) == 0)
3209 return;
3210
3211 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3212 (uint64_t **)&vs, &c) == 0);
3213
3214 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
3215 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
3216
3217 if (vs->vs_aux != 0) {
3218 (void) printf(" ");
3219
3220 switch (vs->vs_aux) {
3221 case VDEV_AUX_OPEN_FAILED:
3222 (void) printf(gettext("cannot open"));
3223 break;
3224
3225 case VDEV_AUX_BAD_GUID_SUM:
3226 (void) printf(gettext("missing device"));
3227 break;
3228
3229 case VDEV_AUX_NO_REPLICAS:
3230 (void) printf(gettext("insufficient replicas"));
3231 break;
3232
3233 case VDEV_AUX_VERSION_NEWER:
3234 (void) printf(gettext("newer version"));
3235 break;
3236
3237 case VDEV_AUX_UNSUP_FEAT:
3238 (void) printf(gettext("unsupported feature(s)"));
3239 break;
3240
3241 case VDEV_AUX_ERR_EXCEEDED:
3242 (void) printf(gettext("too many errors"));
3243 break;
3244
3245 case VDEV_AUX_ACTIVE:
3246 (void) printf(gettext("currently in use"));
3247 break;
3248
3249 case VDEV_AUX_CHILDREN_OFFLINE:
3250 (void) printf(gettext("all children offline"));
3251 break;
3252
3253 case VDEV_AUX_BAD_LABEL:
3254 (void) printf(gettext("invalid label"));
3255 break;
3256
3257 default:
3258 (void) printf(gettext("corrupted data"));
3259 break;
3260 }
3261 }
3262 (void) printf("\n");
3263
3264 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
3265 &child, &children) != 0)
3266 return;
3267
3268 for (c = 0; c < children; c++) {
3269 uint64_t is_log = B_FALSE;
3270
3271 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3272 &is_log);
3273 if (is_log)
3274 continue;
3275 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
3276 continue;
3277
3278 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3279 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3280 print_import_config(cb, vname, child[c], depth + 2);
3281 free(vname);
3282 }
3283
3284 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
3285 &child, &children) == 0) {
3286 (void) printf(gettext("\tcache\n"));
3287 for (c = 0; c < children; c++) {
3288 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3289 cb->cb_name_flags);
3290 (void) printf("\t %s\n", vname);
3291 free(vname);
3292 }
3293 }
3294
3295 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
3296 &child, &children) == 0) {
3297 (void) printf(gettext("\tspares\n"));
3298 for (c = 0; c < children; c++) {
3299 vname = zpool_vdev_name(g_zfs, NULL, child[c],
3300 cb->cb_name_flags);
3301 (void) printf("\t %s\n", vname);
3302 free(vname);
3303 }
3304 }
3305 }
3306
3307 /*
3308 * Print specialized class vdevs.
3309 *
3310 * These are recorded as top level vdevs in the main pool child array
3311 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
3312 * print_status_config() or print_import_config() to print the top-level
3313 * class vdevs; any of their children (e.g. mirrored slogs) are then printed
3314 * recursively, which works because only the top-level vdev is marked.
3315 */
3316 static void
3317 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
3318 const char *class)
3319 {
3320 uint_t c, children;
3321 nvlist_t **child;
3322 boolean_t printed = B_FALSE;
3323
3324 assert(zhp != NULL || !cb->cb_verbose);
3325
3326 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
3327 &children) != 0)
3328 return;
3329
3330 for (c = 0; c < children; c++) {
3331 uint64_t is_log = B_FALSE;
3332 const char *bias = NULL;
3333 const char *type = NULL;
3334
3335 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3336 &is_log);
3337
3338 if (is_log) {
3339 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
3340 } else {
3341 (void) nvlist_lookup_string(child[c],
3342 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
3343 (void) nvlist_lookup_string(child[c],
3344 ZPOOL_CONFIG_TYPE, &type);
3345 }
3346
3347 if (bias == NULL || strcmp(bias, class) != 0)
3348 continue;
3349 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
3350 continue;
3351
3352 if (!printed) {
3353 (void) printf("\t%s\t\n", gettext(class));
3354 printed = B_TRUE;
3355 }
3356
3357 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
3358 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3359 if (cb->cb_print_status)
3360 print_status_config(zhp, cb, name, child[c], 2,
3361 B_FALSE, NULL);
3362 else
3363 print_import_config(cb, name, child[c], 2);
3364 free(name);
3365 }
3366 }
3367
3368 /*
3369 * Display the status for the given pool.
3370 */
3371 static int
3372 show_import(nvlist_t *config, boolean_t report_error)
3373 {
3374 uint64_t pool_state;
3375 vdev_stat_t *vs;
3376 const char *name;
3377 uint64_t guid;
3378 uint64_t hostid = 0;
3379 const char *msgid;
3380 const char *hostname = "unknown";
3381 nvlist_t *nvroot, *nvinfo;
3382 zpool_status_t reason;
3383 zpool_errata_t errata;
3384 const char *health;
3385 uint_t vsc;
3386 const char *comment;
3387 const char *indent;
3388 char buf[2048];
3389 status_cbdata_t cb = { 0 };
3390
3391 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3392 &name) == 0);
3393 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3394 &guid) == 0);
3395 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3396 &pool_state) == 0);
3397 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3398 &nvroot) == 0);
3399
3400 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
3401 (uint64_t **)&vs, &vsc) == 0);
3402 health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
3403
3404 reason = zpool_import_status(config, &msgid, &errata);
3405
3406 /*
3407 * If we're importing using a cachefile, then we won't report any
3408 * errors unless we are in the scan phase of the import.
3409 */
3410 if (reason != ZPOOL_STATUS_OK && !report_error)
3411 return (reason);
3412
3413 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) {
3414 indent = " ";
3415 } else {
3416 comment = NULL;
3417 indent = "";
3418 }
3419
3420 (void) printf(gettext("%s pool: %s\n"), indent, name);
3421 (void) printf(gettext("%s id: %llu\n"), indent, (u_longlong_t)guid);
3422 (void) printf(gettext("%s state: %s"), indent, health);
3423 if (pool_state == POOL_STATE_DESTROYED)
3424 (void) printf(gettext(" (DESTROYED)"));
3425 (void) printf("\n");
3426
3427 if (reason != ZPOOL_STATUS_OK) {
3428 (void) printf("%s", indent);
3429 printf_color(ANSI_BOLD, gettext("status: "));
3430 }
3431 switch (reason) {
3432 case ZPOOL_STATUS_MISSING_DEV_R:
3433 case ZPOOL_STATUS_MISSING_DEV_NR:
3434 case ZPOOL_STATUS_BAD_GUID_SUM:
3435 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3436 "missing from the system.\n"));
3437 break;
3438
3439 case ZPOOL_STATUS_CORRUPT_LABEL_R:
3440 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
3441 printf_color(ANSI_YELLOW, gettext("One or more devices "
3442 "contains corrupted data.\n"));
3443 break;
3444
3445 case ZPOOL_STATUS_CORRUPT_DATA:
3446 printf_color(ANSI_YELLOW, gettext("The pool data is "
3447 "corrupted.\n"));
3448 break;
3449
3450 case ZPOOL_STATUS_OFFLINE_DEV:
3451 printf_color(ANSI_YELLOW, gettext("One or more devices "
3452 "are offlined.\n"));
3453 break;
3454
3455 case ZPOOL_STATUS_CORRUPT_POOL:
3456 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
3457 "corrupted.\n"));
3458 break;
3459
3460 case ZPOOL_STATUS_VERSION_OLDER:
3461 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3462 "a legacy on-disk version.\n"));
3463 break;
3464
3465 case ZPOOL_STATUS_VERSION_NEWER:
3466 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3467 "an incompatible version.\n"));
3468 break;
3469
3470 case ZPOOL_STATUS_FEAT_DISABLED:
3471 printf_color(ANSI_YELLOW, gettext("Some supported "
3472 "features are not enabled on the pool.\n"
3473 "\t%s(Note that they may be intentionally disabled if the\n"
3474 "\t%s'compatibility' property is set.)\n"), indent, indent);
3475 break;
3476
3477 case ZPOOL_STATUS_COMPATIBILITY_ERR:
3478 printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
3479 "the file(s) indicated by the 'compatibility'\n"
3480 "\t%sproperty.\n"), indent);
3481 break;
3482
3483 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
3484 printf_color(ANSI_YELLOW, gettext("One or more features "
3485 "are enabled on the pool despite not being\n"
3486 "\t%srequested by the 'compatibility' property.\n"),
3487 indent);
3488 break;
3489
3490 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3491 printf_color(ANSI_YELLOW, gettext("The pool uses the following "
3492 "feature(s) not supported on this system:\n"));
3493 color_start(ANSI_YELLOW);
3494 zpool_collect_unsup_feat(config, buf, 2048);
3495 (void) printf("%s", buf);
3496 color_end();
3497 break;
3498
3499 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3500 printf_color(ANSI_YELLOW, gettext("The pool can only be "
3501 "accessed in read-only mode on this system. It\n"
3502 "\t%scannot be accessed in read-write mode because it uses "
3503 "the following\n"
3504 "\t%sfeature(s) not supported on this system:\n"),
3505 indent, indent);
3506 color_start(ANSI_YELLOW);
3507 zpool_collect_unsup_feat(config, buf, 2048);
3508 (void) printf("%s", buf);
3509 color_end();
3510 break;
3511
3512 case ZPOOL_STATUS_HOSTID_ACTIVE:
3513 printf_color(ANSI_YELLOW, gettext("The pool is currently "
3514 "imported by another system.\n"));
3515 break;
3516
3517 case ZPOOL_STATUS_HOSTID_REQUIRED:
3518 printf_color(ANSI_YELLOW, gettext("The pool has the "
3519 "multihost property on. It cannot\n"
3520 "\t%sbe safely imported when the system hostid is not "
3521 "set.\n"), indent);
3522 break;
3523
3524 case ZPOOL_STATUS_HOSTID_MISMATCH:
3525 printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
3526 "by another system.\n"));
3527 break;
3528
3529 case ZPOOL_STATUS_FAULTED_DEV_R:
3530 case ZPOOL_STATUS_FAULTED_DEV_NR:
3531 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3532 "faulted.\n"));
3533 break;
3534
3535 case ZPOOL_STATUS_BAD_LOG:
3536 printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
3537 "be read.\n"));
3538 break;
3539
3540 case ZPOOL_STATUS_RESILVERING:
3541 case ZPOOL_STATUS_REBUILDING:
3542 printf_color(ANSI_YELLOW, gettext("One or more devices were "
3543 "being resilvered.\n"));
3544 break;
3545
3546 case ZPOOL_STATUS_ERRATA:
3547 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
3548 errata);
3549 break;
3550
3551 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
3552 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3553 "configured to use a non-native block size.\n"
3554 "\t%sExpect reduced performance.\n"), indent);
3555 break;
3556
3557 default:
3558 /*
3559 * No other status can be seen when importing pools.
3560 */
3561 assert(reason == ZPOOL_STATUS_OK);
3562 }
3563
3564 /*
3565 * Print out an action according to the overall state of the pool.
3566 */
3567 if (vs->vs_state != VDEV_STATE_HEALTHY ||
3568 reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) {
3569 (void) printf("%s", indent);
3570 (void) printf(gettext("action: "));
3571 }
3572 if (vs->vs_state == VDEV_STATE_HEALTHY) {
3573 if (reason == ZPOOL_STATUS_VERSION_OLDER ||
3574 reason == ZPOOL_STATUS_FEAT_DISABLED) {
3575 (void) printf(gettext("The pool can be imported using "
3576 "its name or numeric identifier, though\n"
3577 "\t%ssome features will not be available without "
3578 "an explicit 'zpool upgrade'.\n"), indent);
3579 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
3580 (void) printf(gettext("The pool can be imported using "
3581 "its name or numeric\n"
3582 "\t%sidentifier, though the file(s) indicated by "
3583 "its 'compatibility'\n"
3584 "\t%sproperty cannot be parsed at this time.\n"),
3585 indent, indent);
3586 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
3587 (void) printf(gettext("The pool can be imported using "
3588 "its name or numeric identifier and\n"
3589 "\t%sthe '-f' flag.\n"), indent);
3590 } else if (reason == ZPOOL_STATUS_ERRATA) {
3591 switch (errata) {
3592 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
3593 (void) printf(gettext("The pool can be "
3594 "imported using its name or numeric "
3595 "identifier,\n"
3596 "\t%showever there is a compatibility "
3597 "issue which should be corrected\n"
3598 "\t%sby running 'zpool scrub'\n"),
3599 indent, indent);
3600 break;
3601
3602 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
3603 (void) printf(gettext("The pool cannot be "
3604 "imported with this version of ZFS due to\n"
3605 "\t%san active asynchronous destroy. "
3606 "Revert to an earlier version\n"
3607 "\t%sand allow the destroy to complete "
3608 "before updating.\n"), indent, indent);
3609 break;
3610
3611 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
3612 (void) printf(gettext("Existing encrypted "
3613 "datasets contain an on-disk "
3614 "incompatibility, which\n"
3615 "\t%sneeds to be corrected. Backup these "
3616 "datasets to new encrypted datasets\n"
3617 "\t%sand destroy the old ones.\n"),
3618 indent, indent);
3619 break;
3620
3621 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
3622 (void) printf(gettext("Existing encrypted "
3623 "snapshots and bookmarks contain an "
3624 "on-disk\n"
3625 "\t%sincompatibility. This may cause "
3626 "on-disk corruption if they are used\n"
3627 "\t%swith 'zfs recv'. To correct the "
3628 "issue, enable the bookmark_v2 feature.\n"
3629 "\t%sNo additional action is needed if "
3630 "there are no encrypted snapshots or\n"
3631 "\t%sbookmarks. If preserving the "
3632 "encrypted snapshots and bookmarks is\n"
3633 "\t%srequired, use a non-raw send to "
3634 "backup and restore them. Alternately,\n"
3635 "\t%sthey may be removed to resolve the "
3636 "incompatibility.\n"), indent, indent,
3637 indent, indent, indent, indent);
3638 break;
3639 default:
3640 /*
3641 * All errata must contain an action message.
3642 */
3643 assert(errata == ZPOOL_ERRATA_NONE);
3644 }
3645 } else {
3646 (void) printf(gettext("The pool can be imported using "
3647 "its name or numeric identifier.\n"));
3648 }
3649 } else if (vs->vs_state == VDEV_STATE_DEGRADED) {
3650 (void) printf(gettext("The pool can be imported despite "
3651 "missing or damaged devices. The\n"
3652 "\t%sfault tolerance of the pool may be compromised if "
3653 "imported.\n"), indent);
3654 } else {
3655 switch (reason) {
3656 case ZPOOL_STATUS_VERSION_NEWER:
3657 (void) printf(gettext("The pool cannot be imported. "
3658 "Access the pool on a system running newer\n"
3659 "\t%ssoftware, or recreate the pool from "
3660 "backup.\n"), indent);
3661 break;
3662 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3663 (void) printf(gettext("The pool cannot be imported. "
3664 "Access the pool on a system that supports\n"
3665 "\t%sthe required feature(s), or recreate the pool "
3666 "from backup.\n"), indent);
3667 break;
3668 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3669 (void) printf(gettext("The pool cannot be imported in "
3670 "read-write mode. Import the pool with\n"
3671 "\t%s'-o readonly=on', access the pool on a system "
3672 "that supports the\n"
3673 "\t%srequired feature(s), or recreate the pool "
3674 "from backup.\n"), indent, indent);
3675 break;
3676 case ZPOOL_STATUS_MISSING_DEV_R:
3677 case ZPOOL_STATUS_MISSING_DEV_NR:
3678 case ZPOOL_STATUS_BAD_GUID_SUM:
3679 (void) printf(gettext("The pool cannot be imported. "
3680 "Attach the missing\n"
3681 "\t%sdevices and try again.\n"), indent);
3682 break;
3683 case ZPOOL_STATUS_HOSTID_ACTIVE:
3684 VERIFY0(nvlist_lookup_nvlist(config,
3685 ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
3686
3687 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3688 hostname = fnvlist_lookup_string(nvinfo,
3689 ZPOOL_CONFIG_MMP_HOSTNAME);
3690
3691 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3692 hostid = fnvlist_lookup_uint64(nvinfo,
3693 ZPOOL_CONFIG_MMP_HOSTID);
3694
3695 (void) printf(gettext("The pool must be exported from "
3696 "%s (hostid=%"PRIx64")\n"
3697 "\t%sbefore it can be safely imported.\n"),
3698 hostname, hostid, indent);
3699 break;
3700 case ZPOOL_STATUS_HOSTID_REQUIRED:
3701 (void) printf(gettext("Set a unique system hostid with "
3702 "the zgenhostid(8) command.\n"));
3703 break;
3704 default:
3705 (void) printf(gettext("The pool cannot be imported due "
3706 "to damaged devices or data.\n"));
3707 }
3708 }
3709
3710 /* Print the comment attached to the pool. */
3711 if (comment != NULL)
3712 (void) printf(gettext("comment: %s\n"), comment);
3713
3714 /*
3715 * If the state is "closed" or "can't open", and the aux state
3716 * is "corrupt data":
3717 */
3718 if ((vs->vs_state == VDEV_STATE_CLOSED ||
3719 vs->vs_state == VDEV_STATE_CANT_OPEN) &&
3720 vs->vs_aux == VDEV_AUX_CORRUPT_DATA) {
3721 if (pool_state == POOL_STATE_DESTROYED)
3722 (void) printf(gettext("\t%sThe pool was destroyed, "
3723 "but can be imported using the '-Df' flags.\n"),
3724 indent);
3725 else if (pool_state != POOL_STATE_EXPORTED)
3726 (void) printf(gettext("\t%sThe pool may be active on "
3727 "another system, but can be imported using\n"
3728 "\t%sthe '-f' flag.\n"), indent, indent);
3729 }
3730
3731 if (msgid != NULL) {
3732 (void) printf(gettext("%s see: "
3733 "https://openzfs.github.io/openzfs-docs/msg/%s\n"),
3734 indent, msgid);
3735 }
3736
3737 (void) printf(gettext("%sconfig:\n\n"), indent);
3738
3739 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
3740 VDEV_NAME_TYPE_ID);
3741 if (cb.cb_namewidth < 10)
3742 cb.cb_namewidth = 10;
3743
3744 print_import_config(&cb, name, nvroot, 0);
3745
3746 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
3747 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
3748 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
3749
3750 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
3751 (void) printf(gettext("\n\t%sAdditional devices are known to "
3752 "be part of this pool, though their\n"
3753 "\t%sexact configuration cannot be determined.\n"),
3754 indent, indent);
3755 }
3756 return (0);
3757 }
3758
3759 static boolean_t
3760 zfs_force_import_required(nvlist_t *config)
3761 {
3762 uint64_t state;
3763 uint64_t hostid = 0;
3764 nvlist_t *nvinfo;
3765
3766 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3767 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3768
3769 /*
3770 * The hostid on LOAD_INFO comes from the MOS label via
3771 * spa_tryimport(). If it's not there then we're likely talking to an
3772 * older kernel, so use the top one, which will be from the label
3773 * discovered in zpool_find_import(), or if a cachefile is in use, the
3774 * local hostid.
3775 */
3776 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
3777 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
3778 &hostid);
3779
3780 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3781 return (B_TRUE);
3782
3783 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3784 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3785 ZPOOL_CONFIG_MMP_STATE);
3786
3787 if (mmp_state != MMP_STATE_INACTIVE)
3788 return (B_TRUE);
3789 }
3790
3791 return (B_FALSE);
3792 }
3793
3794 /*
3795 * Perform the import for the given configuration. This passes the heavy
3796 * lifting off to zpool_import_props(), and then mounts the datasets contained
3797 * within the pool.
3798 */
3799 static int
3800 do_import(nvlist_t *config, const char *newname, const char *mntopts,
3801 nvlist_t *props, int flags, uint_t mntthreads)
3802 {
3803 int ret = 0;
3804 int ms_status = 0;
3805 zpool_handle_t *zhp;
3806 const char *name;
3807 uint64_t version;
3808
3809 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
3810 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3811
3812 if (!SPA_VERSION_IS_SUPPORTED(version)) {
3813 (void) fprintf(stderr, gettext("cannot import '%s': pool "
3814 "is formatted using an unsupported ZFS version\n"), name);
3815 return (1);
3816 } else if (zfs_force_import_required(config) &&
3817 !(flags & ZFS_IMPORT_ANY_HOST)) {
3818 mmp_state_t mmp_state = MMP_STATE_INACTIVE;
3819 nvlist_t *nvinfo;
3820
3821 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3822 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
3823 mmp_state = fnvlist_lookup_uint64(nvinfo,
3824 ZPOOL_CONFIG_MMP_STATE);
3825
3826 if (mmp_state == MMP_STATE_ACTIVE) {
3827 const char *hostname = "<unknown>";
3828 uint64_t hostid = 0;
3829
3830 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3831 hostname = fnvlist_lookup_string(nvinfo,
3832 ZPOOL_CONFIG_MMP_HOSTNAME);
3833
3834 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3835 hostid = fnvlist_lookup_uint64(nvinfo,
3836 ZPOOL_CONFIG_MMP_HOSTID);
3837
3838 (void) fprintf(stderr, gettext("cannot import '%s': "
3839 "pool is imported on %s (hostid: "
3840 "0x%"PRIx64")\nExport the pool on the other "
3841 "system, then run 'zpool import'.\n"),
3842 name, hostname, hostid);
3843 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
3844 (void) fprintf(stderr, gettext("Cannot import '%s': "
3845 "pool has the multihost property on and the\n"
3846 "system's hostid is not set. Set a unique hostid "
3847 "with the zgenhostid(8) command.\n"), name);
3848 } else {
3849 const char *hostname = "<unknown>";
3850 time_t timestamp = 0;
3851 uint64_t hostid = 0;
3852
3853 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
3854 hostname = fnvlist_lookup_string(nvinfo,
3855 ZPOOL_CONFIG_HOSTNAME);
3856 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
3857 hostname = fnvlist_lookup_string(config,
3858 ZPOOL_CONFIG_HOSTNAME);
3859
3860 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
3861 timestamp = fnvlist_lookup_uint64(config,
3862 ZPOOL_CONFIG_TIMESTAMP);
3863
3864 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
3865 hostid = fnvlist_lookup_uint64(nvinfo,
3866 ZPOOL_CONFIG_HOSTID);
3867 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
3868 hostid = fnvlist_lookup_uint64(config,
3869 ZPOOL_CONFIG_HOSTID);
3870
3871 (void) fprintf(stderr, gettext("cannot import '%s': "
3872 "pool was previously in use from another system.\n"
3873 "Last accessed by %s (hostid=%"PRIx64") at %s"
3874 "The pool can be imported, use 'zpool import -f' "
3875 "to import the pool.\n"), name, hostname,
3876 hostid, ctime(&timestamp));
3877 }
3878
3879 return (1);
3880 }
3881
3882 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
3883 return (1);
3884
3885 if (newname != NULL)
3886 name = newname;
3887
3888 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
3889 return (1);
3890
3891 /*
3892 * Loading keys is best effort. We don't want to return immediately
3893 * if it fails but we do want to give the error to the caller.
3894 */
3895 if (flags & ZFS_IMPORT_LOAD_KEYS &&
3896 zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
3897 ret = 1;
3898
3899 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
3900 !(flags & ZFS_IMPORT_ONLY)) {
3901 ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads);
3902 if (ms_status == EZFS_SHAREFAILED) {
3903 (void) fprintf(stderr, gettext("Import was "
3904 "successful, but unable to share some datasets\n"));
3905 } else if (ms_status == EZFS_MOUNTFAILED) {
3906 (void) fprintf(stderr, gettext("Import was "
3907 "successful, but unable to mount some datasets\n"));
3908 }
3909 }
3910
3911 zpool_close(zhp);
3912 return (ret);
3913 }
3914
3915 typedef struct import_parameters {
3916 nvlist_t *ip_config;
3917 const char *ip_mntopts;
3918 nvlist_t *ip_props;
3919 int ip_flags;
3920 uint_t ip_mntthreads;
3921 int *ip_err;
3922 } import_parameters_t;
3923
3924 static void
3925 do_import_task(void *arg)
3926 {
3927 import_parameters_t *ip = arg;
3928 *ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts,
3929 ip->ip_props, ip->ip_flags, ip->ip_mntthreads);
3930 free(ip);
3931 }
3932
3933
3934 static int
3935 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
3936 char *orig_name, char *new_name, importargs_t *import)
3937 {
3938 nvlist_t *config = NULL;
3939 nvlist_t *found_config = NULL;
3940 uint64_t pool_state;
3941 boolean_t pool_specified = (import->poolname != NULL ||
3942 import->guid != 0);
3943 uint_t npools = 0;
3944
3945
3946 tpool_t *tp = NULL;
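/*
 * With -a (do_all), individual imports are dispatched to a thread
 * pool with at most five threads per online CPU.
 */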
3947 if (import->do_all) {
3948 tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
3949 0, NULL);
3950 }
3951
3952 /*
3953 * At this point we have a list of import candidate configs. Even if
3954 * we were searching by pool name or guid, we still need to
3955 * post-process the list to deal with pool state and possible
3956 * duplicate names.
3957 */
3958 int err = 0;
3959 nvpair_t *elem = NULL;
3960 boolean_t first = B_TRUE;
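/*
 * Count the candidates up front so the mount thread budget can be
 * split evenly across the parallel imports.
 */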
3961 if (!pool_specified && import->do_all) {
3962 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
3963 npools++;
3964 }
3965 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3966
3967 verify(nvpair_value_nvlist(elem, &config) == 0);
3968
3969 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3970 &pool_state) == 0);
3971 if (!import->do_destroyed &&
3972 pool_state == POOL_STATE_DESTROYED)
3973 continue;
3974 if (import->do_destroyed &&
3975 pool_state != POOL_STATE_DESTROYED)
3976 continue;
3977
3978 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
3979 import->policy) == 0);
3980
3981 if (!pool_specified) {
3982 if (first)
3983 first = B_FALSE;
3984 else if (!import->do_all)
3985 (void) fputc('\n', stdout);
3986
3987 if (import->do_all) {
3988 import_parameters_t *ip = safe_malloc(
3989 sizeof (import_parameters_t));
3990
3991 ip->ip_config = config;
3992 ip->ip_mntopts = mntopts;
3993 ip->ip_props = props;
3994 ip->ip_flags = flags;
3995 ip->ip_mntthreads = mount_tp_nthr / npools;
3996 ip->ip_err = &err;
3997
3998 (void) tpool_dispatch(tp, do_import_task,
3999 (void *)ip);
4000 } else {
4001 /*
4002 * If we're importing from cachefile, then
4003 * we don't want to report errors until we
4004 * are in the scan phase of the import. If
4005 * we get an error, then we return that error
4006 * to invoke the scan phase.
4007 */
4008 if (import->cachefile && !import->scan)
4009 err = show_import(config, B_FALSE);
4010 else
4011 (void) show_import(config, B_TRUE);
4012 }
4013 } else if (import->poolname != NULL) {
4014 const char *name;
4015
4016 /*
4017 * We are searching for a pool based on name.
4018 */
4019 verify(nvlist_lookup_string(config,
4020 ZPOOL_CONFIG_POOL_NAME, &name) == 0);
4021
4022 if (strcmp(name, import->poolname) == 0) {
4023 if (found_config != NULL) {
4024 (void) fprintf(stderr, gettext(
4025 "cannot import '%s': more than "
4026 "one matching pool\n"),
4027 import->poolname);
4028 (void) fprintf(stderr, gettext(
4029 "import by numeric ID instead\n"));
4030 err = B_TRUE;
4031 }
4032 found_config = config;
4033 }
4034 } else {
4035 uint64_t guid;
4036
4037 /*
4038 * Search for a pool by guid.
4039 */
4040 verify(nvlist_lookup_uint64(config,
4041 ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
4042
4043 if (guid == import->guid)
4044 found_config = config;
4045 }
4046 }
4047 if (import->do_all) {
4048 tpool_wait(tp);
4049 tpool_destroy(tp);
4050 }
4051
4052 /*
4053 * If we were searching for a specific pool, verify that we found a
4054 * pool, and then do the import.
4055 */
4056 if (pool_specified && err == 0) {
4057 if (found_config == NULL) {
4058 (void) fprintf(stderr, gettext("cannot import '%s': "
4059 "no such pool available\n"), orig_name);
4060 err = B_TRUE;
4061 } else {
4062 err |= do_import(found_config, new_name,
4063 mntopts, props, flags, mount_tp_nthr);
4064 }
4065 }
4066
4067 /*
4068 * If we were just looking for pools, report an error if none were
4069 * found.
4070 */
4071 if (!pool_specified && first)
4072 (void) fprintf(stderr,
4073 gettext("no pools available to import\n"));
4074 return (err);
4075 }
4076
4077 typedef struct target_exists_args {
4078 const char *poolname;
4079 uint64_t poolguid;
4080 } target_exists_args_t;
4081
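/*
 * zpool_iter() callback: returns nonzero, which also ends the
 * iteration, when an already-imported pool matches the requested
 * name or GUID.
 */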
4082 static int
4083 name_or_guid_exists(zpool_handle_t *zhp, void *data)
4084 {
4085 target_exists_args_t *args = data;
4086 nvlist_t *config = zpool_get_config(zhp, NULL);
4087 int found = 0;
4088
4089 if (config == NULL)
4090 return (0);
4091
4092 if (args->poolname != NULL) {
4093 const char *pool_name;
4094
4095 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4096 &pool_name) == 0);
4097 if (strcmp(pool_name, args->poolname) == 0)
4098 found = 1;
4099 } else {
4100 uint64_t pool_guid;
4101
4102 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4103 &pool_guid) == 0);
4104 if (pool_guid == args->poolguid)
4105 found = 1;
4106 }
4107 zpool_close(zhp);
4108
4109 return (found);
4110 }
4111 /*
4112 * zpool checkpoint <pool>
4113 * checkpoint --discard <pool>
4114 *
4115 * -d Discard the checkpoint from a checkpointed
4116 * --discard pool.
4117 *
4118 * -w Wait for discarding a checkpoint to complete.
4119 * --wait
4120 *
4121 * Checkpoints the specified pool, by taking a "snapshot" of its
4122 * current state. A pool can only have one checkpoint at a time.
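* For example, 'zpool checkpoint tank' takes a checkpoint of tank, and
* 'zpool checkpoint -d -w tank' later discards it, waiting for the
* discard to complete.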
4123 */
4124 int
4125 zpool_do_checkpoint(int argc, char **argv)
4126 {
4127 boolean_t discard, wait;
4128 char *pool;
4129 zpool_handle_t *zhp;
4130 int c, err;
4131
4132 struct option long_options[] = {
4133 {"discard", no_argument, NULL, 'd'},
4134 {"wait", no_argument, NULL, 'w'},
4135 {0, 0, 0, 0}
4136 };
4137
4138 discard = B_FALSE;
4139 wait = B_FALSE;
4140 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
4141 switch (c) {
4142 case 'd':
4143 discard = B_TRUE;
4144 break;
4145 case 'w':
4146 wait = B_TRUE;
4147 break;
4148 case '?':
4149 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4150 optopt);
4151 usage(B_FALSE);
4152 }
4153 }
4154
4155 if (wait && !discard) {
4156 (void) fprintf(stderr, gettext("--wait only valid when "
4157 "--discard also specified\n"));
4158 usage(B_FALSE);
4159 }
4160
4161 argc -= optind;
4162 argv += optind;
4163
4164 if (argc < 1) {
4165 (void) fprintf(stderr, gettext("missing pool argument\n"));
4166 usage(B_FALSE);
4167 }
4168
4169 if (argc > 1) {
4170 (void) fprintf(stderr, gettext("too many arguments\n"));
4171 usage(B_FALSE);
4172 }
4173
4174 pool = argv[0];
4175
4176 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
4177 /* As a special case, check for use of '/' in the name */
4178 if (strchr(pool, '/') != NULL)
4179 (void) fprintf(stderr, gettext("'zpool checkpoint' "
4180 "doesn't work on datasets. To save the state "
4181 "of a dataset from a specific point in time "
4182 "please use 'zfs snapshot'\n"));
4183 return (1);
4184 }
4185
4186 if (discard) {
4187 err = (zpool_discard_checkpoint(zhp) != 0);
4188 if (err == 0 && wait)
4189 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
4190 } else {
4191 err = (zpool_checkpoint(zhp) != 0);
4192 }
4193
4194 zpool_close(zhp);
4195
4196 return (err);
4197 }
4198
4199 #define CHECKPOINT_OPT 1024
4200
4201 /*
4202 * zpool prefetch <type> [<type opts>] <pool>
4203 *
4204 * Prefetches a particular type of data in the specified pool.
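* Currently the only supported type is 'ddt', e.g.
* 'zpool prefetch -t ddt tank'.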
4205 */
4206 int
4207 zpool_do_prefetch(int argc, char **argv)
4208 {
4209 int c;
4210 char *poolname;
4211 char *typestr = NULL;
4212 zpool_prefetch_type_t type;
4213 zpool_handle_t *zhp;
4214 int err = 0;
4215
4216 while ((c = getopt(argc, argv, "t:")) != -1) {
4217 switch (c) {
4218 case 't':
4219 typestr = optarg;
4220 break;
4221 case ':':
4222 (void) fprintf(stderr, gettext("missing argument for "
4223 "'%c' option\n"), optopt);
4224 usage(B_FALSE);
4225 break;
4226 case '?':
4227 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4228 optopt);
4229 usage(B_FALSE);
4230 }
4231 }
4232 argc -= optind;
4233 argv += optind;
4234
4235 if (argc < 1) {
4236 (void) fprintf(stderr, gettext("missing pool name argument\n"));
4237 usage(B_FALSE);
4238 }
4239
4240 if (argc > 1) {
4241 (void) fprintf(stderr, gettext("too many arguments\n"));
4242 usage(B_FALSE);
4243 }
4244
4245 poolname = argv[0];
4246
4247 argc--;
4248 argv++;
4249
4250 if (typestr != NULL && strcmp(typestr, "ddt") == 0) {
4251 type = ZPOOL_PREFETCH_DDT;
4252 } else {
4253 (void) fprintf(stderr, gettext("unsupported prefetch type\n"));
4254 usage(B_FALSE);
4255 }
4256
4257 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
4258 return (1);
4259
4260 err = zpool_prefetch(zhp, type);
4261
4262 zpool_close(zhp);
4263
4264 return (err);
4265 }
4266
4267 /*
4268 * zpool import [-d dir] [-D]
4269 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4270 * [-d dir | -c cachefile | -s] [-f] -a
4271 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4272 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
4273 * [newpool]
4274 *
4275 * -c Read pool information from a cachefile instead of searching
4276 * devices. If importing from a cachefile config fails, then
4277 * fallback to searching for devices only in the directories that
4278 * exist in the cachefile.
4279 *
4280 * -d Scan in a specific directory, other than /dev/. More than
4281 * one directory can be specified using multiple '-d' options.
4282 *
4283 * -D Scan for previously destroyed pools or import all or only
4284 * specified destroyed pools.
4285 *
4286 * -R Temporarily import the pool, with all mountpoints relative to
4287 * the given root. The pool will remain exported when the machine
4288 * is rebooted.
4289 *
4290 * -V Import even in the presence of faulted vdevs. This is an
4291 * intentionally undocumented option for testing purposes, and
4292 * treats the pool configuration as complete, leaving any bad
4293 * vdevs in the FAULTED state. In other words, it does verbatim
4294 * import.
4295 *
4296 * -f Force import, even if it appears that the pool is active.
4297 *
4298 * -F Attempt rewind if necessary.
4299 *
4300 * -n See if rewind would work, but don't actually rewind.
4301 *
4302 * -N Import the pool but don't mount datasets.
4303 *
4304 * -T Specify a starting txg to use for import. This option is
4305 * an intentionally undocumented option for testing purposes.
4306 *
4307 * -a Import all pools found.
4308 *
4309 * -l Load encryption keys while importing.
4310 *
4311 * -o Set property=value and/or temporary mount options (without '=').
4312 *
4313 * -s Scan using the default search path; the libblkid cache will
4314 * not be consulted.
4315 *
4316 * --rewind-to-checkpoint
4317 * Import the pool and revert back to the checkpoint.
4318 *
4319 * The import command scans for pools to import, and imports pools based on pool
4320 * name and GUID. The pool can also be renamed as part of the import process.
4321 */
4322 int
4323 zpool_do_import(int argc, char **argv)
4324 {
4325 char **searchdirs = NULL;
4326 char *env, *envdup = NULL;
4327 int nsearch = 0;
4328 int c;
4329 int err = 0;
4330 nvlist_t *pools = NULL;
4331 boolean_t do_all = B_FALSE;
4332 boolean_t do_destroyed = B_FALSE;
4333 char *mntopts = NULL;
4334 uint64_t searchguid = 0;
4335 char *searchname = NULL;
4336 char *propval;
4337 nvlist_t *policy = NULL;
4338 nvlist_t *props = NULL;
4339 int flags = ZFS_IMPORT_NORMAL;
4340 uint32_t rewind_policy = ZPOOL_NO_REWIND;
4341 boolean_t dryrun = B_FALSE;
4342 boolean_t do_rewind = B_FALSE;
4343 boolean_t xtreme_rewind = B_FALSE;
4344 boolean_t do_scan = B_FALSE;
4345 boolean_t pool_exists = B_FALSE;
4346 uint64_t txg = -1ULL;
4347 char *cachefile = NULL;
4348 importargs_t idata = { 0 };
4349 char *endptr;
4350
4351 struct option long_options[] = {
4352 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
4353 {0, 0, 0, 0}
4354 };
4355
4356 /* check options */
4357 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
4358 long_options, NULL)) != -1) {
4359 switch (c) {
4360 case 'a':
4361 do_all = B_TRUE;
4362 break;
4363 case 'c':
4364 cachefile = optarg;
4365 break;
4366 case 'd':
4367 searchdirs = safe_realloc(searchdirs,
4368 (nsearch + 1) * sizeof (char *));
4369 searchdirs[nsearch++] = optarg;
4370 break;
4371 case 'D':
4372 do_destroyed = B_TRUE;
4373 break;
4374 case 'f':
4375 flags |= ZFS_IMPORT_ANY_HOST;
4376 break;
4377 case 'F':
4378 do_rewind = B_TRUE;
4379 break;
4380 case 'l':
4381 flags |= ZFS_IMPORT_LOAD_KEYS;
4382 break;
4383 case 'm':
4384 flags |= ZFS_IMPORT_MISSING_LOG;
4385 break;
4386 case 'n':
4387 dryrun = B_TRUE;
4388 break;
4389 case 'N':
4390 flags |= ZFS_IMPORT_ONLY;
4391 break;
4392 case 'o':
4393 if ((propval = strchr(optarg, '=')) != NULL) {
4394 *propval = '\0';
4395 propval++;
4396 if (add_prop_list(optarg, propval,
4397 &props, B_TRUE))
4398 goto error;
4399 } else {
4400 mntopts = optarg;
4401 }
4402 break;
4403 case 'R':
4404 if (add_prop_list(zpool_prop_to_name(
4405 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
4406 goto error;
4407 if (add_prop_list_default(zpool_prop_to_name(
4408 ZPOOL_PROP_CACHEFILE), "none", &props))
4409 goto error;
4410 break;
4411 case 's':
4412 do_scan = B_TRUE;
4413 break;
4414 case 't':
4415 flags |= ZFS_IMPORT_TEMP_NAME;
4416 if (add_prop_list_default(zpool_prop_to_name(
4417 ZPOOL_PROP_CACHEFILE), "none", &props))
4418 goto error;
4419 break;
4420
4421 case 'T':
4422 errno = 0;
4423 txg = strtoull(optarg, &endptr, 0);
4424 if (errno != 0 || *endptr != '\0') {
4425 (void) fprintf(stderr,
4426 gettext("invalid txg value\n"));
4427 usage(B_FALSE);
4428 }
4429 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
4430 break;
4431 case 'V':
4432 flags |= ZFS_IMPORT_VERBATIM;
4433 break;
4434 case 'X':
4435 xtreme_rewind = B_TRUE;
4436 break;
4437 case CHECKPOINT_OPT:
4438 flags |= ZFS_IMPORT_CHECKPOINT;
4439 break;
4440 case ':':
4441 (void) fprintf(stderr, gettext("missing argument for "
4442 "'%c' option\n"), optopt);
4443 usage(B_FALSE);
4444 break;
4445 case '?':
4446 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4447 optopt);
4448 usage(B_FALSE);
4449 }
4450 }
4451
4452 argc -= optind;
4453 argv += optind;
4454
4455 if (cachefile && nsearch != 0) {
4456 (void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
4457 usage(B_FALSE);
4458 }
4459
4460 if (cachefile && do_scan) {
4461 (void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
4462 usage(B_FALSE);
4463 }
4464
4465 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
4466 (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
4467 usage(B_FALSE);
4468 }
4469
4470 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
4471 (void) fprintf(stderr, gettext("-l is only meaningful during "
4472 "an import\n"));
4473 usage(B_FALSE);
4474 }
4475
4476 if ((dryrun || xtreme_rewind) && !do_rewind) {
4477 (void) fprintf(stderr,
4478 gettext("-n or -X only meaningful with -F\n"));
4479 usage(B_FALSE);
4480 }
4481 if (dryrun)
4482 rewind_policy = ZPOOL_TRY_REWIND;
4483 else if (do_rewind)
4484 rewind_policy = ZPOOL_DO_REWIND;
4485 if (xtreme_rewind)
4486 rewind_policy |= ZPOOL_EXTREME_REWIND;
4487
4488 /* In the future, we can capture further policy and include it here */
4489 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
4490 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
4491 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
4492 rewind_policy) != 0)
4493 goto error;
4494
4495 /* check argument count */
4496 if (do_all) {
4497 if (argc != 0) {
4498 (void) fprintf(stderr, gettext("too many arguments\n"));
4499 usage(B_FALSE);
4500 }
4501 } else {
4502 if (argc > 2) {
4503 (void) fprintf(stderr, gettext("too many arguments\n"));
4504 usage(B_FALSE);
4505 }
4506 }
4507
4508 /*
4509 * Check for the effective uid. We do this explicitly here because
4510 * otherwise any attempt to discover pools will silently fail.
4511 */
4512 if (argc == 0 && geteuid() != 0) {
4513 (void) fprintf(stderr, gettext("cannot "
4514 "discover pools: permission denied\n"));
4515
4516 free(searchdirs);
4517 nvlist_free(props);
4518 nvlist_free(policy);
4519 return (1);
4520 }
4521
4522 /*
4523 * Depending on the arguments given, we do one of the following:
4524 *
4525 * <none> Iterate through all pools and display information about
4526 * each one.
4527 *
4528 * -a Iterate through all pools and try to import each one.
4529 *
4530 * <id> Find the pool that corresponds to the given GUID/pool
4531 * name and import that one.
4532 *
4533 * -D The above options apply only to destroyed pools.
4534 */
4535 if (argc != 0) {
4536 char *endptr;
4537
4538 errno = 0;
4539 searchguid = strtoull(argv[0], &endptr, 10);
4540 if (errno != 0 || *endptr != '\0') {
4541 searchname = argv[0];
4542 searchguid = 0;
4543 }
4544
4545 /*
4546 * User specified a name or guid. Ensure it's unique.
4547 */
4548 target_exists_args_t search = {searchname, searchguid};
4549 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
4550 }
4551
4552 /*
4553 * Check the environment for the preferred search path.
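* ZPOOL_IMPORT_PATH is a colon-separated list of directories, e.g.
* "/dev/disk/by-id:/dev/disk/by-path".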
4554 */
4555 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
4556 char *dir, *tmp = NULL;
4557
4558 envdup = strdup(env);
4559
4560 for (dir = strtok_r(envdup, ":", &tmp);
4561 dir != NULL;
4562 dir = strtok_r(NULL, ":", &tmp)) {
4563 searchdirs = safe_realloc(searchdirs,
4564 (nsearch + 1) * sizeof (char *));
4565 searchdirs[nsearch++] = dir;
4566 }
4567 }
4568
4569 idata.path = searchdirs;
4570 idata.paths = nsearch;
4571 idata.poolname = searchname;
4572 idata.guid = searchguid;
4573 idata.cachefile = cachefile;
4574 idata.scan = do_scan;
4575 idata.policy = policy;
4576 idata.do_destroyed = do_destroyed;
4577 idata.do_all = do_all;
4578
4579 libpc_handle_t lpch = {
4580 .lpc_lib_handle = g_zfs,
4581 .lpc_ops = &libzfs_config_ops,
4582 .lpc_printerr = B_TRUE
4583 };
4584 pools = zpool_search_import(&lpch, &idata);
4585
4586 if (pools != NULL && pool_exists &&
4587 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
4588 (void) fprintf(stderr, gettext("cannot import '%s': "
4589 "a pool with that name already exists\n"),
4590 argv[0]);
4591 (void) fprintf(stderr, gettext("use the form '%s "
4592 "<pool | id> <newpool>' to give it a new name\n"),
4593 "zpool import");
4594 err = 1;
4595 } else if (pools == NULL && pool_exists) {
4596 (void) fprintf(stderr, gettext("cannot import '%s': "
4597 "a pool with that name is already created/imported,\n"),
4598 argv[0]);
4599 (void) fprintf(stderr, gettext("and no additional pools "
4600 "with that name were found\n"));
4601 err = 1;
4602 } else if (pools == NULL) {
4603 if (argc != 0) {
4604 (void) fprintf(stderr, gettext("cannot import '%s': "
4605 "no such pool available\n"), argv[0]);
4606 }
4607 err = 1;
4608 }
4609
4610 if (err == 1) {
4611 free(searchdirs);
4612 free(envdup);
4613 nvlist_free(policy);
4614 nvlist_free(pools);
4615 nvlist_free(props);
4616 return (1);
4617 }
4618
4619 err = import_pools(pools, props, mntopts, flags,
4620 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata);
4621
4622 /*
4623 * If we're using the cachefile and we failed to import, then
4624 * fall back to scanning the directory for pools that match
4625 * those in the cachefile.
4626 */
4627 if (err != 0 && cachefile != NULL) {
4628 (void) printf(gettext("cachefile import failed, retrying\n"));
4629
4630 /*
4631 * We use the scan flag to gather the directories that exist
4632 * in the cachefile. If we need to fallback to searching for
4633 * the pool config, we will only search devices in these
4634 * directories.
4635 */
4636 idata.scan = B_TRUE;
4637 nvlist_free(pools);
4638 pools = zpool_search_import(&lpch, &idata);
4639
4640 err = import_pools(pools, props, mntopts, flags,
4641 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL,
4642 &idata);
4643 }
4644
4645 error:
4646 nvlist_free(props);
4647 nvlist_free(pools);
4648 nvlist_free(policy);
4649 free(searchdirs);
4650 free(envdup);
4651
4652 return (err ? 1 : 0);
4653 }
4654
4655 /*
4656 * zpool sync [-f] [pool] ...
4657 *
4658 * -f (undocumented) force uberblock (and config including zpool cache file)
4659 * update.
4660 *
4661 * Sync the specified pool(s).
4662 * Without arguments "zpool sync" will sync all pools.
4663 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
4664 *
4665 */
4666 static int
4667 zpool_do_sync(int argc, char **argv)
4668 {
4669 int ret;
4670 boolean_t force = B_FALSE;
4671
4672 /* check options */
4673 while ((ret = getopt(argc, argv, "f")) != -1) {
4674 switch (ret) {
4675 case 'f':
4676 force = B_TRUE;
4677 break;
4678 case '?':
4679 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4680 optopt);
4681 usage(B_FALSE);
4682 }
4683 }
4684
4685 argc -= optind;
4686 argv += optind;
4687
4688 /* if argc == 0 we will execute zpool_sync_one on all pools */
4689 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
4690 B_FALSE, zpool_sync_one, &force);
4691
4692 return (ret);
4693 }
4694
4695 typedef struct iostat_cbdata {
4696 uint64_t cb_flags;
4697 int cb_namewidth;
4698 int cb_iteration;
4699 boolean_t cb_verbose;
4700 boolean_t cb_literal;
4701 boolean_t cb_scripted;
4702 zpool_list_t *cb_list;
4703 vdev_cmd_data_list_t *vcdl;
4704 vdev_cbdata_t cb_vdevs;
4705 } iostat_cbdata_t;
4706
4707 /* iostat labels */
4708 typedef struct name_and_columns {
4709 const char *name; /* Column name */
4710 unsigned int columns; /* Center name to this number of columns */
4711 } name_and_columns_t;
4712
4713 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
4714
4715 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
4716 {
4717 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
4718 {NULL}},
4719 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4720 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
4721 {NULL}},
4722 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
4723 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
4724 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
4725 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4726 {"asyncq_wait", 2}, {NULL}},
4727 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
4728 {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
4729 {"trim", 2}, {"rebuild", 2}, {NULL}},
4730 };
4731
4732 /* Shorthand - if "columns" field not set, default to 1 column */
4733 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
4734 {
4735 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
4736 {"write"}, {NULL}},
4737 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4738 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
4739 {NULL}},
4740 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
4741 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
4742 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
4743 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4744 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
4745 {NULL}},
4746 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4747 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4748 {"ind"}, {"agg"}, {NULL}},
4749 };
4750
4751 static const char *histo_to_title[] = {
4752 [IOS_L_HISTO] = "latency",
4753 [IOS_RQ_HISTO] = "req_size",
4754 };
4755
4756 /*
4757 * Return the number of labels in a null-terminated name_and_columns_t
4758 * array.
4759 *
4760 */
4761 static unsigned int
4762 label_array_len(const name_and_columns_t *labels)
4763 {
4764 int i = 0;
4765
4766 while (labels[i].name)
4767 i++;
4768
4769 return (i);
4770 }
4771
4772 /*
4773 * Return the number of strings in a null-terminated string array.
4774 * For example:
4775 *
4776 * const char *foo[] = {"bar", "baz", NULL}
4777 *
4778 * returns 2
4779 */
4780 static uint64_t
4781 str_array_len(const char *array[])
4782 {
4783 uint64_t i = 0;
4784 while (array[i])
4785 i++;
4786
4787 return (i);
4788 }
4789
4790
4791 /*
4792 * Return a default column width for default/latency/queue columns. This does
4793 * not include histograms, which have their columns autosized.
4794 */
4795 static unsigned int
4796 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
4797 {
4798 unsigned long column_width = 5; /* Normal niceprint */
4799 static unsigned long widths[] = {
4800 /*
4801 * Choose some sane default column sizes for printing the
4802 * raw numbers.
4803 */
4804 [IOS_DEFAULT] = 15, /* 1PB capacity */
4805 [IOS_LATENCY] = 10, /* 1B ns = 10sec */
4806 [IOS_QUEUES] = 6, /* 1M queue entries */
4807 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4808 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4809 };
4810
4811 if (cb->cb_literal)
4812 column_width = widths[type];
4813
4814 return (column_width);
4815 }
4816
4817 /*
4818 * Print the column labels, i.e:
4819 *
4820 * capacity operations bandwidth
4821 * alloc free read write read write ...
4822 *
4823 * If force_column_width is set, use it for the column width. If not set, use
4824 * the default column width.
4825 */
4826 static void
4827 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4828 const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4829 {
4830 int i, idx, s;
4831 int text_start, rw_column_width, spaces_to_end;
4832 uint64_t flags = cb->cb_flags;
4833 uint64_t f;
4834 unsigned int column_width = force_column_width;
4835
4836 /* For each bit set in flags */
4837 for (f = flags; f; f &= ~(1ULL << idx)) {
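/*
 * lowbit64() returns the 1-based position of the lowest set bit;
 * clearing that bit at the end of each pass advances f to the next
 * selected stat type.
 */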
4838 idx = lowbit64(f) - 1;
4839 if (!force_column_width)
4840 column_width = default_column_width(cb, idx);
4841 /* Print our top labels centered over "read write" label. */
4842 for (i = 0; i < label_array_len(labels[idx]); i++) {
4843 const char *name = labels[idx][i].name;
4844 /*
4845 * We treat labels[][].columns == 0 as shorthand
4846 * for one column. It makes writing out the label
4847 * tables more concise.
4848 */
4849 unsigned int columns = MAX(1, labels[idx][i].columns);
4850 unsigned int slen = strlen(name);
4851
4852 rw_column_width = (column_width * columns) +
4853 (2 * (columns - 1));
4854
4855 text_start = (int)((rw_column_width) / columns -
4856 slen / columns);
4857 if (text_start < 0)
4858 text_start = 0;
4859
4860 printf(" "); /* Two spaces between columns */
4861
4862 /* Space from beginning of column to label */
4863 for (s = 0; s < text_start; s++)
4864 printf(" ");
4865
4866 printf("%s", name);
4867
4868 /* Print space after label to end of column */
4869 spaces_to_end = rw_column_width - text_start - slen;
4870 if (spaces_to_end < 0)
4871 spaces_to_end = 0;
4872
4873 for (s = 0; s < spaces_to_end; s++)
4874 printf(" ");
4875 }
4876 }
4877 }
4878
4879
4880 /*
4881 * print_cmd_columns - Print custom column titles from -c
4882 *
4883 * If the user specified the "zpool status|iostat -c" then print their custom
4884 * column titles in the header. For example, print_cmd_columns() would print
4885 * the " col1 col2" part of this:
4886 *
4887 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4888 * ...
4889 * capacity operations bandwidth
4890 * pool alloc free read write read write col1 col2
4891 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4892 * mypool 269K 1008M 0 0 107 946
4893 * mirror 269K 1008M 0 0 107 946
4894 * sdb - - 0 0 102 473 val1 val2
4895 * sdc - - 0 0 5 473 val1 val2
4896 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4897 */
4898 static void
4899 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4900 {
4901 int i, j;
4902 vdev_cmd_data_t *data = &vcdl->data[0];
4903
4904 if (vcdl->count == 0 || data == NULL)
4905 return;
4906
4907 /*
4908 * Each vdev cmd should have the same column names unless the user did
4909 * something weird with their cmd. Just take the column names from the
4910 * first vdev and assume it works for all of them.
4911 */
4912 for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4913 printf(" ");
4914 if (use_dashes) {
4915 for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4916 printf("-");
4917 } else {
4918 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
4919 vcdl->uniq_cols[i]);
4920 }
4921 }
4922 }
4923
4924
4925 /*
4926 * Utility function to print out a line of dashes like:
4927 *
4928 * -------------------------------- ----- ----- ----- ----- -----
4929 *
4930 * ...or a dashed named-row line like:
4931 *
4932 * logs - - - - -
4933 *
4934 * @cb: iostat data
4935 *
4936 * @force_column_width If non-zero, use the value as the column width.
4937 * Otherwise use the default column widths.
4938 *
4939 * @name: Print a dashed named-row line starting
4940 * with @name. Otherwise, print a regular
4941 * dashed line.
4942 */
4943 static void
4944 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
4945 const char *name)
4946 {
4947 int i;
4948 unsigned int namewidth;
4949 uint64_t flags = cb->cb_flags;
4950 uint64_t f;
4951 int idx;
4952 const name_and_columns_t *labels;
4953 const char *title;
4954
4955
4956 if (cb->cb_flags & IOS_ANYHISTO_M) {
4957 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4958 } else if (cb->cb_vdevs.cb_names_count) {
4959 title = "vdev";
4960 } else {
4961 title = "pool";
4962 }
4963
4964 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4965 name ? strlen(name) : 0);
4966
4967
4968 if (name) {
4969 printf("%-*s", namewidth, name);
4970 } else {
4971 for (i = 0; i < namewidth; i++)
4972 (void) printf("-");
4973 }
4974
4975 /* For each bit in flags */
4976 for (f = flags; f; f &= ~(1ULL << idx)) {
4977 unsigned int column_width;
4978 idx = lowbit64(f) - 1;
4979 if (force_column_width)
4980 column_width = force_column_width;
4981 else
4982 column_width = default_column_width(cb, idx);
4983
4984 labels = iostat_bottom_labels[idx];
4985 for (i = 0; i < label_array_len(labels); i++) {
4986 if (name)
4987 printf(" %*s-", column_width - 1, " ");
4988 else
4989 printf(" %.*s", column_width,
4990 "--------------------");
4991 }
4992 }
4993 }
4994
4995
4996 static void
4997 print_iostat_separator_impl(iostat_cbdata_t *cb,
4998 unsigned int force_column_width)
4999 {
5000 print_iostat_dashes(cb, force_column_width, NULL);
5001 }
5002
5003 static void
5004 print_iostat_separator(iostat_cbdata_t *cb)
5005 {
5006 print_iostat_separator_impl(cb, 0);
5007 }
5008
5009 static void
5010 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
5011 const char *histo_vdev_name)
5012 {
5013 unsigned int namewidth;
5014 const char *title;
5015
5016 color_start(ANSI_BOLD);
5017
5018 if (cb->cb_flags & IOS_ANYHISTO_M) {
5019 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
5020 } else if (cb->cb_vdevs.cb_names_count) {
5021 title = "vdev";
5022 } else {
5023 title = "pool";
5024 }
5025
5026 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
5027 histo_vdev_name ? strlen(histo_vdev_name) : 0);
5028
5029 if (histo_vdev_name)
5030 printf("%-*s", namewidth, histo_vdev_name);
5031 else
5032 printf("%*s", namewidth, "");
5033
5034
5035 print_iostat_labels(cb, force_column_width, iostat_top_labels);
5036 printf("\n");
5037
5038 printf("%-*s", namewidth, title);
5039
5040 print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
5041 if (cb->vcdl != NULL)
5042 print_cmd_columns(cb->vcdl, 0);
5043
5044 printf("\n");
5045
5046 print_iostat_separator_impl(cb, force_column_width);
5047
5048 if (cb->vcdl != NULL)
5049 print_cmd_columns(cb->vcdl, 1);
5050
5051 color_end();
5052
5053 printf("\n");
5054 }
5055
5056 static void
5057 print_iostat_header(iostat_cbdata_t *cb)
5058 {
5059 print_iostat_header_impl(cb, 0, NULL);
5060 }
5061
5062 /*
5063 * Prints a size string (e.g. 120M) with the suffix ("M") colored
5064 * by order of magnitude. Uses column_size to add padding.
5065 */
5066 static void
5067 print_stat_color(const char *statbuf, unsigned int column_size)
5068 {
5069 fputs(" ", stdout);
5070 size_t len = strlen(statbuf);
5071 while (len < column_size) {
5072 fputc(' ', stdout);
5073 column_size--;
5074 }
5075 if (*statbuf == '0') {
5076 color_start(ANSI_GRAY);
5077 fputc('0', stdout);
5078 } else {
5079 for (; *statbuf; statbuf++) {
5080 if (*statbuf == 'K') color_start(ANSI_GREEN);
5081 else if (*statbuf == 'M') color_start(ANSI_YELLOW);
5082 else if (*statbuf == 'G') color_start(ANSI_RED);
5083 else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
5084 else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
5085 else if (*statbuf == 'E') color_start(ANSI_CYAN);
5086 fputc(*statbuf, stdout);
5087 if (--column_size <= 0)
5088 break;
5089 }
5090 }
5091 color_end();
5092 }
5093
5094 /*
5095 * Display a single statistic.
5096 */
5097 static void
5098 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
5099 unsigned int column_size, boolean_t scripted)
5100 {
5101 char buf[64];
5102
5103 zfs_nicenum_format(value, buf, sizeof (buf), format);
5104
5105 if (scripted)
5106 printf("\t%s", buf);
5107 else
5108 print_stat_color(buf, column_size);
5109 }
5110
5111 /*
5112 * Calculate the default vdev stats
5113 *
5114 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting
5115 * stats into calcvs.
5116 */
5117 static void
5118 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
5119 vdev_stat_t *calcvs)
5120 {
5121 int i;
5122
5123 memcpy(calcvs, newvs, sizeof (*calcvs));
5124 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
5125 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
5126
5127 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
5128 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
5129 }
5130
5131 /*
5132 * Internal representation of the extended iostats data.
5133 *
5134 * The extended iostat stats are exported in nvlists as either uint64_t arrays
5135 * or single uint64_t's. We make both look like arrays to make them easier
5136 * to process. In order to make single uint64_t's look like arrays, we set
5137 * __data to the stat data, and then set *data = &__data with count = 1. Then,
5138 * we can just use *data and count.
5139 */
5140 struct stat_array {
5141 uint64_t *data;
5142 uint_t count; /* Number of entries in data[] */
5143 uint64_t __data; /* Only used when data is a single uint64_t */
5144 };
5145
5146 static uint64_t
5147 stat_histo_max(struct stat_array *nva, unsigned int len)
5148 {
5149 uint64_t max = 0;
5150 int i;
5151 for (i = 0; i < len; i++)
5152 max = MAX(max, array64_max(nva[i].data, nva[i].count));
5153
5154 return (max);
5155 }
5156
5157 /*
5158 * Helper function to lookup a uint64_t array or uint64_t value and store its
5159 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
5160 * it look like a one element array to make it easier to process.
5161 */
5162 static int
5163 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
5164 struct stat_array *nva)
5165 {
5166 nvpair_t *tmp;
5167 int ret;
5168
5169 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
5170 switch (nvpair_type(tmp)) {
5171 case DATA_TYPE_UINT64_ARRAY:
5172 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
5173 break;
5174 case DATA_TYPE_UINT64:
5175 ret = nvpair_value_uint64(tmp, &nva->__data);
5176 nva->data = &nva->__data;
5177 nva->count = 1;
5178 break;
5179 default:
5180 /* Not a uint64_t */
5181 ret = EINVAL;
5182 break;
5183 }
5184
5185 return (ret);
5186 }
5187
5188 /*
5189 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
5190 * subtract them, and return the results in a newly allocated stat_array.
5191 * The returned array must be freed with free_calc_stats() when you
5192 * are done with it.
5193 *
5194 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
5195 * values.
5196 */
5197 static struct stat_array *
5198 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
5199 nvlist_t *newnv)
5200 {
5201 nvlist_t *oldnvx = NULL, *newnvx;
5202 struct stat_array *oldnva, *newnva, *calcnva;
5203 int i, j;
5204 unsigned int alloc_size = (sizeof (struct stat_array)) * len;
5205
5206 /* Extract our extended stats nvlist from the main list */
5207 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
5208 &newnvx) == 0);
5209 if (oldnv) {
5210 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
5211 &oldnvx) == 0);
5212 }
5213
5214 newnva = safe_malloc(alloc_size);
5215 oldnva = safe_malloc(alloc_size);
5216 calcnva = safe_malloc(alloc_size);
5217
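/*
 * For each stat name: copy the new values, then subtract the old
 * snapshot (if any) to get the per-interval deltas.
 */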
5218 for (j = 0; j < len; j++) {
5219 verify(nvpair64_to_stat_array(newnvx, names[j],
5220 &newnva[j]) == 0);
5221 calcnva[j].count = newnva[j].count;
5222 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
5223 calcnva[j].data = safe_malloc(alloc_size);
5224 memcpy(calcnva[j].data, newnva[j].data, alloc_size);
5225
5226 if (oldnvx) {
5227 verify(nvpair64_to_stat_array(oldnvx, names[j],
5228 &oldnva[j]) == 0);
5229 for (i = 0; i < oldnva[j].count; i++)
5230 calcnva[j].data[i] -= oldnva[j].data[i];
5231 }
5232 }
5233 free(newnva);
5234 free(oldnva);
5235 return (calcnva);
5236 }
5237
5238 static void
5239 free_calc_stats(struct stat_array *nva, unsigned int len)
5240 {
5241 int i;
5242 for (i = 0; i < len; i++)
5243 free(nva[i].data);
5244
5245 free(nva);
5246 }
5247
5248 static void
5249 print_iostat_histo(struct stat_array *nva, unsigned int len,
5250 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
5251 double scale)
5252 {
5253 int i, j;
5254 char buf[6];
5255 uint64_t val;
5256 enum zfs_nicenum_format format;
5257 unsigned int buckets;
5258 unsigned int start_bucket;
5259
5260 if (cb->cb_literal)
5261 format = ZFS_NICENUM_RAW;
5262 else
5263 format = ZFS_NICENUM_1024;
5264
5265 /* All these histos are the same size, so just use nva[0].count */
5266 buckets = nva[0].count;
5267
5268 if (cb->cb_flags & IOS_RQ_HISTO_M) {
5269 /* Start at 512 - req size should never be lower than this */
5270 start_bucket = 9;
5271 } else {
5272 start_bucket = 0;
5273 }
5274
5275 for (j = start_bucket; j < buckets; j++) {
5276 /* Print histogram bucket label */
5277 if (cb->cb_flags & IOS_L_HISTO_M) {
5278 /* Ending range of this bucket */
5279 val = (1UL << (j + 1)) - 1;
5280 zfs_nicetime(val, buf, sizeof (buf));
5281 } else {
5282 /* Request size (starting range of bucket) */
5283 val = (1UL << j);
5284 zfs_nicenum(val, buf, sizeof (buf));
5285 }
5286
5287 if (cb->cb_scripted)
5288 printf("%llu", (u_longlong_t)val);
5289 else
5290 printf("%-*s", namewidth, buf);
5291
5292 /* Print the values on the line */
5293 for (i = 0; i < len; i++) {
5294 print_one_stat(nva[i].data[j] * scale, format,
5295 column_width, cb->cb_scripted);
5296 }
5297 printf("\n");
5298 }
5299 }
5300
5301 static void
5302 print_solid_separator(unsigned int length)
5303 {
5304 while (length--)
5305 printf("-");
5306 printf("\n");
5307 }
5308
5309 static void
5310 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
5311 nvlist_t *newnv, double scale, const char *name)
5312 {
5313 unsigned int column_width;
5314 unsigned int namewidth;
5315 unsigned int entire_width;
5316 enum iostat_type type;
5317 struct stat_array *nva;
5318 const char **names;
5319 unsigned int names_len;
5320
5321 /* What type of histo are we? */
5322 type = IOS_HISTO_IDX(cb->cb_flags);
5323
5324 /* Get NULL-terminated array of nvlist names for our histo */
5325 names = vsx_type_to_nvlist[type];
5326 names_len = str_array_len(names); /* num of names */
5327
5328 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
5329
5330 if (cb->cb_literal) {
5331 column_width = MAX(5,
5332 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
5333 } else {
5334 column_width = 5;
5335 }
5336
5337 namewidth = MAX(cb->cb_namewidth,
5338 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
5339
5340 /*
5341 * Calculate the entire line width of what we're printing. The
5342 * +2 is for the two spaces between columns:
5343 */
5344 /* read write */
5345 /* ----- ----- */
5346 /* |___| <---------- column_width */
5347 /* */
5348 /* |__________| <--- entire_width */
5349 /* */
5350 entire_width = namewidth + (column_width + 2) *
5351 label_array_len(iostat_bottom_labels[type]);
5352
5353 if (cb->cb_scripted)
5354 printf("%s\n", name);
5355 else
5356 print_iostat_header_impl(cb, column_width, name);
5357
5358 print_iostat_histo(nva, names_len, cb, column_width,
5359 namewidth, scale);
5360
5361 free_calc_stats(nva, names_len);
5362 if (!cb->cb_scripted)
5363 print_solid_separator(entire_width);
5364 }
5365
5366 /*
5367 * Calculate the average latency of a power-of-two latency histogram
5368 */
5369 static uint64_t
5370 single_histo_average(uint64_t *histo, unsigned int buckets)
5371 {
5372 int i;
5373 uint64_t count = 0, total = 0;
5374
5375 for (i = 0; i < buckets; i++) {
5376 /*
5377 * Our buckets are power-of-two latency ranges. Use the
5378 * midpoint latency of each bucket to calculate the average.
5379 * For example:
5380 *
5381 * Bucket Midpoint
5382 * 8ns-15ns: 12ns
5383 * 16ns-31ns: 24ns
5384 * ...
5385 */
5386 if (histo[i] != 0) {
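/* Bucket i spans [2^i, 2^(i+1)); its midpoint is 1.5 * 2^i. */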
5387 total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
5388 count += histo[i];
5389 }
5390 }
5391
5392 /* Prevent divide by zero */
5393 return (count == 0 ? 0 : total / count);
5394 }
5395
5396 static void
5397 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
5398 {
5399 const char *names[] = {
5400 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
5401 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
5402 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
5403 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
5404 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
5405 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
5406 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
5407 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
5408 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
5409 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
5410 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
5411 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
5412 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
5413 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
5414 };
5415
5416 struct stat_array *nva;
5417
5418 unsigned int column_width = default_column_width(cb, IOS_QUEUES);
5419 enum zfs_nicenum_format format;
5420
5421 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
5422
5423 if (cb->cb_literal)
5424 format = ZFS_NICENUM_RAW;
5425 else
5426 format = ZFS_NICENUM_1024;
5427
5428 for (int i = 0; i < ARRAY_SIZE(names); i++) {
5429 uint64_t val = nva[i].data[0];
5430 print_one_stat(val, format, column_width, cb->cb_scripted);
5431 }
5432
5433 free_calc_stats(nva, ARRAY_SIZE(names));
5434 }
5435
5436 static void
5437 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
5438 nvlist_t *newnv)
5439 {
5440 int i;
5441 uint64_t val;
5442 const char *names[] = {
5443 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
5444 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
5445 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
5446 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
5447 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
5448 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
5449 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
5450 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
5451 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
5452 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
5453 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
5454 };
5455 struct stat_array *nva;
5456
5457 unsigned int column_width = default_column_width(cb, IOS_LATENCY);
5458 enum zfs_nicenum_format format;
5459
5460 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
5461
5462 if (cb->cb_literal)
5463 format = ZFS_NICENUM_RAWTIME;
5464 else
5465 format = ZFS_NICENUM_TIME;
5466
5467 /* Print our avg latencies on the line */
5468 for (i = 0; i < ARRAY_SIZE(names); i++) {
5469 /* Compute average latency for a latency histo */
5470 val = single_histo_average(nva[i].data, nva[i].count);
5471 print_one_stat(val, format, column_width, cb->cb_scripted);
5472 }
5473 free_calc_stats(nva, ARRAY_SIZE(names));
5474 }
5475
5476 /*
5477 * Print default statistics (capacity/operations/bandwidth)
5478 */
5479 static void
5480 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
5481 {
5482 unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
5483 enum zfs_nicenum_format format;
5484 char na; /* char to print for "not applicable" values */
5485
5486 if (cb->cb_literal) {
5487 format = ZFS_NICENUM_RAW;
5488 na = '0';
5489 } else {
5490 format = ZFS_NICENUM_1024;
5491 na = '-';
5492 }
5493
5494 /* only toplevel vdevs have capacity stats */
5495 if (vs->vs_space == 0) {
5496 if (cb->cb_scripted)
5497 printf("\t%c\t%c", na, na);
5498 else
5499 printf(" %*c %*c", column_width, na, column_width,
5500 na);
5501 } else {
5502 print_one_stat(vs->vs_alloc, format, column_width,
5503 cb->cb_scripted);
5504 print_one_stat(vs->vs_space - vs->vs_alloc, format,
5505 column_width, cb->cb_scripted);
5506 }
5507
5508 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
5509 format, column_width, cb->cb_scripted);
5510 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
5511 format, column_width, cb->cb_scripted);
5512 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
5513 format, column_width, cb->cb_scripted);
5514 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
5515 format, column_width, cb->cb_scripted);
5516 }
5517
5518 static const char *const class_name[] = {
5519 VDEV_ALLOC_BIAS_DEDUP,
5520 VDEV_ALLOC_BIAS_SPECIAL,
5521 VDEV_ALLOC_CLASS_LOGS
5522 };
5523
5524 /*
5525 * Print out all the statistics for the given vdev. This can either be the
5526 * toplevel configuration, or called recursively. If 'name' is NULL, then this
5527 * is a verbose output, and we don't want to display the toplevel pool stats.
5528 *
5529 * Returns the number of stat lines printed.
5530 */
5531 static unsigned int
5532 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
5533 nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
5534 {
5535 nvlist_t **oldchild, **newchild;
5536 uint_t c, children, oldchildren;
5537 vdev_stat_t *oldvs, *newvs, *calcvs;
5538 vdev_stat_t zerovs = { 0 };
5539 char *vname;
5540 int i;
5541 int ret = 0;
5542 uint64_t tdelta;
5543 double scale;
5544
5545 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
5546 return (ret);
5547
5548 calcvs = safe_malloc(sizeof (*calcvs));
5549
5550 if (oldnv != NULL) {
5551 verify(nvlist_lookup_uint64_array(oldnv,
5552 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
5553 } else {
5554 oldvs = &zerovs;
5555 }
5556
5557 /* Do we only want to see a specific vdev? */
5558 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
5559 /* Yes we do. Is this the vdev? */
5560 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
5561 /*
5562 * This is our vdev. Since it is the only vdev we
5563 * will be displaying, make depth = 0 so that it
5564 * doesn't get indented.
5565 */
5566 depth = 0;
5567 break;
5568 }
5569 }
5570
5571 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
5572 /* Couldn't match the name */
5573 goto children;
5574 }
5575
5576
5577 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
5578 (uint64_t **)&newvs, &c) == 0);
5579
5580 /*
5581 * Print the vdev name unless it's a histogram. Histograms
5582 * display the vdev name in the header itself.
5583 */
5584 if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
5585 if (cb->cb_scripted) {
5586 printf("%s", name);
5587 } else {
5588 if (strlen(name) + depth > cb->cb_namewidth)
5589 (void) printf("%*s%s", depth, "", name);
5590 else
5591 (void) printf("%*s%s%*s", depth, "", name,
5592 (int)(cb->cb_namewidth - strlen(name) -
5593 depth), "");
5594 }
5595 }
5596
5597 /* Calculate our scaling factor */
5598 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
5599 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
5600 /*
5601 * If we specify printing histograms with no time interval, then
5602 * print the histogram numbers over the entire lifetime of the
5603 * vdev.
5604 */
5605 scale = 1;
5606 } else {
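/* Scale the per-interval deltas to per-second rates. */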
5607 if (tdelta == 0)
5608 scale = 1.0;
5609 else
5610 scale = (double)NANOSEC / tdelta;
5611 }
5612
5613 if (cb->cb_flags & IOS_DEFAULT_M) {
5614 calc_default_iostats(oldvs, newvs, calcvs);
5615 print_iostat_default(calcvs, cb, scale);
5616 }
5617 if (cb->cb_flags & IOS_LATENCY_M)
5618 print_iostat_latency(cb, oldnv, newnv);
5619 if (cb->cb_flags & IOS_QUEUES_M)
5620 print_iostat_queues(cb, newnv);
5621 if (cb->cb_flags & IOS_ANYHISTO_M) {
5622 printf("\n");
5623 print_iostat_histos(cb, oldnv, newnv, scale, name);
5624 }
5625
5626 if (cb->vcdl != NULL) {
5627 const char *path;
5628 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
5629 &path) == 0) {
5630 printf(" ");
5631 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
5632 }
5633 }
5634
5635 if (!(cb->cb_flags & IOS_ANYHISTO_M))
5636 printf("\n");
5637
5638 ret++;
5639
5640 children:
5641
5642 free(calcvs);
5643
5644 if (!cb->cb_verbose)
5645 return (ret);
5646
5647 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
5648 &newchild, &children) != 0)
5649 return (ret);
5650
5651 if (oldnv) {
5652 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
5653 &oldchild, &oldchildren) != 0)
5654 return (ret);
5655
5656 children = MIN(oldchildren, children);
5657 }
5658
5659 /*
5660 * print normal top-level devices
5661 */
5662 for (c = 0; c < children; c++) {
5663 uint64_t ishole = B_FALSE, islog = B_FALSE;
5664
5665 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
5666 &ishole);
5667
5668 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
5669 &islog);
5670
5671 if (ishole || islog)
5672 continue;
5673
5674 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
5675 continue;
5676
5677 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5678 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5679 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
5680 newchild[c], cb, depth + 2);
5681 free(vname);
5682 }
5683
5684 /*
5685 * print all other top-level devices
5686 */
5687 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
5688 boolean_t printed = B_FALSE;
5689
5690 for (c = 0; c < children; c++) {
5691 uint64_t islog = B_FALSE;
5692 const char *bias = NULL;
5693 const char *type = NULL;
5694
5695 (void) nvlist_lookup_uint64(newchild[c],
5696 ZPOOL_CONFIG_IS_LOG, &islog);
5697 if (islog) {
5698 bias = VDEV_ALLOC_CLASS_LOGS;
5699 } else {
5700 (void) nvlist_lookup_string(newchild[c],
5701 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
5702 (void) nvlist_lookup_string(newchild[c],
5703 ZPOOL_CONFIG_TYPE, &type);
5704 }
5705 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
5706 continue;
5707 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
5708 continue;
5709
5710 if (!printed) {
5711 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
5712 !cb->cb_scripted &&
5713 !cb->cb_vdevs.cb_names) {
5714 print_iostat_dashes(cb, 0,
5715 class_name[n]);
5716 }
5717 printf("\n");
5718 printed = B_TRUE;
5719 }
5720
5721 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5722 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5723 ret += print_vdev_stats(zhp, vname, oldnv ?
5724 oldchild[c] : NULL, newchild[c], cb, depth + 2);
5725 free(vname);
5726 }
5727 }
5728
5729 /*
5730 * Include level 2 ARC devices in iostat output
5731 */
5732 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
5733 &newchild, &children) != 0)
5734 return (ret);
5735
5736 if (oldnv) {
5737 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
5738 &oldchild, &oldchildren) != 0)
5739 return (ret);
5740
5741 children = MIN(oldchildren, children);
5742 }
5743
5744 if (children > 0) {
5745 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
5746 !cb->cb_vdevs.cb_names) {
5747 print_iostat_dashes(cb, 0, "cache");
5748 }
5749 printf("\n");
5750
5751 for (c = 0; c < children; c++) {
5752 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5753 cb->cb_vdevs.cb_name_flags);
5754 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
5755 : NULL, newchild[c], cb, depth + 2);
5756 free(vname);
5757 }
5758 }
5759
5760 return (ret);
5761 }
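/*
 * Traversal sketch for the function above: a verbose listing recurses from
 * the root through the plain top-level vdevs, then the allocation classes
 * named in class_name[], then the L2ARC cache devices, indenting each level
 * of children a further two spaces via depth + 2.
 */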
5762
5763 static int
5764 refresh_iostat(zpool_handle_t *zhp, void *data)
5765 {
5766 iostat_cbdata_t *cb = data;
5767 boolean_t missing;
5768
5769 /*
5770 * If the pool has disappeared, remove it from the list and continue.
5771 */
5772 if (zpool_refresh_stats(zhp, &missing) != 0)
5773 return (-1);
5774
5775 if (missing)
5776 pool_list_remove(cb->cb_list, zhp);
5777
5778 return (0);
5779 }
5780
5781 /*
5782 * Callback to print out the iostats for the given pool.
5783 */
5784 static int
5785 print_iostat(zpool_handle_t *zhp, void *data)
5786 {
5787 iostat_cbdata_t *cb = data;
5788 nvlist_t *oldconfig, *newconfig;
5789 nvlist_t *oldnvroot, *newnvroot;
5790 int ret;
5791
5792 newconfig = zpool_get_config(zhp, &oldconfig);
5793
5794 if (cb->cb_iteration == 1)
5795 oldconfig = NULL;
5796
5797 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
5798 &newnvroot) == 0);
5799
5800 if (oldconfig == NULL)
5801 oldnvroot = NULL;
5802 else
5803 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
5804 &oldnvroot) == 0);
5805
5806 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
5807 cb, 0);
5808 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
5809 !cb->cb_scripted && cb->cb_verbose &&
5810 !cb->cb_vdevs.cb_names_count) {
5811 print_iostat_separator(cb);
5812 if (cb->vcdl != NULL) {
5813 print_cmd_columns(cb->vcdl, 1);
5814 }
5815 printf("\n");
5816 }
5817
5818 return (ret);
5819 }
5820
5821 static int
5822 get_columns(void)
5823 {
5824 struct winsize ws;
5825 int columns = 80;
5826 int error;
5827
5828 if (isatty(STDOUT_FILENO)) {
5829 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
5830 if (error == 0)
5831 columns = ws.ws_col;
5832 } else {
5833 columns = 999;
5834 }
5835
5836 return (columns);
5837 }
5838
5839 /*
5840 * Return the required length of the pool/vdev name column. The minimum
5841 * allowed width and output formatting flags must be provided.
5842 */
5843 static int
5844 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5845 {
5846 nvlist_t *config, *nvroot;
5847 int width = min_width;
5848
5849 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5850 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5851 &nvroot) == 0);
5852 size_t poolname_len = strlen(zpool_get_name(zhp));
5853 if (verbose == B_FALSE) {
5854 width = MAX(poolname_len, min_width);
5855 } else {
5856 width = MAX(poolname_len,
5857 max_width(zhp, nvroot, 0, min_width, flags));
5858 }
5859 }
5860
5861 return (width);
5862 }
5863
5864 /*
5865 * Parse the input string, get the 'interval' and 'count' value if there is one.
5866 */
5867 static void
5868 get_interval_count(int *argcp, char **argv, float *iv,
5869 unsigned long *cnt)
5870 {
5871 float interval = 0;
5872 unsigned long count = 0;
5873 int argc = *argcp;
5874
5875 /*
5876 * Determine if the last argument is an integer or a pool name
5877 */
5878 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5879 char *end;
5880
5881 errno = 0;
5882 interval = strtof(argv[argc - 1], &end);
5883
5884 if (*end == '\0' && errno == 0) {
5885 if (interval == 0) {
5886 (void) fprintf(stderr, gettext(
5887 "interval cannot be zero\n"));
5888 usage(B_FALSE);
5889 }
5890 /*
5891 * Ignore the last parameter
5892 */
5893 argc--;
5894 } else {
5895 /*
5896 * If this is not a valid number, just plow on. The
5897 * user will get a more informative error message later
5898 * on.
5899 */
5900 interval = 0;
5901 }
5902 }
5903
5904 /*
5905 * If the last argument is also an integer, then we have both a count
5906 * and an interval.
5907 */
5908 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5909 char *end;
5910
5911 errno = 0;
5912 count = interval;
5913 interval = strtof(argv[argc - 1], &end);
5914
5915 if (*end == '\0' && errno == 0) {
5916 if (interval == 0) {
5917 (void) fprintf(stderr, gettext(
5918 "interval cannot be zero\n"));
5919 usage(B_FALSE);
5920 }
5921
5922 /*
5923 * Ignore the last parameter
5924 */
5925 argc--;
5926 } else {
5927 interval = 0;
5928 }
5929 }
5930
5931 *iv = interval;
5932 *cnt = count;
5933 *argcp = argc;
5934 }
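/*
 * Parsing sketch (hypothetical command line): given "zpool iostat tank 2 5",
 * the first pass above consumes "5" as the interval, then the second pass
 * shifts that value into the count and re-reads "2", leaving interval = 2.0,
 * count = 5, and argc reduced to just the pool name.
 */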
5935
5936 static void
5937 get_timestamp_arg(char c)
5938 {
5939 if (c == 'u')
5940 timestamp_fmt = UDATE;
5941 else if (c == 'd')
5942 timestamp_fmt = DDATE;
5943 else
5944 usage(B_FALSE);
5945 }
5946
5947 /*
5948 * Return stat flags that are supported by all pools by both the module and
5949 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5950 * It will get ANDed down until only the flags that are supported on all pools
5951 * remain.
5952 */
5953 static int
5954 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5955 {
5956 uint64_t *mask = data;
5957 nvlist_t *config, *nvroot, *nvx;
5958 uint64_t flags = 0;
5959 int i, j;
5960
5961 config = zpool_get_config(zhp, NULL);
5962 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5963 &nvroot) == 0);
5964
5965 /* Default stats are always supported, but for completeness.. */
5966 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5967 flags |= IOS_DEFAULT_M;
5968
5969 /* Get our extended stats nvlist from the main list */
5970 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5971 &nvx) != 0) {
5972 /*
5973 * No extended stats; they're probably running an older
5974 * module. No big deal, we support that too.
5975 */
5976 goto end;
5977 }
5978
5979 /* For each extended stat, make sure all its nvpairs are supported */
5980 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5981 if (!vsx_type_to_nvlist[j][0])
5982 continue;
5983
5984 /* Start off by assuming the flag is supported, then check */
5985 flags |= (1ULL << j);
5986 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5987 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5988 /* flag isn't supported */
5989 flags = flags & ~(1ULL << j);
5990 break;
5991 }
5992 }
5993 }
5994 end:
5995 *mask = *mask & flags;
5996 return (0);
5997 }
5998
5999 /*
6000 * Return a bitmask of stats that are supported on all pools by both the module
6001 * and zpool iostat.
6002 */
6003 static uint64_t
6004 get_stat_flags(zpool_list_t *list)
6005 {
6006 uint64_t mask = -1;
6007
6008 /*
6009 * get_stat_flags_cb() will lop off bits from "mask" until only the
6010 * flags that are supported on all pools remain.
6011 */
6012 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
6013 return (mask);
6014 }
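/*
 * Example (hypothetical pools): if pool A supports IOS_DEFAULT_M and
 * IOS_LATENCY_M while pool B supports only IOS_DEFAULT_M, the mask starts
 * as all ones and each callback ANDs it down, so get_stat_flags() returns
 * just IOS_DEFAULT_M -- the set of stats printable for every pool at once.
 */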
6015
6016 /*
6017 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
6018 */
6019 static int
6020 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
6021 {
6022 uint64_t guid;
6023 vdev_cbdata_t *cb = cb_data;
6024 zpool_handle_t *zhp = zhp_data;
6025
6026 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
6027 return (0);
6028
6029 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
6030 }
6031
6032 /*
6033 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
6034 */
6035 static int
6036 is_vdev(zpool_handle_t *zhp, void *cb_data)
6037 {
6038 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
6039 }
6040
6041 /*
6042 * Check if vdevs are in a pool
6043 *
6044 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
6045 * return 0. If pool_name is NULL, then search all pools.
6046 */
6047 static int
6048 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
6049 vdev_cbdata_t *cb)
6050 {
6051 char **tmp_name;
6052 int ret = 0;
6053 int i;
6054 int pool_count = 0;
6055
6056 if ((argc == 0) || !*argv)
6057 return (0);
6058
6059 if (pool_name)
6060 pool_count = 1;
6061
6062 /* Temporarily hijack cb_names for a second... */
6063 tmp_name = cb->cb_names;
6064
6065	/* Go through our list of prospective vdev names */
6066 for (i = 0; i < argc; i++) {
6067 cb->cb_names = argv + i;
6068
6069 /* Is this name a vdev in our pools? */
6070 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
6071 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
6072 if (!ret) {
6073 /* No match */
6074 break;
6075 }
6076 }
6077
6078 cb->cb_names = tmp_name;
6079
6080 return (ret);
6081 }
6082
6083 static int
6084 is_pool_cb(zpool_handle_t *zhp, void *data)
6085 {
6086 char *name = data;
6087 if (strcmp(name, zpool_get_name(zhp)) == 0)
6088 return (1);
6089
6090 return (0);
6091 }
6092
6093 /*
6094 * Do we have a pool named *name? If so, return 1, otherwise 0.
6095 */
6096 static int
6097 is_pool(char *name)
6098 {
6099 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
6100 is_pool_cb, name));
6101 }
6102
6103 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
6104 static int
6105 are_all_pools(int argc, char **argv)
6106 {
6107 if ((argc == 0) || !*argv)
6108 return (0);
6109
6110 while (--argc >= 0)
6111 if (!is_pool(argv[argc]))
6112 return (0);
6113
6114 return (1);
6115 }
6116
6117 /*
6118 * Helper function to print out vdev/pool names we can't resolve. Used for an
6119 * error message.
6120 */
6121 static void
6122 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
6123 vdev_cbdata_t *cb)
6124 {
6125 int i;
6126 char *name;
6127 char *str;
6128 for (i = 0; i < argc; i++) {
6129 name = argv[i];
6130
6131 if (is_pool(name))
6132 str = gettext("pool");
6133 else if (are_vdevs_in_pool(1, &name, pool_name, cb))
6134 str = gettext("vdev in this pool");
6135 else if (are_vdevs_in_pool(1, &name, NULL, cb))
6136 str = gettext("vdev in another pool");
6137 else
6138 str = gettext("unknown");
6139
6140 fprintf(stderr, "\t%s (%s)\n", name, str);
6141 }
6142 }
6143
6144 /*
6145 * Same as get_interval_count(), but with additional checks to not misinterpret
6146 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
6147 * cb.cb_vdevs.cb_name_flags.
6148 */
6149 static void
6150 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
6151 unsigned long *count, iostat_cbdata_t *cb)
6152 {
6153 int argc_for_interval = 0;
6154
6155 /* Is the last arg an interval value? Or a guid? */
6156 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
6157 &cb->cb_vdevs)) {
6158 /*
6159 * The last arg is not a guid, so it's probably an
6160 * interval value.
6161 */
6162 argc_for_interval++;
6163
6164 if (*argc >= 2 &&
6165 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
6166 &cb->cb_vdevs)) {
6167 /*
6168 * The 2nd to last arg is not a guid, so it's probably
6169 * an interval value.
6170 */
6171 argc_for_interval++;
6172 }
6173 }
6174
6175 /* Point to our list of possible intervals */
6176 char **tmpargv = &argv[*argc - argc_for_interval];
6177
6178 *argc = *argc - argc_for_interval;
6179 get_interval_count(&argc_for_interval, tmpargv,
6180 interval, count);
6181 }
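/*
 * Disambiguation sketch (hypothetical input): in "zpool iostat -g 7895331 5"
 * the token "7895331" could be either a vdev GUID or a count. Because
 * are_vdevs_in_pool() is consulted first, a trailing number is treated as an
 * interval/count candidate only when it does not resolve to a vdev.
 */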
6182
6183 /*
6184 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
6185 * if we were unable to determine its size.
6186 */
6187 static int
6188 terminal_height(void)
6189 {
6190 struct winsize win;
6191
6192 if (isatty(STDOUT_FILENO) == 0)
6193 return (-1);
6194
6195 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
6196 return (win.ws_row);
6197
6198 return (-1);
6199 }
6200
6201 /*
6202 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
6203 * print the result.
6204 *
6205 * name: Short name of the script ('iostat').
6206 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat');
6207 */
6208 static void
6209 print_zpool_script_help(char *name, char *path)
6210 {
6211 char *argv[] = {path, (char *)"-h", NULL};
6212 char **lines = NULL;
6213 int lines_cnt = 0;
6214 int rc;
6215
6216 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
6217 &lines_cnt);
6218 if (rc != 0 || lines == NULL || lines_cnt <= 0) {
6219 if (lines != NULL)
6220 libzfs_free_str_array(lines, lines_cnt);
6221 return;
6222 }
6223
6224 for (int i = 0; i < lines_cnt; i++)
6225 if (!is_blank_str(lines[i]))
6226 printf(" %-14s %s\n", name, lines[i]);
6227
6228 libzfs_free_str_array(lines, lines_cnt);
6229 }
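/*
 * Output shape only (hypothetical script): if a script named "smart" prints
 * "Show SMART temperature" for its -h option, the printf above renders it as:
 *
 *    smart          Show SMART temperature
 */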
6230
6231 /*
6232	 * Go through the zpool status/iostat -c scripts in the user's path, run their
6233 * help option (-h), and print out the results.
6234 */
6235 static void
6236 print_zpool_dir_scripts(char *dirpath)
6237 {
6238 DIR *dir;
6239 struct dirent *ent;
6240 char fullpath[MAXPATHLEN];
6241 struct stat dir_stat;
6242
6243 if ((dir = opendir(dirpath)) != NULL) {
6244 /* print all the files and directories within directory */
6245 while ((ent = readdir(dir)) != NULL) {
6246 if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
6247 dirpath, ent->d_name) >= sizeof (fullpath)) {
6248 (void) fprintf(stderr,
6249 gettext("internal error: "
6250 "ZPOOL_SCRIPTS_PATH too large.\n"));
6251 exit(1);
6252 }
6253
6254 /* Print the scripts */
6255 if (stat(fullpath, &dir_stat) == 0)
6256 if (dir_stat.st_mode & S_IXUSR &&
6257 S_ISREG(dir_stat.st_mode))
6258 print_zpool_script_help(ent->d_name,
6259 fullpath);
6260 }
6261 closedir(dir);
6262 }
6263 }
6264
6265 /*
6266 * Print out help text for all zpool status/iostat -c scripts.
6267 */
6268 static void
6269 print_zpool_script_list(const char *subcommand)
6270 {
6271 char *dir, *sp, *tmp;
6272
6273 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
6274
6275 sp = zpool_get_cmd_search_path();
6276 if (sp == NULL)
6277 return;
6278
6279 for (dir = strtok_r(sp, ":", &tmp);
6280 dir != NULL;
6281 dir = strtok_r(NULL, ":", &tmp))
6282 print_zpool_dir_scripts(dir);
6283
6284 free(sp);
6285 }
6286
6287 /*
6288 * Set the minimum pool/vdev name column width. The width must be at least 10,
6289	 * but may be as large as the terminal width - 42 so it still fits on one line.
6290 * NOTE: 42 is the width of the default capacity/operations/bandwidth output
6291 */
6292 static int
6293 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
6294 {
6295 iostat_cbdata_t *cb = data;
6296 int width, available_width;
6297
6298 /*
6299 * get_namewidth() returns the maximum width of any name in that column
6300 * for any pool/vdev/device line that will be output.
6301 */
6302 width = get_namewidth(zhp, cb->cb_namewidth,
6303 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
6304
6305 /*
6306 * The width we are calculating is the width of the header and also the
6307 * padding width for names that are less than maximum width. The stats
6308 * take up 42 characters, so the width available for names is:
6309 */
6310 available_width = get_columns() - 42;
6311
6312 /*
6313 * If the maximum width fits on a screen, then great! Make everything
6314 * line up by justifying all lines to the same width. If that max
6315 * width is larger than what's available, the name plus stats won't fit
6316 * on one line, and justifying to that width would cause every line to
6317 * wrap on the screen. We only want lines with long names to wrap.
6318 * Limit the padding to what won't wrap.
6319 */
6320 if (width > available_width)
6321 width = available_width;
6322
6323 /*
6324 * And regardless of whatever the screen width is (get_columns can
6325 * return 0 if the width is not known or less than 42 for a narrow
6326 * terminal) have the width be a minimum of 10.
6327 */
6328 if (width < 10)
6329 width = 10;
6330
6331 /* Save the calculated width */
6332 cb->cb_namewidth = width;
6333
6334 return (0);
6335 }
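/*
 * Worked example (assumed numbers): on a 120-column terminal the name
 * column may use up to 120 - 42 = 78 characters. A longest name of 30
 * characters gives a width of 30; a longest name of 90 is clamped to 78 so
 * that only the over-long lines wrap.
 */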
6336
6337 /*
6338 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name]
6339 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
6340 * [interval [count]]
6341 *
6342 * -c CMD For each vdev, run command CMD
6343 * -g Display guid for individual vdev name.
6344 * -L Follow links when resolving vdev path name.
6345 * -P Display full path for vdev name.
6346 * -v Display statistics for individual vdevs
6347 * -h Display help
6348 * -p Display values in parsable (exact) format.
6349 * -H Scripted mode. Don't display headers, and separate properties
6350 * by a single tab.
6351 * -l Display average latency
6352 * -q Display queue depths
6353 * -w Display latency histograms
6354 * -r Display request size histogram
6355 * -T Display a timestamp in date(1) or Unix format
6356 * -n Only print headers once
6357 *
6358 * This command can be tricky because we want to be able to deal with pool
6359 * creation/destruction as well as vdev configuration changes. The bulk of this
6360 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
6361 * on pool_list_update() to detect the addition of new pools. Configuration
6362 * changes are all handled within libzfs.
6363 */
6364 int
6365 zpool_do_iostat(int argc, char **argv)
6366 {
6367 int c;
6368 int ret;
6369 int npools;
6370 float interval = 0;
6371 unsigned long count = 0;
6372 zpool_list_t *list;
6373 boolean_t verbose = B_FALSE;
6374 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
6375 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
6376 boolean_t omit_since_boot = B_FALSE;
6377 boolean_t guid = B_FALSE;
6378 boolean_t follow_links = B_FALSE;
6379 boolean_t full_name = B_FALSE;
6380 boolean_t headers_once = B_FALSE;
6381 iostat_cbdata_t cb = { 0 };
6382 char *cmd = NULL;
6383
6384 /* Used for printing error message */
6385 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
6386 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
6387
6388 uint64_t unsupported_flags;
6389
6390 /* check options */
6391 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
6392 switch (c) {
6393 case 'c':
6394 if (cmd != NULL) {
6395 fprintf(stderr,
6396 gettext("Can't set -c flag twice\n"));
6397 exit(1);
6398 }
6399
6400 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
6401 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
6402 fprintf(stderr, gettext(
6403 "Can't run -c, disabled by "
6404 "ZPOOL_SCRIPTS_ENABLED.\n"));
6405 exit(1);
6406 }
6407
6408 if ((getuid() <= 0 || geteuid() <= 0) &&
6409 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
6410 fprintf(stderr, gettext(
6411 "Can't run -c with root privileges "
6412 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
6413 exit(1);
6414 }
6415 cmd = optarg;
6416 verbose = B_TRUE;
6417 break;
6418 case 'g':
6419 guid = B_TRUE;
6420 break;
6421 case 'L':
6422 follow_links = B_TRUE;
6423 break;
6424 case 'P':
6425 full_name = B_TRUE;
6426 break;
6427 case 'T':
6428 get_timestamp_arg(*optarg);
6429 break;
6430 case 'v':
6431 verbose = B_TRUE;
6432 break;
6433 case 'p':
6434 parsable = B_TRUE;
6435 break;
6436 case 'l':
6437 latency = B_TRUE;
6438 break;
6439 case 'q':
6440 queues = B_TRUE;
6441 break;
6442 case 'H':
6443 scripted = B_TRUE;
6444 break;
6445 case 'w':
6446 l_histo = B_TRUE;
6447 break;
6448 case 'r':
6449 rq_histo = B_TRUE;
6450 break;
6451 case 'y':
6452 omit_since_boot = B_TRUE;
6453 break;
6454 case 'n':
6455 headers_once = B_TRUE;
6456 break;
6457 case 'h':
6458 usage(B_FALSE);
6459 break;
6460 case '?':
6461 if (optopt == 'c') {
6462 print_zpool_script_list("iostat");
6463 exit(0);
6464 } else {
6465 fprintf(stderr,
6466 gettext("invalid option '%c'\n"), optopt);
6467 }
6468 usage(B_FALSE);
6469 }
6470 }
6471
6472 argc -= optind;
6473 argv += optind;
6474
6475 cb.cb_literal = parsable;
6476 cb.cb_scripted = scripted;
6477
6478 if (guid)
6479 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
6480 if (follow_links)
6481 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6482 if (full_name)
6483 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
6484 cb.cb_iteration = 0;
6485 cb.cb_namewidth = 0;
6486 cb.cb_verbose = verbose;
6487
6488 /* Get our interval and count values (if any) */
6489 if (guid) {
6490 get_interval_count_filter_guids(&argc, argv, &interval,
6491 &count, &cb);
6492 } else {
6493 get_interval_count(&argc, argv, &interval, &count);
6494 }
6495
6496 if (argc == 0) {
6497 /* No args, so just print the defaults. */
6498 } else if (are_all_pools(argc, argv)) {
6499 /* All the args are pool names */
6500 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
6501 /* All the args are vdevs */
6502 cb.cb_vdevs.cb_names = argv;
6503 cb.cb_vdevs.cb_names_count = argc;
6504 argc = 0; /* No pools to process */
6505 } else if (are_all_pools(1, argv)) {
6506 /* The first arg is a pool name */
6507 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
6508 &cb.cb_vdevs)) {
6509 /* ...and the rest are vdev names */
6510 cb.cb_vdevs.cb_names = argv + 1;
6511 cb.cb_vdevs.cb_names_count = argc - 1;
6512 argc = 1; /* One pool to process */
6513 } else {
6514 fprintf(stderr, gettext("Expected either a list of "));
6515 fprintf(stderr, gettext("pools, or list of vdevs in"));
6516 fprintf(stderr, " \"%s\", ", argv[0]);
6517 fprintf(stderr, gettext("but got:\n"));
6518 error_list_unresolved_vdevs(argc - 1, argv + 1,
6519 argv[0], &cb.cb_vdevs);
6520 fprintf(stderr, "\n");
6521 usage(B_FALSE);
6522 return (1);
6523 }
6524 } else {
6525 /*
6526 * The args don't make sense. The first arg isn't a pool name,
6527 * nor are all the args vdevs.
6528 */
6529 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
6530 fprintf(stderr, "\n");
6531 return (1);
6532 }
6533
6534 if (cb.cb_vdevs.cb_names_count != 0) {
6535 /*
6536 * If user specified vdevs, it implies verbose.
6537 */
6538 cb.cb_verbose = B_TRUE;
6539 }
6540
6541 /*
6542 * Construct the list of all interesting pools.
6543 */
6544 ret = 0;
6545 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
6546 &ret)) == NULL)
6547 return (1);
6548
6549 if (pool_list_count(list) == 0 && argc != 0) {
6550 pool_list_free(list);
6551 return (1);
6552 }
6553
6554 if (pool_list_count(list) == 0 && interval == 0) {
6555 pool_list_free(list);
6556 (void) fprintf(stderr, gettext("no pools available\n"));
6557 return (1);
6558 }
6559
6560 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
6561 pool_list_free(list);
6562 (void) fprintf(stderr,
6563 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
6564 usage(B_FALSE);
6565 return (1);
6566 }
6567
6568 if (l_histo && rq_histo) {
6569 pool_list_free(list);
6570 (void) fprintf(stderr,
6571 gettext("Only one of [-r|-w] can be passed at a time\n"));
6572 usage(B_FALSE);
6573 return (1);
6574 }
6575
6576 /*
6577 * Enter the main iostat loop.
6578 */
6579 cb.cb_list = list;
6580
6581 if (l_histo) {
6582 /*
6583		 * Histogram tables look out of place when you try to display
6584 * them with the other stats, so make a rule that you can only
6585 * print histograms by themselves.
6586 */
6587 cb.cb_flags = IOS_L_HISTO_M;
6588 } else if (rq_histo) {
6589 cb.cb_flags = IOS_RQ_HISTO_M;
6590 } else {
6591 cb.cb_flags = IOS_DEFAULT_M;
6592 if (latency)
6593 cb.cb_flags |= IOS_LATENCY_M;
6594 if (queues)
6595 cb.cb_flags |= IOS_QUEUES_M;
6596 }
6597
6598 /*
6599 * See if the module supports all the stats we want to display.
6600 */
6601 unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
6602 if (unsupported_flags) {
6603 uint64_t f;
6604 int idx;
6605 fprintf(stderr,
6606 gettext("The loaded zfs module doesn't support:"));
6607
6608 /* for each bit set in unsupported_flags */
6609 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
6610 idx = lowbit64(f) - 1;
6611 fprintf(stderr, " -%c", flag_to_arg[idx]);
6612 }
6613
6614 fprintf(stderr, ". Try running a newer module.\n");
6615 pool_list_free(list);
6616
6617 return (1);
6618 }
6619
6620 for (;;) {
6621 if ((npools = pool_list_count(list)) == 0)
6622 (void) fprintf(stderr, gettext("no pools available\n"));
6623 else {
6624 /*
6625 * If this is the first iteration and -y was supplied
6626 * we skip any printing.
6627 */
6628 boolean_t skip = (omit_since_boot &&
6629 cb.cb_iteration == 0);
6630
6631 /*
6632 * Refresh all statistics. This is done as an
6633 * explicit step before calculating the maximum name
6634			 * width, so that any configuration changes are
6635 * properly accounted for.
6636 */
6637 (void) pool_list_iter(list, B_FALSE, refresh_iostat,
6638 &cb);
6639
6640 /*
6641 * Iterate over all pools to determine the maximum width
6642 * for the pool / device name column across all pools.
6643 */
6644 cb.cb_namewidth = 0;
6645 (void) pool_list_iter(list, B_FALSE,
6646 get_namewidth_iostat, &cb);
6647
6648 if (timestamp_fmt != NODATE)
6649 print_timestamp(timestamp_fmt);
6650
6651 if (cmd != NULL && cb.cb_verbose &&
6652 !(cb.cb_flags & IOS_ANYHISTO_M)) {
6653 cb.vcdl = all_pools_for_each_vdev_run(argc,
6654 argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
6655 cb.cb_vdevs.cb_names_count,
6656 cb.cb_vdevs.cb_name_flags);
6657 } else {
6658 cb.vcdl = NULL;
6659 }
6660
6661
6662 /*
6663 * Check terminal size so we can print headers
6664 * even when terminal window has its height
6665 * changed.
6666 */
6667 int winheight = terminal_height();
6668 /*
6669			 * Are we connected to a TTY? If not, headers_once
6670 * should be true, to avoid breaking scripts.
6671 */
6672 if (winheight < 0)
6673 headers_once = B_TRUE;
6674
6675 /*
6676			 * If this is the first iteration and we're not skipping
6677			 * it, or if exactly one of skip and verbose is set,
			 * print the header.
6678 *
6679 * The histogram code explicitly prints its header on
6680 * every vdev, so skip this for histograms.
6681 */
6682 if (((++cb.cb_iteration == 1 && !skip) ||
6683 (skip != verbose) ||
6684 (!headers_once &&
6685 (cb.cb_iteration % winheight) == 0)) &&
6686 (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
6687 !cb.cb_scripted)
6688 print_iostat_header(&cb);
6689
6690 if (skip) {
6691 (void) fflush(stdout);
6692 (void) fsleep(interval);
6693 continue;
6694 }
6695
6696 pool_list_iter(list, B_FALSE, print_iostat, &cb);
6697
6698 /*
6699 * If there's more than one pool, and we're not in
6700 * verbose mode (which prints a separator for us),
6701 * then print a separator.
6702 *
6703 * In addition, if we're printing specific vdevs then
6704 * we also want an ending separator.
6705 */
6706 if (((npools > 1 && !verbose &&
6707 !(cb.cb_flags & IOS_ANYHISTO_M)) ||
6708 (!(cb.cb_flags & IOS_ANYHISTO_M) &&
6709 cb.cb_vdevs.cb_names_count)) &&
6710 !cb.cb_scripted) {
6711 print_iostat_separator(&cb);
6712 if (cb.vcdl != NULL)
6713 print_cmd_columns(cb.vcdl, 1);
6714 printf("\n");
6715 }
6716
6717 if (cb.vcdl != NULL)
6718 free_vdev_cmd_data_list(cb.vcdl);
6719
6720 }
6721
6722 if (interval == 0)
6723 break;
6724
6725 if (count != 0 && --count == 0)
6726 break;
6727
6728 (void) fflush(stdout);
6729 (void) fsleep(interval);
6730 }
6731
6732 pool_list_free(list);
6733
6734 return (ret);
6735 }
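/*
 * Usage sketch (hypothetical invocations, using only the flags documented
 * above):
 *
 *   zpool iostat -v tank 5      # per-vdev stats for "tank" every 5 seconds
 *   zpool iostat -l 2 10        # average latencies, 10 samples, 2s apart
 *   zpool iostat -w             # latency histograms, printed once
 */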
6736
6737 typedef struct list_cbdata {
6738 boolean_t cb_verbose;
6739 int cb_name_flags;
6740 int cb_namewidth;
6741 boolean_t cb_json;
6742 boolean_t cb_scripted;
6743 zprop_list_t *cb_proplist;
6744 boolean_t cb_literal;
6745 nvlist_t *cb_jsobj;
6746 boolean_t cb_json_as_int;
6747 boolean_t cb_json_pool_key_guid;
6748 } list_cbdata_t;
6749
6750
6751 /*
6752 * Given a list of columns to display, output appropriate headers for each one.
6753 */
6754 static void
6755 print_header(list_cbdata_t *cb)
6756 {
6757 zprop_list_t *pl = cb->cb_proplist;
6758 char headerbuf[ZPOOL_MAXPROPLEN];
6759 const char *header;
6760 boolean_t first = B_TRUE;
6761 boolean_t right_justify;
6762 size_t width = 0;
6763
6764 for (; pl != NULL; pl = pl->pl_next) {
6765 width = pl->pl_width;
6766 if (first && cb->cb_verbose) {
6767 /*
6768 * Reset the width to accommodate the verbose listing
6769 * of devices.
6770 */
6771 width = cb->cb_namewidth;
6772 }
6773
6774 if (!first)
6775 (void) fputs(" ", stdout);
6776 else
6777 first = B_FALSE;
6778
6779 right_justify = B_FALSE;
6780 if (pl->pl_prop != ZPROP_USERPROP) {
6781 header = zpool_prop_column_name(pl->pl_prop);
6782 right_justify = zpool_prop_align_right(pl->pl_prop);
6783 } else {
6784 int i;
6785
6786 for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
6787 headerbuf[i] = toupper(pl->pl_user_prop[i]);
6788 headerbuf[i] = '\0';
6789 header = headerbuf;
6790 }
6791
6792 if (pl->pl_next == NULL && !right_justify)
6793 (void) fputs(header, stdout);
6794 else if (right_justify)
6795 (void) printf("%*s", (int)width, header);
6796 else
6797 (void) printf("%-*s", (int)width, header);
6798 }
6799
6800 (void) fputc('\n', stdout);
6801 }
6802
6803 /*
6804 * Given a pool and a list of properties, print out all the properties according
6805 * to the described layout. Used by zpool_do_list().
6806 */
6807 static void
6808 collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
6809 {
6810 zprop_list_t *pl = cb->cb_proplist;
6811 boolean_t first = B_TRUE;
6812 char property[ZPOOL_MAXPROPLEN];
6813 const char *propstr;
6814 boolean_t right_justify;
6815 size_t width;
6816 zprop_source_t sourcetype = ZPROP_SRC_NONE;
6817 nvlist_t *item, *d, *props;
6818 item = d = props = NULL;
6819
6820 if (cb->cb_json) {
6821 item = fnvlist_alloc();
6822 props = fnvlist_alloc();
6823 d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools");
6824 if (d == NULL) {
6825 fprintf(stderr, "pools obj not found.\n");
6826 exit(1);
6827 }
6828 fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int);
6829 }
6830
6831 for (; pl != NULL; pl = pl->pl_next) {
6832
6833 width = pl->pl_width;
6834 if (first && cb->cb_verbose) {
6835 /*
6836 * Reset the width to accommodate the verbose listing
6837 * of devices.
6838 */
6839 width = cb->cb_namewidth;
6840 }
6841
6842 if (!cb->cb_json && !first) {
6843 if (cb->cb_scripted)
6844 (void) fputc('\t', stdout);
6845 else
6846 (void) fputs(" ", stdout);
6847 } else {
6848 first = B_FALSE;
6849 }
6850
6851 right_justify = B_FALSE;
6852 if (pl->pl_prop != ZPROP_USERPROP) {
6853 if (zpool_get_prop(zhp, pl->pl_prop, property,
6854 sizeof (property), &sourcetype,
6855 cb->cb_literal) != 0)
6856 propstr = "-";
6857 else
6858 propstr = property;
6859
6860 right_justify = zpool_prop_align_right(pl->pl_prop);
6861 } else if ((zpool_prop_feature(pl->pl_user_prop) ||
6862 zpool_prop_unsupported(pl->pl_user_prop)) &&
6863 zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
6864 sizeof (property)) == 0) {
6865 propstr = property;
6866 sourcetype = ZPROP_SRC_LOCAL;
6867 } else if (zfs_prop_user(pl->pl_user_prop) &&
6868 zpool_get_userprop(zhp, pl->pl_user_prop, property,
6869 sizeof (property), &sourcetype) == 0) {
6870 propstr = property;
6871 } else {
6872 propstr = "-";
6873 }
6874
6875 if (cb->cb_json) {
6876 if (pl->pl_prop == ZPOOL_PROP_NAME)
6877 continue;
6878 const char *prop_name;
6879 if (pl->pl_prop != ZPROP_USERPROP)
6880 prop_name = zpool_prop_to_name(pl->pl_prop);
6881 else
6882 prop_name = pl->pl_user_prop;
6883 (void) zprop_nvlist_one_property(
6884 prop_name, propstr,
6885 sourcetype, NULL, NULL, props, cb->cb_json_as_int);
6886 } else {
6887 /*
6888 * If this is being called in scripted mode, or if this
6889 * is the last column and it is left-justified, don't
6890 * include a width format specifier.
6891 */
6892 if (cb->cb_scripted || (pl->pl_next == NULL &&
6893 !right_justify))
6894 (void) fputs(propstr, stdout);
6895 else if (right_justify)
6896 (void) printf("%*s", (int)width, propstr);
6897 else
6898 (void) printf("%-*s", (int)width, propstr);
6899 }
6900 }
6901
6902 if (cb->cb_json) {
6903 fnvlist_add_nvlist(item, "properties", props);
6904 if (cb->cb_json_pool_key_guid) {
6905 char pool_guid[256];
6906 uint64_t guid = fnvlist_lookup_uint64(
6907 zpool_get_config(zhp, NULL),
6908 ZPOOL_CONFIG_POOL_GUID);
6909 snprintf(pool_guid, 256, "%llu",
6910 (u_longlong_t)guid);
6911 fnvlist_add_nvlist(d, pool_guid, item);
6912 } else {
6913 fnvlist_add_nvlist(d, zpool_get_name(zhp),
6914 item);
6915 }
6916 fnvlist_free(props);
6917 fnvlist_free(item);
6918 } else
6919 (void) fputc('\n', stdout);
6920 }
6921
6922 static void
6923 collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,
6924 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format,
6925 boolean_t json, nvlist_t *nvl, boolean_t as_int)
6926 {
6927 char propval[64];
6928 boolean_t fixed;
6929 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6930
6931 switch (prop) {
6932 case ZPOOL_PROP_SIZE:
6933 case ZPOOL_PROP_EXPANDSZ:
6934 case ZPOOL_PROP_CHECKPOINT:
6935 case ZPOOL_PROP_DEDUPRATIO:
6936 case ZPOOL_PROP_DEDUPCACHED:
6937 if (value == 0)
6938 (void) strlcpy(propval, "-", sizeof (propval));
6939 else
6940 zfs_nicenum_format(value, propval, sizeof (propval),
6941 format);
6942 break;
6943 case ZPOOL_PROP_FRAGMENTATION:
6944 if (value == ZFS_FRAG_INVALID) {
6945 (void) strlcpy(propval, "-", sizeof (propval));
6946 } else if (format == ZFS_NICENUM_RAW) {
6947 (void) snprintf(propval, sizeof (propval), "%llu",
6948 (unsigned long long)value);
6949 } else {
6950 (void) snprintf(propval, sizeof (propval), "%llu%%",
6951 (unsigned long long)value);
6952 }
6953 break;
6954 case ZPOOL_PROP_CAPACITY:
6955 /* capacity value is in parts-per-10,000 (aka permyriad) */
6956 if (format == ZFS_NICENUM_RAW)
6957 (void) snprintf(propval, sizeof (propval), "%llu",
6958 (unsigned long long)value / 100);
6959 else
6960 (void) snprintf(propval, sizeof (propval),
6961 value < 1000 ? "%1.2f%%" : value < 10000 ?
6962 "%2.1f%%" : "%3.0f%%", value / 100.0);
6963 break;
6964 case ZPOOL_PROP_HEALTH:
6965 width = 8;
6966 (void) strlcpy(propval, str, sizeof (propval));
6967 break;
6968 default:
6969 zfs_nicenum_format(value, propval, sizeof (propval), format);
6970 }
6971
6972 if (!valid)
6973 (void) strlcpy(propval, "-", sizeof (propval));
6974
6975 if (json) {
6976 zprop_nvlist_one_property(zpool_prop_to_name(prop), propval,
6977 ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);
6978 } else {
6979 if (scripted)
6980 (void) printf("\t%s", propval);
6981 else
6982 (void) printf(" %*s", (int)width, propval);
6983 }
6984 }
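/*
 * Capacity example (assumed values): a vdev that is half full has
 * vs_alloc * 10000 / vs_space == 5000 permyriad; since 1000 <= 5000 < 10000,
 * the "%2.1f%%" branch above prints 5000 / 100.0 as "50.0%".
 */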
6985
6986 /*
6987 * print static default line per vdev
6988 * not compatible with '-o' <proplist> option
6989 */
6990 static void
6991 collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
6992 list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)
6993 {
6994 nvlist_t **child;
6995 vdev_stat_t *vs;
6996 uint_t c, children = 0;
6997 char *vname;
6998 boolean_t scripted = cb->cb_scripted;
6999 uint64_t islog = B_FALSE;
7000 nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;
7001 props = ent = ch = obj = sp = l2c = NULL;
7002 const char *dashes = "%-*s - - - - "
7003 "- - - - -\n";
7004
7005 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
7006 (uint64_t **)&vs, &c) == 0);
7007
7008 if (name != NULL) {
7009 boolean_t toplevel = (vs->vs_space != 0);
7010 uint64_t cap;
7011 enum zfs_nicenum_format format;
7012 const char *state;
7013
7014 if (cb->cb_literal)
7015 format = ZFS_NICENUM_RAW;
7016 else
7017 format = ZFS_NICENUM_1024;
7018
7019 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
7020 return;
7021
7022 if (cb->cb_json) {
7023 props = fnvlist_alloc();
7024 ent = fnvlist_alloc();
7025 fill_vdev_info(ent, zhp, (char *)name, B_FALSE,
7026 cb->cb_json_as_int);
7027 } else {
7028 if (scripted)
7029 (void) printf("\t%s", name);
7030 else if (strlen(name) + depth > cb->cb_namewidth)
7031 (void) printf("%*s%s", depth, "", name);
7032 else
7033 (void) printf("%*s%s%*s", depth, "", name,
7034 (int)(cb->cb_namewidth - strlen(name) -
7035 depth), "");
7036 }
7037
7038 /*
7039 * Print the properties for the individual vdevs. Some
7040 * properties are only applicable to toplevel vdevs. The
7041		 * 'toplevel' boolean value is passed to collect_vdev_prop()
7042 * to indicate that the value is valid.
7043 */
7044 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) {
7045 collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
7046 scripted, B_TRUE, format, cb->cb_json, props,
7047 cb->cb_json_as_int);
7048 } else {
7049 collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
7050 scripted, toplevel, format, cb->cb_json, props,
7051 cb->cb_json_as_int);
7052 }
7053 collect_vdev_prop(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
7054 scripted, toplevel, format, cb->cb_json, props,
7055 cb->cb_json_as_int);
7056 collect_vdev_prop(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
7057 NULL, scripted, toplevel, format, cb->cb_json, props,
7058 cb->cb_json_as_int);
7059 collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
7060 vs->vs_checkpoint_space, NULL, scripted, toplevel, format,
7061 cb->cb_json, props, cb->cb_json_as_int);
7062 collect_vdev_prop(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
7063 scripted, B_TRUE, format, cb->cb_json, props,
7064 cb->cb_json_as_int);
7065 collect_vdev_prop(ZPOOL_PROP_FRAGMENTATION,
7066 vs->vs_fragmentation, NULL, scripted,
7067 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
7068 format, cb->cb_json, props, cb->cb_json_as_int);
7069 cap = (vs->vs_space == 0) ? 0 :
7070 (vs->vs_alloc * 10000 / vs->vs_space);
7071 collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap, NULL,
7072 scripted, toplevel, format, cb->cb_json, props,
7073 cb->cb_json_as_int);
7074 collect_vdev_prop(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
7075 scripted, toplevel, format, cb->cb_json, props,
7076 cb->cb_json_as_int);
7077 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
7078 if (isspare) {
7079 if (vs->vs_aux == VDEV_AUX_SPARED)
7080 state = "INUSE";
7081 else if (vs->vs_state == VDEV_STATE_HEALTHY)
7082 state = "AVAIL";
7083 }
7084 collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state, scripted,
7085 B_TRUE, format, cb->cb_json, props, cb->cb_json_as_int);
7086
7087 if (cb->cb_json) {
7088 fnvlist_add_nvlist(ent, "properties", props);
7089 fnvlist_free(props);
7090 } else
7091 (void) fputc('\n', stdout);
7092 }
7093
7094 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
7095 &child, &children) != 0) {
7096 if (cb->cb_json) {
7097 fnvlist_add_nvlist(item, name, ent);
7098 fnvlist_free(ent);
7099 }
7100 return;
7101 }
7102
7103 if (cb->cb_json) {
7104 ch = fnvlist_alloc();
7105 }
7106
7107 /* list the normal vdevs first */
7108 for (c = 0; c < children; c++) {
7109 uint64_t ishole = B_FALSE;
7110
7111 if (nvlist_lookup_uint64(child[c],
7112 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
7113 continue;
7114
7115 if (nvlist_lookup_uint64(child[c],
7116 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
7117 continue;
7118
7119 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
7120 continue;
7121
7122 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7123 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
7124
7125 if (name == NULL || cb->cb_json != B_TRUE)
7126 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7127 B_FALSE, item);
7128 else if (cb->cb_json) {
7129 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7130 B_FALSE, ch);
7131 }
7132 free(vname);
7133 }
7134
7135 if (cb->cb_json) {
7136 if (!nvlist_empty(ch))
7137 fnvlist_add_nvlist(ent, "vdevs", ch);
7138 fnvlist_free(ch);
7139 }
7140
7141 /* list the classes: 'logs', 'dedup', and 'special' */
7142 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
7143 boolean_t printed = B_FALSE;
7144 if (cb->cb_json)
7145 obj = fnvlist_alloc();
7146 for (c = 0; c < children; c++) {
7147 const char *bias = NULL;
7148 const char *type = NULL;
7149
7150 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
7151 &islog) == 0 && islog) {
7152 bias = VDEV_ALLOC_CLASS_LOGS;
7153 } else {
7154 (void) nvlist_lookup_string(child[c],
7155 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
7156 (void) nvlist_lookup_string(child[c],
7157 ZPOOL_CONFIG_TYPE, &type);
7158 }
7159 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
7160 continue;
7161 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
7162 continue;
7163
7164 if (!printed && !cb->cb_json) {
7165 /* LINTED E_SEC_PRINTF_VAR_FMT */
7166 (void) printf(dashes, cb->cb_namewidth,
7167 class_name[n]);
7168 printed = B_TRUE;
7169 }
7170 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7171 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
7172 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7173 B_FALSE, obj);
7174 free(vname);
7175 }
7176 if (cb->cb_json) {
7177 if (!nvlist_empty(obj))
7178 fnvlist_add_nvlist(item, class_name[n], obj);
7179 fnvlist_free(obj);
7180 }
7181 }
7182
7183 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
7184 &child, &children) == 0 && children > 0) {
7185 if (cb->cb_json) {
7186 l2c = fnvlist_alloc();
7187 } else {
7188 /* LINTED E_SEC_PRINTF_VAR_FMT */
7189 (void) printf(dashes, cb->cb_namewidth, "cache");
7190 }
7191 for (c = 0; c < children; c++) {
7192 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7193 cb->cb_name_flags);
7194 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7195 B_FALSE, l2c);
7196 free(vname);
7197 }
7198 if (cb->cb_json) {
7199 if (!nvlist_empty(l2c))
7200 fnvlist_add_nvlist(item, "l2cache", l2c);
7201 fnvlist_free(l2c);
7202 }
7203 }
7204
7205 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
7206 &children) == 0 && children > 0) {
7207 if (cb->cb_json) {
7208 sp = fnvlist_alloc();
7209 } else {
7210 /* LINTED E_SEC_PRINTF_VAR_FMT */
7211 (void) printf(dashes, cb->cb_namewidth, "spare");
7212 }
7213 for (c = 0; c < children; c++) {
7214 vname = zpool_vdev_name(g_zfs, zhp, child[c],
7215 cb->cb_name_flags);
7216 collect_list_stats(zhp, vname, child[c], cb, depth + 2,
7217 B_TRUE, sp);
7218 free(vname);
7219 }
7220 if (cb->cb_json) {
7221 if (!nvlist_empty(sp))
7222 fnvlist_add_nvlist(item, "spares", sp);
7223 fnvlist_free(sp);
7224 }
7225 }
7226
7227 if (name != NULL && cb->cb_json) {
7228 fnvlist_add_nvlist(item, name, ent);
7229 fnvlist_free(ent);
7230 }
7231 }
7232
7233 /*
7234 * Generic callback function to list a pool.
7235 */
7236 static int
7237 list_callback(zpool_handle_t *zhp, void *data)
7238 {
7239 nvlist_t *p, *d, *nvdevs;
7240 uint64_t guid;
7241 char pool_guid[256];
7242 const char *pool_name = zpool_get_name(zhp);
7243 list_cbdata_t *cbp = data;
7244 p = d = nvdevs = NULL;
7245
7246 collect_pool(zhp, cbp);
7247
7248 if (cbp->cb_verbose) {
7249 nvlist_t *config, *nvroot;
7250 config = zpool_get_config(zhp, NULL);
7251 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
7252 &nvroot) == 0);
7253 if (cbp->cb_json) {
7254 d = fnvlist_lookup_nvlist(cbp->cb_jsobj,
7255 "pools");
7256 if (cbp->cb_json_pool_key_guid) {
7257 guid = fnvlist_lookup_uint64(config,
7258 ZPOOL_CONFIG_POOL_GUID);
7259 snprintf(pool_guid, 256, "%llu",
7260 (u_longlong_t)guid);
7261 p = fnvlist_lookup_nvlist(d, pool_guid);
7262 } else {
7263 p = fnvlist_lookup_nvlist(d, pool_name);
7264 }
7265 nvdevs = fnvlist_alloc();
7266 }
7267 collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs);
7268 if (cbp->cb_json) {
7269 fnvlist_add_nvlist(p, "vdevs", nvdevs);
7270 if (cbp->cb_json_pool_key_guid)
7271 fnvlist_add_nvlist(d, pool_guid, p);
7272 else
7273 fnvlist_add_nvlist(d, pool_name, p);
7274 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
7275 fnvlist_free(nvdevs);
7276 }
7277 }
7278
7279 return (0);
7280 }
7281
7282 /*
7283 * Set the minimum pool/vdev name column width. The width must be at least 9,
7284 * but may be as large as needed.
7285 */
7286 static int
7287 get_namewidth_list(zpool_handle_t *zhp, void *data)
7288 {
7289 list_cbdata_t *cb = data;
7290 int width;
7291
7292 width = get_namewidth(zhp, cb->cb_namewidth,
7293 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
7294
7295 if (width < 9)
7296 width = 9;
7297
7298 cb->cb_namewidth = width;
7299
7300 return (0);
7301 }
7302
7303 /*
7304 * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
7305 *
7306 * -g Display guid for individual vdev name.
7307 * -H Scripted mode. Don't display headers, and separate properties
7308 * by a single tab.
7309 * -L Follow links when resolving vdev path name.
7310 * -o List of properties to display. Defaults to
7311 * "name,size,allocated,free,expandsize,fragmentation,capacity,"
7312 * "dedupratio,health,altroot"
7313 * -p Display values in parsable (exact) format.
7314 * -P Display full path for vdev name.
7315 * -T Display a timestamp in date(1) or Unix format
7316 * -j Display the output in JSON format
7317 * --json-int Display the numbers as integer instead of strings.
7318 * --json-pool-key-guid Set pool GUID as key for pool objects.
7319 *
7320 * List all pools in the system, whether or not they're healthy. Output space
7321	 * statistics for each one, as well as a health status summary.
7322 */
7323 int
7324 zpool_do_list(int argc, char **argv)
7325 {
7326 int c;
7327 int ret = 0;
7328 list_cbdata_t cb = { 0 };
7329 static char default_props[] =
7330 "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
7331 "capacity,dedupratio,health,altroot";
7332 char *props = default_props;
7333 float interval = 0;
7334 unsigned long count = 0;
7335 zpool_list_t *list;
7336 boolean_t first = B_TRUE;
7337 nvlist_t *data = NULL;
7338 current_prop_type = ZFS_TYPE_POOL;
7339
7340 struct option long_options[] = {
7341 {"json", no_argument, NULL, 'j'},
7342 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
7343 {"json-pool-key-guid", no_argument, NULL,
7344 ZPOOL_OPTION_POOL_KEY_GUID},
7345 {0, 0, 0, 0}
7346 };
7347
7348 /* check options */
7349 while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options,
7350 NULL)) != -1) {
7351 switch (c) {
7352 case 'g':
7353 cb.cb_name_flags |= VDEV_NAME_GUID;
7354 break;
7355 case 'H':
7356 cb.cb_scripted = B_TRUE;
7357 break;
7358 case 'L':
7359 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
7360 break;
7361 case 'o':
7362 props = optarg;
7363 break;
7364 case 'P':
7365 cb.cb_name_flags |= VDEV_NAME_PATH;
7366 break;
7367 case 'p':
7368 cb.cb_literal = B_TRUE;
7369 break;
7370 case 'j':
7371 cb.cb_json = B_TRUE;
7372 break;
7373 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
7374 cb.cb_json_as_int = B_TRUE;
7375 cb.cb_literal = B_TRUE;
7376 break;
7377 case ZPOOL_OPTION_POOL_KEY_GUID:
7378 cb.cb_json_pool_key_guid = B_TRUE;
7379 break;
7380 case 'T':
7381 get_timestamp_arg(*optarg);
7382 break;
7383 case 'v':
7384 cb.cb_verbose = B_TRUE;
7385 cb.cb_namewidth = 8; /* 8 until precalc is avail */
7386 break;
7387 case ':':
7388 (void) fprintf(stderr, gettext("missing argument for "
7389 "'%c' option\n"), optopt);
7390 usage(B_FALSE);
7391 break;
7392 case '?':
7393 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7394 optopt);
7395 usage(B_FALSE);
7396 }
7397 }
7398
7399 argc -= optind;
7400 argv += optind;
7401
7402 if (!cb.cb_json && cb.cb_json_as_int) {
7403 (void) fprintf(stderr, gettext("'--json-int' only works with"
7404 " '-j' option\n"));
7405 usage(B_FALSE);
7406 }
7407
7408 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
7409 (void) fprintf(stderr, gettext("'json-pool-key-guid' only"
7410 " works with '-j' option\n"));
7411 usage(B_FALSE);
7412 }
7413
7414 get_interval_count(&argc, argv, &interval, &count);
7415
7416 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
7417 usage(B_FALSE);
7418
7419 for (;;) {
7420 if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
7421 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
7422 return (1);
7423
7424 if (pool_list_count(list) == 0)
7425 break;
7426
7427 if (cb.cb_json) {
7428 cb.cb_jsobj = zpool_json_schema(0, 1);
7429 data = fnvlist_alloc();
7430 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
7431 fnvlist_free(data);
7432 }
7433
7434 cb.cb_namewidth = 0;
7435 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
7436
7437 if (timestamp_fmt != NODATE) {
7438 if (cb.cb_json) {
7439 if (cb.cb_json_as_int) {
7440 fnvlist_add_uint64(cb.cb_jsobj, "time",
7441 time(NULL));
7442 } else {
7443 char ts[128];
7444 get_timestamp(timestamp_fmt, ts, 128);
7445 fnvlist_add_string(cb.cb_jsobj, "time",
7446 ts);
7447 }
7448 } else
7449 print_timestamp(timestamp_fmt);
7450 }
7451
7452 if (!cb.cb_scripted && (first || cb.cb_verbose) &&
7453 !cb.cb_json) {
7454 print_header(&cb);
7455 first = B_FALSE;
7456 }
7457 ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
7458
7459 if (ret == 0 && cb.cb_json)
7460 zcmd_print_json(cb.cb_jsobj);
7461 else if (ret != 0 && cb.cb_json)
7462 nvlist_free(cb.cb_jsobj);
7463
7464 if (interval == 0)
7465 break;
7466
7467 if (count != 0 && --count == 0)
7468 break;
7469
7470 pool_list_free(list);
7471
7472 (void) fflush(stdout);
7473 (void) fsleep(interval);
7474 }
7475
7476 if (argc == 0 && !cb.cb_scripted && !cb.cb_json &&
7477 pool_list_count(list) == 0) {
7478 (void) printf(gettext("no pools available\n"));
7479 ret = 0;
7480 }
7481
7482 pool_list_free(list);
7483 zprop_free_list(cb.cb_proplist);
7484 return (ret);
7485 }
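/*
 * Usage sketch (hypothetical invocations, using only the flags documented
 * above):
 *
 *   zpool list -o name,size,capacity     # selected columns only
 *   zpool list -Hp                       # tab-separated, exact values
 *   zpool list -j --json-int 5           # JSON output every 5 seconds
 */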
7486
7487 static int
7488 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
7489 {
7490 boolean_t force = B_FALSE;
7491 boolean_t rebuild = B_FALSE;
7492 boolean_t wait = B_FALSE;
7493 int c;
7494 nvlist_t *nvroot;
7495 char *poolname, *old_disk, *new_disk;
7496 zpool_handle_t *zhp;
7497 nvlist_t *props = NULL;
7498 char *propval;
7499 int ret;
7500
7501 /* check options */
7502 while ((c = getopt(argc, argv, "fo:sw")) != -1) {
7503 switch (c) {
7504 case 'f':
7505 force = B_TRUE;
7506 break;
7507 case 'o':
7508 if ((propval = strchr(optarg, '=')) == NULL) {
7509 (void) fprintf(stderr, gettext("missing "
7510 "'=' for -o option\n"));
7511 usage(B_FALSE);
7512 }
7513 *propval = '\0';
7514 propval++;
7515
7516 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
7517 (add_prop_list(optarg, propval, &props, B_TRUE)))
7518 usage(B_FALSE);
7519 break;
7520 case 's':
7521 rebuild = B_TRUE;
7522 break;
7523 case 'w':
7524 wait = B_TRUE;
7525 break;
7526 case '?':
7527 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7528 optopt);
7529 usage(B_FALSE);
7530 }
7531 }
7532
7533 argc -= optind;
7534 argv += optind;
7535
7536 /* get pool name and check number of arguments */
7537 if (argc < 1) {
7538 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7539 usage(B_FALSE);
7540 }
7541
7542 poolname = argv[0];
7543
7544 if (argc < 2) {
7545 (void) fprintf(stderr,
7546 gettext("missing <device> specification\n"));
7547 usage(B_FALSE);
7548 }
7549
7550 old_disk = argv[1];
7551
7552 if (argc < 3) {
7553 if (!replacing) {
7554 (void) fprintf(stderr,
7555 gettext("missing <new_device> specification\n"));
7556 usage(B_FALSE);
7557 }
7558 new_disk = old_disk;
7559 argc -= 1;
7560 argv += 1;
7561 } else {
7562 new_disk = argv[2];
7563 argc -= 2;
7564 argv += 2;
7565 }
7566
7567 if (argc > 1) {
7568 (void) fprintf(stderr, gettext("too many arguments\n"));
7569 usage(B_FALSE);
7570 }
7571
7572 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
7573 nvlist_free(props);
7574 return (1);
7575 }
7576
7577 if (zpool_get_config(zhp, NULL) == NULL) {
7578 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
7579 poolname);
7580 zpool_close(zhp);
7581 nvlist_free(props);
7582 return (1);
7583 }
7584
7585 /* unless manually specified use "ashift" pool property (if set) */
7586 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
7587 int intval;
7588 zprop_source_t src;
7589 char strval[ZPOOL_MAXPROPLEN];
7590
7591 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
7592 if (src != ZPROP_SRC_DEFAULT) {
7593 (void) sprintf(strval, "%" PRId32, intval);
7594 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
7595 &props, B_TRUE) == 0);
7596 }
7597 }
7598
7599 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
7600 argc, argv);
7601 if (nvroot == NULL) {
7602 zpool_close(zhp);
7603 nvlist_free(props);
7604 return (1);
7605 }
7606
7607 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
7608 rebuild);
7609
7610 if (ret == 0 && wait) {
7611 zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
7612 char raidz_prefix[] = "raidz";
7613 if (replacing) {
7614 activity = ZPOOL_WAIT_REPLACE;
7615 } else if (strncmp(old_disk,
7616 raidz_prefix, strlen(raidz_prefix)) == 0) {
7617 activity = ZPOOL_WAIT_RAIDZ_EXPAND;
7618 }
7619 ret = zpool_wait(zhp, activity);
7620 }
7621
7622 nvlist_free(props);
7623 nvlist_free(nvroot);
7624 zpool_close(zhp);
7625
7626 return (ret);
7627 }
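/*
 * Ashift inheritance sketch (assumed value): if the pool was created with
 * ashift=12 and no explicit -o ashift=... was given, the block above copies
 * "12" into props, so the new device is created with the same sector shift
 * as the rest of the pool.
 */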
7628
7629 /*
7630 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
7631 *
7632 * -f Force attach, even if <new_device> appears to be in use.
7633 * -s Use sequential instead of healing reconstruction for resilver.
7634 * -o Set property=value.
7635 * -w Wait for replacing to complete before returning
7636 *
7637 * Replace <device> with <new_device>.
7638 */
7639 int
7640 zpool_do_replace(int argc, char **argv)
7641 {
7642 return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
7643 }
7644
7645 /*
7646 * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
7647 *
7648 * -f Force attach, even if <new_device> appears to be in use.
7649 * -s Use sequential instead of healing reconstruction for resilver.
7650 * -o Set property=value.
7651 * -w Wait for resilvering (mirror) or expansion (raidz) to complete
7652 * before returning.
7653 *
7654 * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
7655 * mirror or raidz. If <device> is not part of a mirror, then <device> will
7656 * be transformed into a mirror of <device> and <new_device>. When a mirror
7657 * is involved, <new_device> will begin life with a DTL of [0, now], and will
7658	 * immediately begin to resilver itself. For the raidz case, an expansion will
7659 * commence and reflow the raidz data across all the disks including the
7660 * <new_device>.
7661 */
7662 int
7663 zpool_do_attach(int argc, char **argv)
7664 {
7665 return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
7666 }
7667
7668 /*
7669 * zpool detach [-f] <pool> <device>
7670 *
7671 * -f Force detach of <device>, even if DTLs argue against it
7672 * (not supported yet)
7673 *
7674 * Detach a device from a mirror. The operation will be refused if <device>
7675 * is the last device in the mirror, or if the DTLs indicate that this device
7676 * has the only valid copy of some data.
7677 */
7678 int
7679 zpool_do_detach(int argc, char **argv)
7680 {
7681 int c;
7682 char *poolname, *path;
7683 zpool_handle_t *zhp;
7684 int ret;
7685
7686 /* check options */
7687 while ((c = getopt(argc, argv, "")) != -1) {
7688 switch (c) {
7689 case '?':
7690 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7691 optopt);
7692 usage(B_FALSE);
7693 }
7694 }
7695
7696 argc -= optind;
7697 argv += optind;
7698
7699 /* get pool name and check number of arguments */
7700 if (argc < 1) {
7701 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7702 usage(B_FALSE);
7703 }
7704
7705 if (argc < 2) {
7706 (void) fprintf(stderr,
7707 gettext("missing <device> specification\n"));
7708 usage(B_FALSE);
7709 }
7710
7711 poolname = argv[0];
7712 path = argv[1];
7713
7714 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7715 return (1);
7716
7717 ret = zpool_vdev_detach(zhp, path);
7718
7719 zpool_close(zhp);
7720
7721 return (ret);
7722 }
7723
7724 /*
7725 * zpool split [-gLnP] [-o prop=val] ...
7726 * [-o mntopt] ...
7727 * [-R altroot] <pool> <newpool> [<device> ...]
7728 *
7729 * -g Display guid for individual vdev name.
7730 * -L Follow links when resolving vdev path name.
7731 * -n Do not split the pool, but display the resulting layout if
7732 * it were to be split.
7733 * -o Set property=value, or set mount options.
7734 * -P Display full path for vdev name.
7735 * -R Mount the split-off pool under an alternate root.
7736 * -l Load encryption keys while importing.
7737 *
7738 * Splits the named pool and gives it the new pool name. Devices to be split
7739 * off may be listed, provided that no more than one device is specified
7740 * per top-level vdev mirror. The newly split pool is left in an exported
7741 * state unless -R is specified.
7742 *
7743 * Restrictions: the top level of the pool must be made up only of
7744 * mirrors; all devices in the pool must be healthy; no device may be
7745 * undergoing a resilvering operation.
7746 */
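/*
 * Example (hypothetical names): split one half of each mirror off into a
 * new pool and import it under an alternate root:
 *
 *	zpool split -R /mnt/backup tank tank2
 */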
7747 int
7748 zpool_do_split(int argc, char **argv)
7749 {
7750 char *srcpool, *newpool, *propval;
7751 char *mntopts = NULL;
7752 splitflags_t flags;
7753 int c, ret = 0;
7754 int ms_status = 0;
7755 boolean_t loadkeys = B_FALSE;
7756 zpool_handle_t *zhp;
7757 nvlist_t *config, *props = NULL;
7758
7759 flags.dryrun = B_FALSE;
7760 flags.import = B_FALSE;
7761 flags.name_flags = 0;
7762
7763 /* check options */
7764 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
7765 switch (c) {
7766 case 'g':
7767 flags.name_flags |= VDEV_NAME_GUID;
7768 break;
7769 case 'L':
7770 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
7771 break;
7772 case 'R':
7773 flags.import = B_TRUE;
7774 if (add_prop_list(
7775 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
7776 &props, B_TRUE) != 0) {
7777 nvlist_free(props);
7778 usage(B_FALSE);
7779 }
7780 break;
7781 case 'l':
7782 loadkeys = B_TRUE;
7783 break;
7784 case 'n':
7785 flags.dryrun = B_TRUE;
7786 break;
7787 case 'o':
7788 if ((propval = strchr(optarg, '=')) != NULL) {
7789 *propval = '\0';
7790 propval++;
7791 if (add_prop_list(optarg, propval,
7792 &props, B_TRUE) != 0) {
7793 nvlist_free(props);
7794 usage(B_FALSE);
7795 }
7796 } else {
7797 mntopts = optarg;
7798 }
7799 break;
7800 case 'P':
7801 flags.name_flags |= VDEV_NAME_PATH;
7802 break;
7803 case ':':
7804 (void) fprintf(stderr, gettext("missing argument for "
7805 "'%c' option\n"), optopt);
7806 usage(B_FALSE);
7807 break;
7808 case '?':
7809 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7810 optopt);
7811 usage(B_FALSE);
7812 break;
7813 }
7814 }
7815
7816 if (!flags.import && mntopts != NULL) {
7817 (void) fprintf(stderr, gettext("setting mntopts is only "
7818 "valid when importing the pool\n"));
7819 usage(B_FALSE);
7820 }
7821
7822 if (!flags.import && loadkeys) {
7823 (void) fprintf(stderr, gettext("loading keys is only "
7824 "valid when importing the pool\n"));
7825 usage(B_FALSE);
7826 }
7827
7828 argc -= optind;
7829 argv += optind;
7830
7831 if (argc < 1) {
7832 (void) fprintf(stderr, gettext("Missing pool name\n"));
7833 usage(B_FALSE);
7834 }
7835 if (argc < 2) {
7836 (void) fprintf(stderr, gettext("Missing new pool name\n"));
7837 usage(B_FALSE);
7838 }
7839
7840 srcpool = argv[0];
7841 newpool = argv[1];
7842
7843 argc -= 2;
7844 argv += 2;
7845
7846 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
7847 nvlist_free(props);
7848 return (1);
7849 }
7850
7851 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
7852 if (config == NULL) {
7853 ret = 1;
7854 } else {
7855 if (flags.dryrun) {
7856 (void) printf(gettext("would create '%s' with the "
7857 "following layout:\n\n"), newpool);
7858 print_vdev_tree(NULL, newpool, config, 0, "",
7859 flags.name_flags);
7860 print_vdev_tree(NULL, "dedup", config, 0,
7861 VDEV_ALLOC_BIAS_DEDUP, 0);
7862 print_vdev_tree(NULL, "special", config, 0,
7863 VDEV_ALLOC_BIAS_SPECIAL, 0);
7864 }
7865 }
7866
7867 zpool_close(zhp);
7868
7869 if (ret != 0 || flags.dryrun || !flags.import) {
7870 nvlist_free(config);
7871 nvlist_free(props);
7872 return (ret);
7873 }
7874
7875 /*
7876 * The split was successful. Now we need to open the new
7877 * pool and import it.
7878 */
7879 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
7880 nvlist_free(config);
7881 nvlist_free(props);
7882 return (1);
7883 }
7884
7885 if (loadkeys) {
7886 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
7887 if (ret != 0)
7888 ret = 1;
7889 }
7890
7891 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
7892 ms_status = zpool_enable_datasets(zhp, mntopts, 0,
7893 mount_tp_nthr);
7894 if (ms_status == EZFS_SHAREFAILED) {
7895 (void) fprintf(stderr, gettext("Split was successful, "
7896 "datasets are mounted but sharing of some datasets "
7897 "has failed\n"));
7898 } else if (ms_status == EZFS_MOUNTFAILED) {
7899 (void) fprintf(stderr, gettext("Split was successful"
7900 ", but some datasets could not be mounted\n"));
7901 (void) fprintf(stderr, gettext("Try doing '%s' with a "
7902 "different altroot\n"), "zpool import");
7903 }
7904 }
7905 zpool_close(zhp);
7906 nvlist_free(config);
7907 nvlist_free(props);
7908
7909 return (ret);
7910 }
7911
7912
7913 /*
7914 * zpool online [-e] [--power] <pool> <device> ...
7915 * -e Expand the device to use all available space.
7916 * --power Power on the enclosure slot to the drive (if possible).
7917 */
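/*
 * Example (hypothetical names): bring a device back online and expand it
 * to use any newly available space:
 *
 *	zpool online -e tank sda
 */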
7918 int
7919 zpool_do_online(int argc, char **argv)
7920 {
7921 int c, i;
7922 char *poolname;
7923 zpool_handle_t *zhp;
7924 int ret = 0;
7925 vdev_state_t newstate;
7926 int flags = 0;
7927 boolean_t is_power_on = B_FALSE;
7928 struct option long_options[] = {
7929 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7930 {0, 0, 0, 0}
7931 };
7932
7933 /* check options */
7934 while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
7935 switch (c) {
7936 case 'e':
7937 flags |= ZFS_ONLINE_EXPAND;
7938 break;
7939 case ZPOOL_OPTION_POWER:
7940 is_power_on = B_TRUE;
7941 break;
7942 case '?':
7943 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7944 optopt);
7945 usage(B_FALSE);
7946 }
7947 }
7948
7949 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7950 is_power_on = B_TRUE;
7951
7952 argc -= optind;
7953 argv += optind;
7954
7955 /* get pool name and check number of arguments */
7956 if (argc < 1) {
7957 (void) fprintf(stderr, gettext("missing pool name\n"));
7958 usage(B_FALSE);
7959 }
7960 if (argc < 2) {
7961 (void) fprintf(stderr, gettext("missing device name\n"));
7962 usage(B_FALSE);
7963 }
7964
7965 poolname = argv[0];
7966
7967 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
7968 (void) fprintf(stderr, gettext("failed to open pool "
7969 "\"%s\""), poolname);
7970 return (1);
7971 }
7972
7973 for (i = 1; i < argc; i++) {
7974 vdev_state_t oldstate;
7975 boolean_t avail_spare, l2cache;
7976 int rc;
7977
7978 if (is_power_on) {
7979 rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
7980 if (rc == ENOTSUP) {
7981 (void) fprintf(stderr,
7982 gettext("Power control not supported\n"));
7983 }
7984 if (rc != 0)
7985 return (rc);
7986 }
7987
7988 nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
7989 &l2cache, NULL);
7990 if (tgt == NULL) {
7991 ret = 1;
7992 (void) fprintf(stderr, gettext("couldn't find device "
7993 "\"%s\" in pool \"%s\"\n"), argv[i], poolname);
7994 continue;
7995 }
7996 uint_t vsc;
7997 oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
7998 ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
7999 if ((rc = zpool_vdev_online(zhp, argv[i], flags,
8000 &newstate)) == 0) {
8001 if (newstate != VDEV_STATE_HEALTHY) {
8002 (void) printf(gettext("warning: device '%s' "
8003 "onlined, but remains in faulted state\n"),
8004 argv[i]);
8005 if (newstate == VDEV_STATE_FAULTED)
8006 (void) printf(gettext("use 'zpool "
8007 "clear' to restore a faulted "
8008 "device\n"));
8009 else
8010 (void) printf(gettext("use 'zpool "
8011 "replace' to replace devices "
8012 "that are no longer present\n"));
8013 if ((flags & ZFS_ONLINE_EXPAND)) {
8014 (void) printf(gettext("%s: failed "
8015 "to expand usable space on "
8016 "unhealthy device '%s'\n"),
8017 (oldstate >= VDEV_STATE_DEGRADED ?
8018 "error" : "warning"), argv[i]);
8019 if (oldstate >= VDEV_STATE_DEGRADED) {
8020 ret = 1;
8021 break;
8022 }
8023 }
8024 }
8025 } else {
8026 (void) fprintf(stderr, gettext("Failed to online "
8027 "\"%s\" in pool \"%s\": %d\n"),
8028 argv[i], poolname, rc);
8029 ret = 1;
8030 }
8031 }
8032
8033 zpool_close(zhp);
8034
8035 return (ret);
8036 }
8037
8038 /*
8039 * zpool offline [-ft]|[--power] <pool> <device> ...
8040 *
8042 * -f Force the device into a faulted state.
8043 *
8044 * -t Only take the device off-line temporarily. The offline/faulted
8045 * state will not be persistent across reboots.
8046 *
8047 * --power Power off the enclosure slot to the drive (if possible)
8048 */
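/*
 * Example (hypothetical names): take a device offline only until the
 * next reboot:
 *
 *	zpool offline -t tank sda
 */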
8049 int
8050 zpool_do_offline(int argc, char **argv)
8051 {
8052 int c, i;
8053 char *poolname;
8054 zpool_handle_t *zhp;
8055 int ret = 0;
8056 boolean_t istmp = B_FALSE;
8057 boolean_t fault = B_FALSE;
8058 boolean_t is_power_off = B_FALSE;
8059
8060 struct option long_options[] = {
8061 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
8062 {0, 0, 0, 0}
8063 };
8064
8065 /* check options */
8066 while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
8067 switch (c) {
8068 case 'f':
8069 fault = B_TRUE;
8070 break;
8071 case 't':
8072 istmp = B_TRUE;
8073 break;
8074 case ZPOOL_OPTION_POWER:
8075 is_power_off = B_TRUE;
8076 break;
8077 case '?':
8078 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8079 optopt);
8080 usage(B_FALSE);
8081 }
8082 }
8083
8084 if (is_power_off && fault) {
8085 (void) fprintf(stderr,
8086 gettext("-0 and -f cannot be used together\n"));
8087 usage(B_FALSE);
8088 return (1);
8089 }
8090
8091 if (is_power_off && istmp) {
8092 (void) fprintf(stderr,
8093 gettext("-0 and -t cannot be used together\n"));
8094 usage(B_FALSE);
8095 return (1);
8096 }
8097
8098 argc -= optind;
8099 argv += optind;
8100
8101 /* get pool name and check number of arguments */
8102 if (argc < 1) {
8103 (void) fprintf(stderr, gettext("missing pool name\n"));
8104 usage(B_FALSE);
8105 }
8106 if (argc < 2) {
8107 (void) fprintf(stderr, gettext("missing device name\n"));
8108 usage(B_FALSE);
8109 }
8110
8111 poolname = argv[0];
8112
8113 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
8114 (void) fprintf(stderr, gettext("failed to open pool "
8115 "\"%s\""), poolname);
8116 return (1);
8117 }
8118
8119 for (i = 1; i < argc; i++) {
8120 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
8121 if (is_power_off) {
8122 /*
8123 * Note: we have to power off first, then set REMOVED,
8124 * or else zpool_vdev_set_removed_state() returns
8125 * EAGAIN.
8126 */
8127 ret = zpool_power_off(zhp, argv[i]);
8128 if (ret != 0) {
8129 (void) fprintf(stderr, "%s %s %d\n",
8130 gettext("unable to power off slot for"),
8131 argv[i], ret);
8132 }
8133 zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);
8134
8135 } else if (fault) {
8136 vdev_aux_t aux;
8137 if (istmp == B_FALSE) {
8138 /* Force the fault to persist across imports */
8139 aux = VDEV_AUX_EXTERNAL_PERSIST;
8140 } else {
8141 aux = VDEV_AUX_EXTERNAL;
8142 }
8143
8144 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
8145 ret = 1;
8146 } else {
8147 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
8148 ret = 1;
8149 }
8150 }
8151
8152 zpool_close(zhp);
8153
8154 return (ret);
8155 }
8156
8157 /*
8158 * zpool clear [-nF [-X]]|[--power] <pool> [device]
8159 * -F rewind to a recoverable txg if needed; -n dry-run the rewind; -X allow an extreme rewind; --power power on the enclosure slot(s) first.
8160 * Clear all errors associated with a pool or a particular device.
8161 */
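/*
 * Examples (hypothetical names): clear errors on one device, or attempt
 * a rewind of the whole pool:
 *
 *	zpool clear tank sda
 *	zpool clear -F tank
 */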
8162 int
8163 zpool_do_clear(int argc, char **argv)
8164 {
8165 int c;
8166 int ret = 0;
8167 boolean_t dryrun = B_FALSE;
8168 boolean_t do_rewind = B_FALSE;
8169 boolean_t xtreme_rewind = B_FALSE;
8170 boolean_t is_power_on = B_FALSE;
8171 uint32_t rewind_policy = ZPOOL_NO_REWIND;
8172 nvlist_t *policy = NULL;
8173 zpool_handle_t *zhp;
8174 char *pool, *device;
8175
8176 struct option long_options[] = {
8177 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
8178 {0, 0, 0, 0}
8179 };
8180
8181 /* check options */
8182 while ((c = getopt_long(argc, argv, "FnX", long_options,
8183 NULL)) != -1) {
8184 switch (c) {
8185 case 'F':
8186 do_rewind = B_TRUE;
8187 break;
8188 case 'n':
8189 dryrun = B_TRUE;
8190 break;
8191 case 'X':
8192 xtreme_rewind = B_TRUE;
8193 break;
8194 case ZPOOL_OPTION_POWER:
8195 is_power_on = B_TRUE;
8196 break;
8197 case '?':
8198 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8199 optopt);
8200 usage(B_FALSE);
8201 }
8202 }
8203
8204 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
8205 is_power_on = B_TRUE;
8206
8207 argc -= optind;
8208 argv += optind;
8209
8210 if (argc < 1) {
8211 (void) fprintf(stderr, gettext("missing pool name\n"));
8212 usage(B_FALSE);
8213 }
8214
8215 if (argc > 2) {
8216 (void) fprintf(stderr, gettext("too many arguments\n"));
8217 usage(B_FALSE);
8218 }
8219
8220 if ((dryrun || xtreme_rewind) && !do_rewind) {
8221 (void) fprintf(stderr,
8222 gettext("-n or -X only meaningful with -F\n"));
8223 usage(B_FALSE);
8224 }
8225 if (dryrun)
8226 rewind_policy = ZPOOL_TRY_REWIND;
8227 else if (do_rewind)
8228 rewind_policy = ZPOOL_DO_REWIND;
8229 if (xtreme_rewind)
8230 rewind_policy |= ZPOOL_EXTREME_REWIND;
8231
8232 /* In future, further rewind policy choices can be passed along here */
8233 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
8234 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
8235 rewind_policy) != 0) {
8236 return (1);
8237 }
8238
8239 pool = argv[0];
8240 device = argc == 2 ? argv[1] : NULL;
8241
8242 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
8243 nvlist_free(policy);
8244 return (1);
8245 }
8246
8247 if (is_power_on) {
8248 if (device == NULL) {
8249 zpool_power_on_pool_and_wait_for_devices(zhp);
8250 } else {
8251 zpool_power_on_and_disk_wait(zhp, device);
8252 }
8253 }
8254
8255 if (zpool_clear(zhp, device, policy) != 0)
8256 ret = 1;
8257
8258 zpool_close(zhp);
8259
8260 nvlist_free(policy);
8261
8262 return (ret);
8263 }
8264
8265 /*
8266 * zpool reguid [-g <guid>] <pool>
8267 */
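/*
 * Examples (hypothetical pool name): generate a new random GUID, or set
 * a specific decimal GUID:
 *
 *	zpool reguid tank
 *	zpool reguid -g 1234567890 tank
 */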
8268 int
8269 zpool_do_reguid(int argc, char **argv)
8270 {
8271 uint64_t guid;
8272 uint64_t *guidp = NULL;
8273 int c;
8274 char *endptr;
8275 char *poolname;
8276 zpool_handle_t *zhp;
8277 int ret = 0;
8278
8279 /* check options */
8280 while ((c = getopt(argc, argv, "g:")) != -1) {
8281 switch (c) {
8282 case 'g':
8283 errno = 0;
8284 guid = strtoull(optarg, &endptr, 10);
8285 if (errno != 0 || *endptr != '\0') {
8286 (void) fprintf(stderr,
8287 gettext("invalid GUID: %s\n"), optarg);
8288 usage(B_FALSE);
8289 }
8290 guidp = &guid;
8291 break;
8292 case '?':
8293 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8294 optopt);
8295 usage(B_FALSE);
8296 }
8297 }
8298
8299 argc -= optind;
8300 argv += optind;
8301
8302 /* get pool name and check number of arguments */
8303 if (argc < 1) {
8304 (void) fprintf(stderr, gettext("missing pool name\n"));
8305 usage(B_FALSE);
8306 }
8307
8308 if (argc > 1) {
8309 (void) fprintf(stderr, gettext("too many arguments\n"));
8310 usage(B_FALSE);
8311 }
8312
8313 poolname = argv[0];
8314 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
8315 return (1);
8316
8317 ret = zpool_set_guid(zhp, guidp);
8318
8319 zpool_close(zhp);
8320 return (ret);
8321 }
8322
8323
8324 /*
8325 * zpool reopen [-n] <pool>
8326 * -n Do not restart an in-progress scrub.
8327 * Reopen the pool so that the kernel can update the sizes of all vdevs.
8328 */
8329 int
8330 zpool_do_reopen(int argc, char **argv)
8331 {
8332 int c;
8333 int ret = 0;
8334 boolean_t scrub_restart = B_TRUE;
8335
8336 /* check options */
8337 while ((c = getopt(argc, argv, "n")) != -1) {
8338 switch (c) {
8339 case 'n':
8340 scrub_restart = B_FALSE;
8341 break;
8342 case '?':
8343 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8344 optopt);
8345 usage(B_FALSE);
8346 }
8347 }
8348
8349 argc -= optind;
8350 argv += optind;
8351
8352 /* if argc == 0 we will execute zpool_reopen_one on all pools */
8353 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8354 B_FALSE, zpool_reopen_one, &scrub_restart);
8355
8356 return (ret);
8357 }
8358
8359 typedef struct scrub_cbdata {
8360 int cb_type;
8361 pool_scrub_cmd_t cb_scrub_cmd;
8362 time_t cb_date_start;
8363 time_t cb_date_end;
8364 } scrub_cbdata_t;
8365
8366 static boolean_t
8367 zpool_has_checkpoint(zpool_handle_t *zhp)
8368 {
8369 nvlist_t *config, *nvroot;
8370
8371 config = zpool_get_config(zhp, NULL);
8372
8373 if (config != NULL) {
8374 pool_checkpoint_stat_t *pcs = NULL;
8375 uint_t c;
8376
8377 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
8378 (void) nvlist_lookup_uint64_array(nvroot,
8379 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
8380
8381 if (pcs == NULL || pcs->pcs_state == CS_NONE)
8382 return (B_FALSE);
8383
8384 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
8385 pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
8386 return (B_TRUE);
8387 }
8388
8389 return (B_FALSE);
8390 }
8391
8392 static int
8393 scrub_callback(zpool_handle_t *zhp, void *data)
8394 {
8395 scrub_cbdata_t *cb = data;
8396 int err;
8397
8398 /*
8399 * Ignore faulted pools.
8400 */
8401 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
8402 (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
8403 "currently unavailable\n"), zpool_get_name(zhp));
8404 return (1);
8405 }
8406
8407 err = zpool_scan_range(zhp, cb->cb_type, cb->cb_scrub_cmd,
8408 cb->cb_date_start, cb->cb_date_end);
8409 if (err == 0 && zpool_has_checkpoint(zhp) &&
8410 cb->cb_type == POOL_SCAN_SCRUB) {
8411 (void) printf(gettext("warning: will not scrub state that "
8412 "belongs to the checkpoint of pool '%s'\n"),
8413 zpool_get_name(zhp));
8414 }
8415
8416 return (err != 0);
8417 }
8418
8419 static int
8420 wait_callback(zpool_handle_t *zhp, void *data)
8421 {
8422 zpool_wait_activity_t *act = data;
8423 return (zpool_wait(zhp, *act));
8424 }
8425
8426 static time_t
8427 date_string_to_sec(const char *timestr, boolean_t rounding)
8428 {
8429 struct tm tm = {0};
8430 int adjustment = rounding ? 1 : 0;
8431
8432 /* Allow mktime to determine timezone. */
8433 tm.tm_isdst = -1;
8434
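/*
 * Examples: "2024-05-01 12:30" parses to that minute; with rounding it
 * becomes 12:31 (+60 s). "2024-05-01" parses to local midnight; with
 * rounding it becomes midnight of the following day (+86400 s), which
 * makes an end date inclusive of the whole day.
 */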
8435 if (strptime(timestr, "%Y-%m-%d %H:%M", &tm) == NULL) {
8436 if (strptime(timestr, "%Y-%m-%d", &tm) == NULL) {
8437 fprintf(stderr, gettext("Failed to parse the date.\n"));
8438 usage(B_FALSE);
8439 }
8440 adjustment *= 24 * 60 * 60;
8441 } else {
8442 adjustment *= 60;
8443 }
8444
8445 return (mktime(&tm) + adjustment);
8446 }
8447
8448 /*
8449 * zpool scrub [-a] [-e | -s | -p | -C] [-S date] [-E date] [-w] <pool> ...
8450 * -a Scrub all pools.
8451 * -e Only scrub blocks in the error log.
8452 * -E End date of scrub, "YYYY-MM-DD" or "YYYY-MM-DD HH:MM" (inclusive).
8453 * -S Start date of scrub, in the same formats.
8454 * -s Stop. Stops any in-progress scrub.
8455 * -p Pause. Pause in-progress scrub.
8456 * -w Wait. Blocks until scrub has completed.
8457 * -C Scrub from last saved txg.
8458 */
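/*
 * Example (hypothetical pool name): scrub only data written in April 2024
 * and block until the scrub completes (the -E date is rounded up, so the
 * end day is included):
 *
 *	zpool scrub -w -S "2024-04-01" -E "2024-04-30" tank
 */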
8459 int
8460 zpool_do_scrub(int argc, char **argv)
8461 {
8462 int c;
8463 scrub_cbdata_t cb;
8464 boolean_t wait = B_FALSE;
8465 int error;
8466
8467 cb.cb_type = POOL_SCAN_SCRUB;
8468 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8469 cb.cb_date_start = cb.cb_date_end = 0;
8470
8471 boolean_t is_error_scrub = B_FALSE;
8472 boolean_t is_pause = B_FALSE;
8473 boolean_t is_stop = B_FALSE;
8474 boolean_t is_txg_continue = B_FALSE;
8475 boolean_t scrub_all = B_FALSE;
8476
8477 /* check options */
8478 while ((c = getopt(argc, argv, "aspweCE:S:")) != -1) {
8479 switch (c) {
8480 case 'a':
8481 scrub_all = B_TRUE;
8482 break;
8483 case 'e':
8484 is_error_scrub = B_TRUE;
8485 break;
8486 case 'E':
8487 /*
8488 * Round the date. It's better to scrub more data than
8489 * less. This also makes the date inclusive.
8490 */
8491 cb.cb_date_end = date_string_to_sec(optarg, B_TRUE);
8492 break;
8493 case 's':
8494 is_stop = B_TRUE;
8495 break;
8496 case 'S':
8497 cb.cb_date_start = date_string_to_sec(optarg, B_FALSE);
8498 break;
8499 case 'p':
8500 is_pause = B_TRUE;
8501 break;
8502 case 'w':
8503 wait = B_TRUE;
8504 break;
8505 case 'C':
8506 is_txg_continue = B_TRUE;
8507 break;
8508 case '?':
8509 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8510 optopt);
8511 usage(B_FALSE);
8512 }
8513 }
8514
8515 if (is_pause && is_stop) {
8516 (void) fprintf(stderr, gettext("invalid option "
8517 "combination: -s and -p are mutually exclusive\n"));
8518 usage(B_FALSE);
8519 } else if (is_pause && is_txg_continue) {
8520 (void) fprintf(stderr, gettext("invalid option "
8521 "combination: -p and -C are mutually exclusive\n"));
8522 usage(B_FALSE);
8523 } else if (is_stop && is_txg_continue) {
8524 (void) fprintf(stderr, gettext("invalid option "
8525 "combination: -s and -C are mutually exclusive\n"));
8526 usage(B_FALSE);
8527 } else if (is_error_scrub && is_txg_continue) {
8528 (void) fprintf(stderr, gettext("invalid option "
8529 "combination: -e and -C are mutually exclusive\n"));
8530 usage(B_FALSE);
8531 } else {
8532 if (is_error_scrub)
8533 cb.cb_type = POOL_SCAN_ERRORSCRUB;
8534
8535 if (is_pause) {
8536 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
8537 } else if (is_stop) {
8538 cb.cb_type = POOL_SCAN_NONE;
8539 } else if (is_txg_continue) {
8540 cb.cb_scrub_cmd = POOL_SCRUB_FROM_LAST_TXG;
8541 } else {
8542 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8543 }
8544 }
8545
8546 if ((cb.cb_date_start != 0 || cb.cb_date_end != 0) &&
8547 cb.cb_scrub_cmd != POOL_SCRUB_NORMAL) {
8548 (void) fprintf(stderr, gettext("invalid option combination: "
8549 "start/end date is available only with normal scrub\n"));
8550 usage(B_FALSE);
8551 }
8552 if (cb.cb_date_start != 0 && cb.cb_date_end != 0 &&
8553 cb.cb_date_start > cb.cb_date_end) {
8554 (void) fprintf(stderr, gettext("invalid arguments: "
8555 "end date has to be later than start date\n"));
8556 usage(B_FALSE);
8557 }
8558
8559 if (wait && (cb.cb_type == POOL_SCAN_NONE ||
8560 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
8561 (void) fprintf(stderr, gettext("invalid option combination: "
8562 "-w cannot be used with -p or -s\n"));
8563 usage(B_FALSE);
8564 }
8565
8566 argc -= optind;
8567 argv += optind;
8568
8569 if (argc < 1 && !scrub_all) {
8570 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8571 usage(B_FALSE);
8572 }
8573
8574 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8575 B_FALSE, scrub_callback, &cb);
8576
8577 if (wait && !error) {
8578 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
8579 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8580 B_FALSE, wait_callback, &act);
8581 }
8582
8583 return (error);
8584 }
8585
8586 /*
8587 * zpool resilver <pool> ...
8588 *
8589 * Restarts any in-progress resilver from the beginning.
8590 */
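/*
 * Example (hypothetical pool name):
 *
 *	zpool resilver tank
 */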
8591 int
8592 zpool_do_resilver(int argc, char **argv)
8593 {
8594 int c;
8595 scrub_cbdata_t cb;
8596
8597 cb.cb_type = POOL_SCAN_RESILVER;
8598 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8599 cb.cb_date_start = cb.cb_date_end = 0;
8600
8601 /* check options */
8602 while ((c = getopt(argc, argv, "")) != -1) {
8603 switch (c) {
8604 case '?':
8605 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8606 optopt);
8607 usage(B_FALSE);
8608 }
8609 }
8610
8611 argc -= optind;
8612 argv += optind;
8613
8614 if (argc < 1) {
8615 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8616 usage(B_FALSE);
8617 }
8618
8619 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8620 B_FALSE, scrub_callback, &cb));
8621 }
8622
8623 /*
8624 * zpool trim [-a] [-d] [-r <rate>] [-c | -s] [-w] <pool> [<device> ...]
8625 * -a Trim all pools.
8626 * -c Cancel. Ends any in-progress trim.
8627 * -d Secure trim. Requires kernel and device support.
8628 * -r <rate> Sets the TRIM rate in bytes (per second). Supports
8629 * adding a multiplier suffix such as 'k' or 'm'.
8630 * -s Suspend. TRIM can then be restarted with no flags.
8631 * -w Wait. Blocks until trimming has completed.
8632 */
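/*
 * Example (hypothetical names): start a secure TRIM of two devices,
 * capped at 100M per second:
 *
 *	zpool trim -d -r 100M tank sda sdb
 */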
8633 int
8634 zpool_do_trim(int argc, char **argv)
8635 {
8636 struct option long_options[] = {
8637 {"cancel", no_argument, NULL, 'c'},
8638 {"secure", no_argument, NULL, 'd'},
8639 {"rate", required_argument, NULL, 'r'},
8640 {"suspend", no_argument, NULL, 's'},
8641 {"wait", no_argument, NULL, 'w'},
8642 {"all", no_argument, NULL, 'a'},
8643 {0, 0, 0, 0}
8644 };
8645
8646 pool_trim_func_t cmd_type = POOL_TRIM_START;
8647 uint64_t rate = 0;
8648 boolean_t secure = B_FALSE;
8649 boolean_t wait = B_FALSE;
8650 boolean_t trimall = B_FALSE;
8651 int error;
8652
8653 int c;
8654 while ((c = getopt_long(argc, argv, "acdr:sw", long_options, NULL))
8655 != -1) {
8656 switch (c) {
8657 case 'a':
8658 trimall = B_TRUE;
8659 break;
8660 case 'c':
8661 if (cmd_type != POOL_TRIM_START &&
8662 cmd_type != POOL_TRIM_CANCEL) {
8663 (void) fprintf(stderr, gettext("-c cannot be "
8664 "combined with other options\n"));
8665 usage(B_FALSE);
8666 }
8667 cmd_type = POOL_TRIM_CANCEL;
8668 break;
8669 case 'd':
8670 if (cmd_type != POOL_TRIM_START) {
8671 (void) fprintf(stderr, gettext("-d cannot be "
8672 "combined with the -c or -s options\n"));
8673 usage(B_FALSE);
8674 }
8675 secure = B_TRUE;
8676 break;
8677 case 'r':
8678 if (cmd_type != POOL_TRIM_START) {
8679 (void) fprintf(stderr, gettext("-r cannot be "
8680 "combined with the -c or -s options\n"));
8681 usage(B_FALSE);
8682 }
8683 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
8684 (void) fprintf(stderr, "%s: %s\n",
8685 gettext("invalid value for rate"),
8686 libzfs_error_description(g_zfs));
8687 usage(B_FALSE);
8688 }
8689 break;
8690 case 's':
8691 if (cmd_type != POOL_TRIM_START &&
8692 cmd_type != POOL_TRIM_SUSPEND) {
8693 (void) fprintf(stderr, gettext("-s cannot be "
8694 "combined with other options\n"));
8695 usage(B_FALSE);
8696 }
8697 cmd_type = POOL_TRIM_SUSPEND;
8698 break;
8699 case 'w':
8700 wait = B_TRUE;
8701 break;
8702 case '?':
8703 if (optopt != 0) {
8704 (void) fprintf(stderr,
8705 gettext("invalid option '%c'\n"), optopt);
8706 } else {
8707 (void) fprintf(stderr,
8708 gettext("invalid option '%s'\n"),
8709 argv[optind - 1]);
8710 }
8711 usage(B_FALSE);
8712 }
8713 }
8714
8715 argc -= optind;
8716 argv += optind;
8717
8718 trimflags_t trim_flags = {
8719 .secure = secure,
8720 .rate = rate,
8721 .wait = wait,
8722 };
8723
8724 trim_cbdata_t cbdata = {
8725 .trim_flags = trim_flags,
8726 .cmd_type = cmd_type
8727 };
8728
8729 if (argc < 1 && !trimall) {
8730 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8731 usage(B_FALSE);
8732 return (-1);
8733 }
8734
8735 if (wait && (cmd_type != POOL_TRIM_START)) {
8736 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
8737 "-s options\n"));
8738 usage(B_FALSE);
8739 }
8740
8741 if (trimall && argc > 0) {
8742 (void) fprintf(stderr, gettext("-a cannot be combined with "
8743 "individual zpools or vdevs\n"));
8744 usage(B_FALSE);
8745 }
8746
8747 if (argc == 0 && trimall) {
8748 cbdata.trim_flags.fullpool = B_TRUE;
8749 /* Trim each pool recursively */
8750 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8751 B_FALSE, zpool_trim_one, &cbdata);
8752 } else if (argc == 1) {
8753 char *poolname = argv[0];
8754 zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
8755 if (zhp == NULL)
8756 return (-1);
8757 /* no individual leaf vdevs specified, so add them all */
8758 error = zpool_trim_one(zhp, &cbdata);
8759 zpool_close(zhp);
8760 } else {
8761 char *poolname = argv[0];
8762 zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
8763 if (zhp == NULL)
8764 return (-1);
8765 /* leaf vdevs specified, trim only those */
8766 cbdata.trim_flags.fullpool = B_FALSE;
8767 nvlist_t *vdevs = fnvlist_alloc();
8768 for (int i = 1; i < argc; i++) {
8769 fnvlist_add_boolean(vdevs, argv[i]);
8770 }
8771 error = zpool_trim(zhp, cbdata.cmd_type, vdevs,
8772 &cbdata.trim_flags);
8773 fnvlist_free(vdevs);
8774 zpool_close(zhp);
8775 }
8776
8777 return (error);
8778 }
8779
8780 /*
8781 * Converts a total number of seconds to a human-readable string broken
8782 * down into days/hours/minutes/seconds.
8783 */
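/*
 * For example, 93784 seconds formats as "1 days 02:03:04" and 7384
 * seconds as "02:03:04".
 */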
8784 static void
8785 secs_to_dhms(uint64_t total, char *buf)
8786 {
8787 uint64_t days = total / 60 / 60 / 24;
8788 uint64_t hours = (total / 60 / 60) % 24;
8789 uint64_t mins = (total / 60) % 60;
8790 uint64_t secs = (total % 60);
8791
8792 if (days > 0) {
8793 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
8794 (u_longlong_t)days, (u_longlong_t)hours,
8795 (u_longlong_t)mins, (u_longlong_t)secs);
8796 } else {
8797 (void) sprintf(buf, "%02llu:%02llu:%02llu",
8798 (u_longlong_t)hours, (u_longlong_t)mins,
8799 (u_longlong_t)secs);
8800 }
8801 }
8802
8803 /*
8804 * Print out detailed error scrub status.
8805 */
8806 static void
8807 print_err_scrub_status(pool_scan_stat_t *ps)
8808 {
8809 time_t start, end, pause;
8810 uint64_t total_secs_left;
8811 uint64_t secs_left, mins_left, hours_left, days_left;
8812 uint64_t examined, to_be_examined;
8813
8814 if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
8815 return;
8816 }
8817
8818 (void) printf(gettext(" scrub: "));
8819
8820 start = ps->pss_error_scrub_start;
8821 end = ps->pss_error_scrub_end;
8822 pause = ps->pss_pass_error_scrub_pause;
8823 examined = ps->pss_error_scrub_examined;
8824 to_be_examined = ps->pss_error_scrub_to_be_examined;
8825
8826 assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);
8827
8828 if (ps->pss_error_scrub_state == DSS_FINISHED) {
8829 total_secs_left = end - start;
8830 days_left = total_secs_left / 60 / 60 / 24;
8831 hours_left = (total_secs_left / 60 / 60) % 24;
8832 mins_left = (total_secs_left / 60) % 60;
8833 secs_left = (total_secs_left % 60);
8834
8835 (void) printf(gettext("scrubbed %llu error blocks in %llu days "
8836 "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
8837 (u_longlong_t)days_left, (u_longlong_t)hours_left,
8838 (u_longlong_t)mins_left, (u_longlong_t)secs_left,
8839 ctime(&end));
8840
8841 return;
8842 } else if (ps->pss_error_scrub_state == DSS_CANCELED) {
8843 (void) printf(gettext("error scrub canceled on %s"),
8844 ctime(&end));
8845 return;
8846 }
8847 assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);
8848
8849 /* Error scrub is in progress. */
8850 if (pause == 0) {
8851 (void) printf(gettext("error scrub in progress since %s"),
8852 ctime(&start));
8853 } else {
8854 (void) printf(gettext("error scrub paused since %s"),
8855 ctime(&pause));
8856 (void) printf(gettext("\terror scrub started on %s"),
8857 ctime(&start));
8858 }
8859
8860 double fraction_done = (double)examined / (to_be_examined + examined);
8861 (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
8862 " blocks"), 100 * fraction_done, (u_longlong_t)examined);
8863
8864 (void) printf("\n");
8865 }
8866
8867 /*
8868 * Print out detailed scrub status.
8869 */
8870 static void
8871 print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
8872 {
8873 time_t start, end, pause;
8874 uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
8875 uint64_t elapsed, scan_rate, issue_rate;
8876 double fraction_done;
8877 char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
8878 char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];
8879
8880 printf(" ");
8881 printf_color(ANSI_BOLD, gettext("scan:"));
8882 printf(" ");
8883
8884 /* If there's never been a scan, there's not much to say. */
8885 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
8886 ps->pss_func >= POOL_SCAN_FUNCS) {
8887 (void) printf(gettext("none requested\n"));
8888 return;
8889 }
8890
8891 start = ps->pss_start_time;
8892 end = ps->pss_end_time;
8893 pause = ps->pss_pass_scrub_pause;
8894
8895 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
8896
8897 int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
8898 int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
8899 assert(is_resilver || is_scrub);
8900
8901 /* Scan is finished or canceled. */
8902 if (ps->pss_state == DSS_FINISHED) {
8903 secs_to_dhms(end - start, time_buf);
8904
8905 if (is_scrub) {
8906 (void) printf(gettext("scrub repaired %s "
8907 "in %s with %llu errors on %s"), processed_buf,
8908 time_buf, (u_longlong_t)ps->pss_errors,
8909 ctime(&end));
8910 } else if (is_resilver) {
8911 (void) printf(gettext("resilvered %s "
8912 "in %s with %llu errors on %s"), processed_buf,
8913 time_buf, (u_longlong_t)ps->pss_errors,
8914 ctime(&end));
8915 }
8916 return;
8917 } else if (ps->pss_state == DSS_CANCELED) {
8918 if (is_scrub) {
8919 (void) printf(gettext("scrub canceled on %s"),
8920 ctime(&end));
8921 } else if (is_resilver) {
8922 (void) printf(gettext("resilver canceled on %s"),
8923 ctime(&end));
8924 }
8925 return;
8926 }
8927
8928 assert(ps->pss_state == DSS_SCANNING);
8929
8930 /* Scan is in progress. Resilvers can't be paused. */
8931 if (is_scrub) {
8932 if (pause == 0) {
8933 (void) printf(gettext("scrub in progress since %s"),
8934 ctime(&start));
8935 } else {
8936 (void) printf(gettext("scrub paused since %s"),
8937 ctime(&pause));
8938 (void) printf(gettext("\tscrub started on %s"),
8939 ctime(&start));
8940 }
8941 } else if (is_resilver) {
8942 (void) printf(gettext("resilver in progress since %s"),
8943 ctime(&start));
8944 }
8945
8946 scanned = ps->pss_examined;
8947 pass_scanned = ps->pss_pass_exam;
8948 issued = ps->pss_issued;
8949 pass_issued = ps->pss_pass_issued;
8950 total_s = ps->pss_to_examine;
8951 total_i = ps->pss_to_examine - ps->pss_skipped;
8952
8953 /* we are only done with a block once we have issued the IO for it */
8954 fraction_done = (double)issued / total_i;
8955
8956 /* elapsed time for this pass, rounding up to 1 if it's 0 */
8957 elapsed = time(NULL) - ps->pss_pass_start;
8958 elapsed -= ps->pss_pass_scrub_spent_paused;
8959 elapsed = (elapsed != 0) ? elapsed : 1;
8960
8961 scan_rate = pass_scanned / elapsed;
8962 issue_rate = pass_issued / elapsed;
8963
8964 /* format all of the numbers we will be reporting */
8965 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
8966 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
8967 zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
8968 zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));
8969
8970 /* do not print estimated time if we have a paused scrub */
8971 (void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
8972 if (pause == 0 && scan_rate > 0) {
8973 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
8974 (void) printf(gettext(" at %s/s"), srate_buf);
8975 }
8976 (void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
8977 if (pause == 0 && issue_rate > 0) {
8978 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
8979 (void) printf(gettext(" at %s/s"), irate_buf);
8980 }
8981 (void) printf(gettext("\n"));
8982
8983 if (is_resilver) {
8984 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
8985 processed_buf, 100 * fraction_done);
8986 } else if (is_scrub) {
8987 (void) printf(gettext("\t%s repaired, %.2f%% done"),
8988 processed_buf, 100 * fraction_done);
8989 }
8990
8991 if (pause == 0) {
8992 /*
8993 * Only provide an estimate iff:
8994 * 1) we haven't yet issued all we expected, and
8995 * 2) the issue rate exceeds 10 MB/s, and
8996 * 3) it's either:
8997 * a) a resilver which has started repairs, or
8998 * b) a scrub which has entered the issue phase.
8999 */
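/*
 * e.g. with 100 GiB left to issue at 200 MiB/s the estimate is
 * (100 * 1024) / 200 = 512 s, printed as "00:08:32 to go".
 */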
9000 if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
9001 ((is_resilver && ps->pss_processed > 0) ||
9002 (is_scrub && issued > 0))) {
9003 secs_to_dhms((total_i - issued) / issue_rate, time_buf);
9004 (void) printf(gettext(", %s to go\n"), time_buf);
9005 } else {
9006 (void) printf(gettext(", no estimated "
9007 "completion time\n"));
9008 }
9009 } else {
9010 (void) printf(gettext("\n"));
9011 }
9012 }
9013
9014 static void
9015 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
9016 {
9017 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
9018 return;
9019
9020 printf(" ");
9021 printf_color(ANSI_BOLD, gettext("scan:"));
9022 printf(" ");
9023
9024 uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
9025 uint64_t bytes_issued = vrs->vrs_bytes_issued;
9026 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
9027 uint64_t bytes_est_s = vrs->vrs_bytes_est;
9028 uint64_t bytes_est_i = vrs->vrs_bytes_est;
9029 if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
9030 bytes_est_i -= vrs->vrs_pass_bytes_skipped;
9031 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
9032 (vrs->vrs_pass_time_ms + 1)) * 1000;
9033 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
9034 (vrs->vrs_pass_time_ms + 1)) * 1000;
9035 double scan_pct = MIN((double)bytes_scanned * 100 /
9036 (bytes_est_s + 1), 100);
9037
9038 /* Format all of the numbers we will be reporting */
9039 char bytes_scanned_buf[7], bytes_issued_buf[7];
9040 char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
9041 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
9042 zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
9043 sizeof (bytes_scanned_buf));
9044 zfs_nicebytes(bytes_issued, bytes_issued_buf,
9045 sizeof (bytes_issued_buf));
9046 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
9047 sizeof (bytes_rebuilt_buf));
9048 zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
9049 zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));
9050
9051 time_t start = vrs->vrs_start_time;
9052 time_t end = vrs->vrs_end_time;
9053
9054 /* Rebuild is finished or canceled. */
9055 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
9056 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
9057 (void) printf(gettext("resilvered (%s) %s in %s "
9058 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
9059 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
9060 return;
9061 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
9062 (void) printf(gettext("resilver (%s) canceled on %s"),
9063 vdev_name, ctime(&end));
9064 return;
9065 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
9066 (void) printf(gettext("resilver (%s) in progress since %s"),
9067 vdev_name, ctime(&start));
9068 }
9069
9070 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
9071
9072 (void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
9073 bytes_est_s_buf);
9074 if (scan_rate > 0) {
9075 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
9076 (void) printf(gettext(" at %s/s"), scan_rate_buf);
9077 }
9078 (void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
9079 bytes_est_i_buf);
9080 if (issue_rate > 0) {
9081 zfs_nicebytes(issue_rate, issue_rate_buf,
9082 sizeof (issue_rate_buf));
9083 (void) printf(gettext(" at %s/s"), issue_rate_buf);
9084 }
9085 (void) printf(gettext("\n"));
9086
9087 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
9088 bytes_rebuilt_buf, scan_pct);
9089
9090 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
9091 if (bytes_est_s >= bytes_scanned &&
9092 scan_rate >= 10 * 1024 * 1024) {
9093 secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
9094 time_buf);
9095 (void) printf(gettext(", %s to go\n"), time_buf);
9096 } else {
9097 (void) printf(gettext(", no estimated "
9098 "completion time\n"));
9099 }
9100 } else {
9101 (void) printf(gettext("\n"));
9102 }
9103 }
9104
9105 /*
9106 * Print rebuild status for top-level vdevs.
9107 */
9108 static void
9109 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
9110 {
9111 nvlist_t **child;
9112 uint_t children;
9113
9114 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9115 &child, &children) != 0)
9116 children = 0;
9117
9118 for (uint_t c = 0; c < children; c++) {
9119 vdev_rebuild_stat_t *vrs;
9120 uint_t i;
9121
9122 if (nvlist_lookup_uint64_array(child[c],
9123 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
9124 char *name = zpool_vdev_name(g_zfs, zhp,
9125 child[c], VDEV_NAME_TYPE_ID);
9126 print_rebuild_status_impl(vrs, i, name);
9127 free(name);
9128 }
9129 }
9130 }
9131
9132 /*
9133 * As we don't scrub checkpointed blocks, we want to warn the user that we
9134 * skipped scanning some blocks if a checkpoint exists or existed at any
9135 * time during the scan. If a sequential instead of healing reconstruction
9136 * was performed then the blocks were reconstructed. However, their checksums
9137 * have not been verified so we still print the warning.
9138 */
9139 static void
9140 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
9141 {
9142 if (ps == NULL || pcs == NULL)
9143 return;
9144
9145 if (pcs->pcs_state == CS_NONE ||
9146 pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
9147 return;
9148
9149 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
9150
9151 if (ps->pss_state == DSS_NONE)
9152 return;
9153
9154 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
9155 ps->pss_end_time < pcs->pcs_start_time)
9156 return;
9157
9158 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
9159 (void) printf(gettext(" scan warning: skipped blocks "
9160 "that are only referenced by the checkpoint.\n"));
9161 } else {
9162 assert(ps->pss_state == DSS_SCANNING);
9163 (void) printf(gettext(" scan warning: skipping blocks "
9164 "that are only referenced by the checkpoint.\n"));
9165 }
9166 }
9167
9168 /*
9169 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
9170 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
9171 * the last completed (or cancelled) rebuild.
9172 */
9173 static boolean_t
9174 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
9175 {
9176 nvlist_t **child;
9177 uint_t children;
9178 boolean_t rebuilding = B_FALSE;
9179 uint64_t end_time = 0;
9180
9181 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9182 &child, &children) != 0)
9183 children = 0;
9184
9185 for (uint_t c = 0; c < children; c++) {
9186 vdev_rebuild_stat_t *vrs;
9187 uint_t i;
9188
9189 if (nvlist_lookup_uint64_array(child[c],
9190 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
9191
9192 if (vrs->vrs_end_time > end_time)
9193 end_time = vrs->vrs_end_time;
9194
9195 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
9196 rebuilding = B_TRUE;
9197 end_time = 0;
9198 break;
9199 }
9200 }
9201 }
9202
9203 if (rebuild_end_time != NULL)
9204 *rebuild_end_time = end_time;
9205
9206 return (rebuilding);
9207 }
9208
9209 static void
9210 vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9211 int depth, boolean_t isspare, char *parent, nvlist_t *item)
9212 {
9213 nvlist_t *vds, **child, *ch = NULL;
9214 uint_t vsc, children;
9215 vdev_stat_t *vs;
9216 char *vname;
9217 uint64_t notpresent;
9218 const char *type, *path;
9219
9220 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
9221 &child, &children) != 0)
9222 children = 0;
9223 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
9224 (uint64_t **)&vs, &vsc) == 0);
9225 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
9226 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
9227 return;
9228
9229 if (cb->cb_print_unhealthy && depth > 0 &&
9230 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
9231 return;
9232 }
9233 vname = zpool_vdev_name(g_zfs, zhp, nv,
9234 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
9235 vds = fnvlist_alloc();
9236 fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int);
9237 if (cb->cb_flat_vdevs && parent != NULL) {
9238 fnvlist_add_string(vds, "parent", parent);
9239 }
9240
9241 if (isspare) {
9242 if (vs->vs_aux == VDEV_AUX_SPARED) {
9243 fnvlist_add_string(vds, "state", "INUSE");
9244 used_by_other(zhp, nv, vds);
9245 } else if (vs->vs_state == VDEV_STATE_HEALTHY)
9246 fnvlist_add_string(vds, "state", "AVAIL");
9247 } else {
9248 if (vs->vs_alloc) {
9249 nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc,
9250 cb->cb_literal, cb->cb_json_as_int,
9251 ZFS_NICENUM_BYTES);
9252 }
9253 if (vs->vs_space) {
9254 nice_num_str_nvlist(vds, "total_space", vs->vs_space,
9255 cb->cb_literal, cb->cb_json_as_int,
9256 ZFS_NICENUM_BYTES);
9257 }
9258 if (vs->vs_dspace) {
9259 nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,
9260 cb->cb_literal, cb->cb_json_as_int,
9261 ZFS_NICENUM_BYTES);
9262 }
9263 if (vs->vs_rsize) {
9264 nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,
9265 cb->cb_literal, cb->cb_json_as_int,
9266 ZFS_NICENUM_BYTES);
9267 }
9268 if (vs->vs_esize) {
9269 nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,
9270 cb->cb_literal, cb->cb_json_as_int,
9271 ZFS_NICENUM_BYTES);
9272 }
9273 if (vs->vs_self_healed) {
9274 nice_num_str_nvlist(vds, "self_healed",
9275 vs->vs_self_healed, cb->cb_literal,
9276 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9277 }
9278 if (vs->vs_pspace) {
9279 nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,
9280 cb->cb_literal, cb->cb_json_as_int,
9281 ZFS_NICENUM_BYTES);
9282 }
9283 nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,
9284 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9285 nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,
9286 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9287 nice_num_str_nvlist(vds, "checksum_errors",
9288 vs->vs_checksum_errors, cb->cb_literal,
9289 cb->cb_json_as_int, ZFS_NICENUM_1024);
9290 if (vs->vs_scan_processed) {
9291 nice_num_str_nvlist(vds, "scan_processed",
9292 vs->vs_scan_processed, cb->cb_literal,
9293 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9294 }
9295 if (vs->vs_checkpoint_space) {
9296 nice_num_str_nvlist(vds, "checkpoint_space",
9297 vs->vs_checkpoint_space, cb->cb_literal,
9298 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9299 }
9300 if (vs->vs_resilver_deferred) {
9301 nice_num_str_nvlist(vds, "resilver_deferred",
9302 vs->vs_resilver_deferred, B_TRUE,
9303 cb->cb_json_as_int, ZFS_NICENUM_1024);
9304 }
9305 if (children == 0) {
9306 nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,
9307 cb->cb_literal, cb->cb_json_as_int,
9308 ZFS_NICENUM_1024);
9309 }
9310 if (cb->cb_print_power) {
9311 if (children == 0) {
9312 /* Only leaf vdevs have physical slots */
9313 switch (zpool_power_current_state(zhp, (char *)
9314 fnvlist_lookup_string(nv,
9315 ZPOOL_CONFIG_PATH))) {
9316 case 0:
9317 fnvlist_add_string(vds, "power_state",
9318 "off");
9319 break;
9320 case 1:
9321 fnvlist_add_string(vds, "power_state",
9322 "on");
9323 break;
9324 default:
9325 fnvlist_add_string(vds, "power_state",
9326 "-");
9327 }
9328 } else {
9329 fnvlist_add_string(vds, "power_state", "-");
9330 }
9331 }
9332 }
9333
9334 if (cb->cb_print_dio_verify) {
9335 nice_num_str_nvlist(vds, "dio_verify_errors",
9336 vs->vs_dio_verify_errors, cb->cb_literal,
9337 cb->cb_json_as_int, ZFS_NICENUM_1024);
9338 }
9339
9340 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
9341 &notpresent) == 0) {
9342 nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
9343 1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9344 fnvlist_add_string(vds, "was",
9345 fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));
9346 } else if (vs->vs_aux != VDEV_AUX_NONE) {
9347 fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);
9348 } else if (children == 0 && !isspare &&
9349 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
9350 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
9351 vs->vs_configured_ashift < vs->vs_physical_ashift) {
9352 nice_num_str_nvlist(vds, "configured_ashift",
9353 vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,
9354 ZFS_NICENUM_1024);
9355 nice_num_str_nvlist(vds, "physical_ashift",
9356 vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,
9357 ZFS_NICENUM_1024);
9358 }
9359 if (vs->vs_scan_removing != 0) {
9360 nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing,
9361 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9362 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
9363 nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc,
9364 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9365 }
9366
9367 if (cb->vcdl != NULL) {
9368 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
9369 zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp),
9370 path, vds);
9371 }
9372 }
9373
9374 if (children == 0) {
9375 if (cb->cb_print_vdev_init) {
9376 if (vs->vs_initialize_state != 0) {
9377 uint64_t st = vs->vs_initialize_state;
9378 fnvlist_add_string(vds, "init_state",
9379 vdev_init_state_str[st]);
9380 nice_num_str_nvlist(vds, "initialized",
9381 vs->vs_initialize_bytes_done,
9382 cb->cb_literal, cb->cb_json_as_int,
9383 ZFS_NICENUM_BYTES);
9384 nice_num_str_nvlist(vds, "to_initialize",
9385 vs->vs_initialize_bytes_est,
9386 cb->cb_literal, cb->cb_json_as_int,
9387 ZFS_NICENUM_BYTES);
9388 nice_num_str_nvlist(vds, "init_time",
9389 vs->vs_initialize_action_time,
9390 cb->cb_literal, cb->cb_json_as_int,
9391 ZFS_NICE_TIMESTAMP);
9392 nice_num_str_nvlist(vds, "init_errors",
9393 vs->vs_initialize_errors,
9394 cb->cb_literal, cb->cb_json_as_int,
9395 ZFS_NICENUM_1024);
9396 } else {
9397 fnvlist_add_string(vds, "init_state",
9398 "UNINITIALIZED");
9399 }
9400 }
9401 if (cb->cb_print_vdev_trim) {
9402 if (vs->vs_trim_notsup == 0) {
9403 if (vs->vs_trim_state != 0) {
9404 uint64_t st = vs->vs_trim_state;
9405 fnvlist_add_string(vds, "trim_state",
9406 vdev_trim_state_str[st]);
9407 nice_num_str_nvlist(vds, "trimmed",
9408 vs->vs_trim_bytes_done,
9409 cb->cb_literal, cb->cb_json_as_int,
9410 ZFS_NICENUM_BYTES);
9411 nice_num_str_nvlist(vds, "to_trim",
9412 vs->vs_trim_bytes_est,
9413 cb->cb_literal, cb->cb_json_as_int,
9414 ZFS_NICENUM_BYTES);
9415 nice_num_str_nvlist(vds, "trim_time",
9416 vs->vs_trim_action_time,
9417 cb->cb_literal, cb->cb_json_as_int,
9418 ZFS_NICE_TIMESTAMP);
9419 nice_num_str_nvlist(vds, "trim_errors",
9420 vs->vs_trim_errors,
9421 cb->cb_literal, cb->cb_json_as_int,
9422 ZFS_NICENUM_1024);
9423 } else
9424 fnvlist_add_string(vds, "trim_state",
9425 "UNTRIMMED");
9426 }
9427 nice_num_str_nvlist(vds, "trim_notsup",
9428 vs->vs_trim_notsup, B_TRUE,
9429 cb->cb_json_as_int, ZFS_NICENUM_1024);
9430 }
9431 } else {
9432 ch = fnvlist_alloc();
9433 }
9434
9435 if (cb->cb_flat_vdevs && children == 0) {
9436 fnvlist_add_nvlist(item, vname, vds);
9437 }
9438
9439 for (int c = 0; c < children; c++) {
9440 uint64_t islog = B_FALSE, ishole = B_FALSE;
9441 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
9442 &islog);
9443 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
9444 &ishole);
9445 if (islog || ishole)
9446 continue;
9447 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
9448 continue;
9449 if (cb->cb_flat_vdevs) {
9450 vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
9451 vname, item);
9452 }
9453 vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
9454 vname, ch);
9455 }
9456
9457 if (ch != NULL) {
9458 if (!nvlist_empty(ch))
9459 fnvlist_add_nvlist(vds, "vdevs", ch);
9460 fnvlist_free(ch);
9461 }
9462 fnvlist_add_nvlist(item, vname, vds);
9463 fnvlist_free(vds);
9464 free(vname);
9465 }
9466
9467 static void
9468 class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9469 const char *class, nvlist_t *item)
9470 {
9471 uint_t c, children;
9472 nvlist_t **child;
9473 nvlist_t *class_obj = NULL;
9474
9475 if (!cb->cb_flat_vdevs)
9476 class_obj = fnvlist_alloc();
9477
9478 assert(zhp != NULL || !cb->cb_verbose);
9479
9480 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
9481 &children) != 0)
9482 return;
9483
9484 for (c = 0; c < children; c++) {
9485 uint64_t is_log = B_FALSE;
9486 const char *bias = NULL;
9487 const char *type = NULL;
9488 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
9489 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
9490
9491 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
9492 &is_log);
9493
9494 if (is_log) {
9495 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
9496 } else {
9497 (void) nvlist_lookup_string(child[c],
9498 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
9499 (void) nvlist_lookup_string(child[c],
9500 ZPOOL_CONFIG_TYPE, &type);
9501 }
9502
9503 if (bias == NULL || strcmp(bias, class) != 0)
9504 continue;
9505 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
9506 continue;
9507
9508 if (cb->cb_flat_vdevs) {
9509 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
9510 NULL, item);
9511 } else {
9512 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
9513 NULL, class_obj);
9514 }
9515 free(name);
9516 }
9517 if (!cb->cb_flat_vdevs) {
9518 if (!nvlist_empty(class_obj))
9519 fnvlist_add_nvlist(item, class, class_obj);
9520 fnvlist_free(class_obj);
9521 }
9522 }
9523
9524 static void
9525 l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9526 nvlist_t *item)
9527 {
9528 nvlist_t *l2c = NULL, **l2cache;
9529 uint_t nl2cache;
9530 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
9531 &l2cache, &nl2cache) == 0) {
9532 if (nl2cache == 0)
9533 return;
9534 if (!cb->cb_flat_vdevs)
9535 l2c = fnvlist_alloc();
9536 for (int i = 0; i < nl2cache; i++) {
9537 if (cb->cb_flat_vdevs) {
9538 vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
9539 B_FALSE, NULL, item);
9540 } else {
9541 vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
9542 B_FALSE, NULL, l2c);
9543 }
9544 }
9545 }
9546 if (!cb->cb_flat_vdevs) {
9547 if (!nvlist_empty(l2c))
9548 fnvlist_add_nvlist(item, "l2cache", l2c);
9549 fnvlist_free(l2c);
9550 }
9551 }
9552
9553 static void
9554 spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
9555 nvlist_t *item)
9556 {
9557 nvlist_t *sp = NULL, **spares;
9558 uint_t nspares;
9559 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
9560 &spares, &nspares) == 0) {
9561 if (nspares == 0)
9562 return;
9563 if (!cb->cb_flat_vdevs)
9564 sp = fnvlist_alloc();
9565 for (int i = 0; i < nspares; i++) {
9566 if (cb->cb_flat_vdevs) {
9567 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
9568 NULL, item);
9569 } else {
9570 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
9571 NULL, sp);
9572 }
9573 }
9574 }
9575 if (!cb->cb_flat_vdevs) {
9576 if (!nvlist_empty(sp))
9577 fnvlist_add_nvlist(item, "spares", sp);
9578 fnvlist_free(sp);
9579 }
9580 }
9581
9582 static void
9583 errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
9584 {
9585 uint64_t nerr;
9586 nvlist_t *config = zpool_get_config(zhp, NULL);
9587 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
9588 &nerr) == 0) {
9589 nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr,
9590 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9591 if (nerr != 0 && cb->cb_verbose) {
9592 nvlist_t *nverrlist = NULL;
9593 if (zpool_get_errlog(zhp, &nverrlist) == 0) {
9594 int i = 0;
9595 int count = 0;
9596 size_t len = MAXPATHLEN * 2;
9597 nvpair_t *elem = NULL;
9598
9599 for (nvpair_t *pair =
9600 nvlist_next_nvpair(nverrlist, NULL);
9601 pair != NULL;
9602 pair = nvlist_next_nvpair(nverrlist, pair))
9603 count++;
9604 char **errl = (char **)malloc(
9605 count * sizeof (char *));
9606
9607 while ((elem = nvlist_next_nvpair(nverrlist,
9608 elem)) != NULL) {
9609 nvlist_t *nv;
9610 uint64_t dsobj, obj;
9611
9612 verify(nvpair_value_nvlist(elem,
9613 &nv) == 0);
9614 verify(nvlist_lookup_uint64(nv,
9615 ZPOOL_ERR_DATASET, &dsobj) == 0);
9616 verify(nvlist_lookup_uint64(nv,
9617 ZPOOL_ERR_OBJECT, &obj) == 0);
9618 errl[i] = safe_malloc(len);
9619 zpool_obj_to_path(zhp, dsobj, obj,
9620 errl[i++], len);
9621 }
9622 nvlist_free(nverrlist);
9623 fnvlist_add_string_array(item, "errlist",
9624 (const char **)errl, count);
9625 for (int i = 0; i < count; ++i)
9626 free(errl[i]);
9627 free(errl);
9628 } else
9629 fnvlist_add_string(item, "errlist",
9630 strerror(errno));
9631 }
9632 }
9633 }
9634
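/*
 * Flatten one ddt_stat_t into 'item': block counts plus the logical,
 * physical, and deflated sizes for both unique and referenced entries.
 */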
9635 static void
9636 ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item)
9637 {
9638 nice_num_str_nvlist(item, "blocks", dds->dds_blocks,
9639 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9640 nice_num_str_nvlist(item, "logical_size", dds->dds_lsize,
9641 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9642 nice_num_str_nvlist(item, "physical_size", dds->dds_psize,
9643 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9644 nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize,
9645 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9646 nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks,
9647 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9648 nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize,
9649 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9650 nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize,
9651 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9652 nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize,
9653 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9654 }
9655
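/*
 * Collect dedup (DDT) statistics into a "dedup_stats" nvlist when -D
 * was given: object counts, on-disk/in-core/cached sizes, the table
 * totals, and the per-reference-count histogram.
 */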
9656 static void
9657 dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
9658 {
9659 nvlist_t *config;
9660 if (cb->cb_dedup_stats) {
9661 ddt_histogram_t *ddh;
9662 ddt_stat_t *dds;
9663 ddt_object_t *ddo;
9664 nvlist_t *ddt_stat, *ddt_obj, *dedup;
9665 uint_t c;
9666 uint64_t cspace_prop;
9667
9668 config = zpool_get_config(zhp, NULL);
9669 if (nvlist_lookup_uint64_array(config,
9670 ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0)
9671 return;
9672
9673 dedup = fnvlist_alloc();
9674 ddt_obj = fnvlist_alloc();
9675 nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count,
9676 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9677 if (ddo->ddo_count == 0) {
9678 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
9679 ddt_obj);
9680 fnvlist_add_nvlist(item, "dedup_stats", dedup);
9681 fnvlist_free(ddt_obj);
9682 fnvlist_free(dedup);
9683 return;
9684 } else {
9685 nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace,
9686 cb->cb_literal, cb->cb_json_as_int,
9687 ZFS_NICENUM_1024);
9688 nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace,
9689 cb->cb_literal, cb->cb_json_as_int,
9690 ZFS_NICENUM_1024);
9691 /*
9692 * Squash cached size into in-core size to handle race.
9693 * Only include cached size if it is available.
9694 */
9695 cspace_prop = zpool_get_prop_int(zhp,
9696 ZPOOL_PROP_DEDUPCACHED, NULL);
9697 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
9698 nice_num_str_nvlist(dedup, "cspace", cspace_prop,
9699 cb->cb_literal, cb->cb_json_as_int,
9700 ZFS_NICENUM_1024);
9701 }
9702
9703 ddt_stat = fnvlist_alloc();
9704 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
9705 (uint64_t **)&dds, &c) == 0) {
9706 nvlist_t *total = fnvlist_alloc();
9707 if (dds->dds_blocks == 0)
9708 fnvlist_add_string(total, "blocks", "0");
9709 else
9710 ddt_stats_nvlist(dds, cb, total);
9711 fnvlist_add_nvlist(ddt_stat, "total", total);
9712 fnvlist_free(total);
9713 }
9714 if (nvlist_lookup_uint64_array(config,
9715 ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) {
9716 nvlist_t *hist = fnvlist_alloc();
9717 nvlist_t *entry = NULL;
9718 char buf[16];
9719 for (int h = 0; h < 64; h++) {
9720 if (ddh->ddh_stat[h].dds_blocks != 0) {
9721 entry = fnvlist_alloc();
9722 ddt_stats_nvlist(&ddh->ddh_stat[h], cb,
9723 entry);
9724 snprintf(buf, 16, "%d", h);
9725 fnvlist_add_nvlist(hist, buf, entry);
9726 fnvlist_free(entry);
9727 }
9728 }
9729 if (!nvlist_empty(hist))
9730 fnvlist_add_nvlist(ddt_stat, "histogram", hist);
9731 fnvlist_free(hist);
9732 }
9733
9734 if (!nvlist_empty(ddt_obj)) {
9735 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
9736 ddt_obj);
9737 }
9738 fnvlist_free(ddt_obj);
9739 if (!nvlist_empty(ddt_stat)) {
9740 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS,
9741 ddt_stat);
9742 }
9743 fnvlist_free(ddt_stat);
9744 if (!nvlist_empty(dedup))
9745 fnvlist_add_nvlist(item, "dedup_stats", dedup);
9746 fnvlist_free(dedup);
9747 }
9748 }
9749
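/*
 * Collect RAIDZ expansion progress (expanding vdev, state, timestamps,
 * bytes reflowed) into the JSON output.
 */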
9750 static void
9751 raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9752 nvlist_t *nvroot, nvlist_t *item)
9753 {
9754 uint_t c;
9755 pool_raidz_expand_stat_t *pres = NULL;
9756 if (nvlist_lookup_uint64_array(nvroot,
9757 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) {
9758 nvlist_t **child;
9759 uint_t children;
9760 nvlist_t *nv = fnvlist_alloc();
9761 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9762 &child, &children) == 0);
9763 assert(pres->pres_expanding_vdev < children);
9764 char *name =
9765 zpool_vdev_name(g_zfs, zhp,
9766 child[pres->pres_expanding_vdev], 0);
9767 fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int);
9768 fnvlist_add_string(nv, "state",
9769 pool_scan_state_str[pres->pres_state]);
9770 nice_num_str_nvlist(nv, "expanding_vdev",
9771 pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int,
9772 ZFS_NICENUM_1024);
9773 nice_num_str_nvlist(nv, "start_time", pres->pres_start_time,
9774 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9775 nice_num_str_nvlist(nv, "end_time", pres->pres_end_time,
9776 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9777 nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow,
9778 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9779 nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed,
9780 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9781 nice_num_str_nvlist(nv, "waiting_for_resilver",
9782 pres->pres_waiting_for_resilver, B_TRUE,
9783 cb->cb_json_as_int, ZFS_NICENUM_1024);
9784 fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv);
9785 fnvlist_free(nv);
9786 free(name);
9787 }
9788 }
9789
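/*
 * Collect pool checkpoint state, creation time, and space consumed
 * into the JSON output.
 */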
9790 static void
9791 checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb,
9792 nvlist_t *item)
9793 {
9794 uint_t c;
9795 pool_checkpoint_stat_t *pcs = NULL;
9796 if (nvlist_lookup_uint64_array(nvroot,
9797 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) {
9798 nvlist_t *nv = fnvlist_alloc();
9799 fnvlist_add_string(nv, "state",
9800 checkpoint_state_str[pcs->pcs_state]);
9801 nice_num_str_nvlist(nv, "start_time",
9802 pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int,
9803 ZFS_NICE_TIMESTAMP);
9804 nice_num_str_nvlist(nv, "space",
9805 pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int,
9806 ZFS_NICENUM_BYTES);
9807 fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv);
9808 fnvlist_free(nv);
9809 }
9810 }
9811
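/*
 * Collect device removal progress (removing vdev, state, timestamps,
 * bytes copied, mapping memory) into the JSON output.
 */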
9812 static void
9813 removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9814 nvlist_t *nvroot, nvlist_t *item)
9815 {
9816 uint_t c;
9817 pool_removal_stat_t *prs = NULL;
9818 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS,
9819 (uint64_t **)&prs, &c) == 0) {
9820 if (prs->prs_state != DSS_NONE) {
9821 nvlist_t **child;
9822 uint_t children;
9823 verify(nvlist_lookup_nvlist_array(nvroot,
9824 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
9825 assert(prs->prs_removing_vdev < children);
9826 char *vdev_name = zpool_vdev_name(g_zfs, zhp,
9827 child[prs->prs_removing_vdev], B_TRUE);
9828 nvlist_t *nv = fnvlist_alloc();
9829 fill_vdev_info(nv, zhp, vdev_name, B_FALSE,
9830 cb->cb_json_as_int);
9831 fnvlist_add_string(nv, "state",
9832 pool_scan_state_str[prs->prs_state]);
9833 nice_num_str_nvlist(nv, "removing_vdev",
9834 prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int,
9835 ZFS_NICENUM_1024);
9836 nice_num_str_nvlist(nv, "start_time",
9837 prs->prs_start_time, cb->cb_literal,
9838 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9839 nice_num_str_nvlist(nv, "end_time", prs->prs_end_time,
9840 cb->cb_literal, cb->cb_json_as_int,
9841 ZFS_NICE_TIMESTAMP);
9842 nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy,
9843 cb->cb_literal, cb->cb_json_as_int,
9844 ZFS_NICENUM_BYTES);
9845 nice_num_str_nvlist(nv, "copied", prs->prs_copied,
9846 cb->cb_literal, cb->cb_json_as_int,
9847 ZFS_NICENUM_BYTES);
9848 nice_num_str_nvlist(nv, "mapping_memory",
9849 prs->prs_mapping_memory, cb->cb_literal,
9850 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9851 fnvlist_add_nvlist(item,
9852 ZPOOL_CONFIG_REMOVAL_STATS, nv);
9853 fnvlist_free(nv);
9854 free(vdev_name);
9855 }
9856 }
9857 }
9858
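/*
 * Collect scrub/resilver scan progress, error scrub progress, and any
 * per-top-level-vdev sequential rebuild progress into a
 * ZPOOL_CONFIG_SCAN_STATS nvlist on 'item'.
 */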
9859 static void
9860 scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
9861 nvlist_t *nvroot, nvlist_t *item)
9862 {
9863 pool_scan_stat_t *ps = NULL;
9864 uint_t c;
9865 nvlist_t *scan = fnvlist_alloc();
9866 nvlist_t **child;
9867 uint_t children;
9868
9869 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
9870 (uint64_t **)&ps, &c) == 0) {
9871 fnvlist_add_string(scan, "function",
9872 pool_scan_func_str[ps->pss_func]);
9873 fnvlist_add_string(scan, "state",
9874 pool_scan_state_str[ps->pss_state]);
9875 nice_num_str_nvlist(scan, "start_time", ps->pss_start_time,
9876 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9877 nice_num_str_nvlist(scan, "end_time", ps->pss_end_time,
9878 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9879 nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine,
9880 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9881 nice_num_str_nvlist(scan, "examined", ps->pss_examined,
9882 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9883 nice_num_str_nvlist(scan, "skipped", ps->pss_skipped,
9884 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9885 nice_num_str_nvlist(scan, "processed", ps->pss_processed,
9886 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9887 nice_num_str_nvlist(scan, "errors", ps->pss_errors,
9888 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9889 nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam,
9890 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9891 nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start,
9892 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9893 nice_num_str_nvlist(scan, "scrub_pause",
9894 ps->pss_pass_scrub_pause, cb->cb_literal,
9895 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
9896 nice_num_str_nvlist(scan, "scrub_spent_paused",
9897 ps->pss_pass_scrub_spent_paused,
9898 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9899 nice_num_str_nvlist(scan, "issued_bytes_per_scan",
9900 ps->pss_pass_issued, cb->cb_literal,
9901 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9902 nice_num_str_nvlist(scan, "issued", ps->pss_issued,
9903 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9904 if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
9905 ps->pss_error_scrub_start > ps->pss_start_time) {
9906 fnvlist_add_string(scan, "err_scrub_func",
9907 pool_scan_func_str[ps->pss_error_scrub_func]);
9908 fnvlist_add_string(scan, "err_scrub_state",
9909 pool_scan_state_str[ps->pss_error_scrub_state]);
9910 nice_num_str_nvlist(scan, "err_scrub_start_time",
9911 ps->pss_error_scrub_start,
9912 cb->cb_literal, cb->cb_json_as_int,
9913 ZFS_NICE_TIMESTAMP);
9914 nice_num_str_nvlist(scan, "err_scrub_end_time",
9915 ps->pss_error_scrub_end,
9916 cb->cb_literal, cb->cb_json_as_int,
9917 ZFS_NICE_TIMESTAMP);
9918 nice_num_str_nvlist(scan, "err_scrub_examined",
9919 ps->pss_error_scrub_examined,
9920 cb->cb_literal, cb->cb_json_as_int,
9921 ZFS_NICENUM_1024);
9922 nice_num_str_nvlist(scan, "err_scrub_to_examine",
9923 ps->pss_error_scrub_to_be_examined,
9924 cb->cb_literal, cb->cb_json_as_int,
9925 ZFS_NICENUM_1024);
9926 nice_num_str_nvlist(scan, "err_scrub_pause",
9927 ps->pss_pass_error_scrub_pause,
9928 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
9929 }
9930 }
9931
9932 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
9933 &child, &children) == 0) {
9934 vdev_rebuild_stat_t *vrs;
9935 uint_t i;
9936 char *name;
9937 nvlist_t *nv;
9938 nvlist_t *rebuild = fnvlist_alloc();
9939 uint64_t st;
9940 for (uint_t c = 0; c < children; c++) {
9941 if (nvlist_lookup_uint64_array(child[c],
9942 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs,
9943 &i) == 0) {
9944 if (vrs->vrs_state != VDEV_REBUILD_NONE) {
9945 nv = fnvlist_alloc();
9946 name = zpool_vdev_name(g_zfs, zhp,
9947 child[c], VDEV_NAME_TYPE_ID);
9948 fill_vdev_info(nv, zhp, name, B_FALSE,
9949 cb->cb_json_as_int);
9950 st = vrs->vrs_state;
9951 fnvlist_add_string(nv, "state",
9952 vdev_rebuild_state_str[st]);
9953 nice_num_str_nvlist(nv, "start_time",
9954 vrs->vrs_start_time, cb->cb_literal,
9955 cb->cb_json_as_int,
9956 ZFS_NICE_TIMESTAMP);
9957 nice_num_str_nvlist(nv, "end_time",
9958 vrs->vrs_end_time, cb->cb_literal,
9959 cb->cb_json_as_int,
9960 ZFS_NICE_TIMESTAMP);
9961 nice_num_str_nvlist(nv, "scan_time",
9962 vrs->vrs_scan_time_ms * 1000000,
9963 cb->cb_literal, cb->cb_json_as_int,
9964 ZFS_NICENUM_TIME);
9965 nice_num_str_nvlist(nv, "scanned",
9966 vrs->vrs_bytes_scanned,
9967 cb->cb_literal, cb->cb_json_as_int,
9968 ZFS_NICENUM_BYTES);
9969 nice_num_str_nvlist(nv, "issued",
9970 vrs->vrs_bytes_issued,
9971 cb->cb_literal, cb->cb_json_as_int,
9972 ZFS_NICENUM_BYTES);
9973 nice_num_str_nvlist(nv, "rebuilt",
9974 vrs->vrs_bytes_rebuilt,
9975 cb->cb_literal, cb->cb_json_as_int,
9976 ZFS_NICENUM_BYTES);
9977 nice_num_str_nvlist(nv, "to_scan",
9978 vrs->vrs_bytes_est, cb->cb_literal,
9979 cb->cb_json_as_int,
9980 ZFS_NICENUM_BYTES);
9981 nice_num_str_nvlist(nv, "errors",
9982 vrs->vrs_errors, cb->cb_literal,
9983 cb->cb_json_as_int,
9984 ZFS_NICENUM_1024);
9985 nice_num_str_nvlist(nv, "pass_time",
9986 vrs->vrs_pass_time_ms * 1000000,
9987 cb->cb_literal, cb->cb_json_as_int,
9988 ZFS_NICENUM_TIME);
9989 nice_num_str_nvlist(nv, "pass_scanned",
9990 vrs->vrs_pass_bytes_scanned,
9991 cb->cb_literal, cb->cb_json_as_int,
9992 ZFS_NICENUM_BYTES);
9993 nice_num_str_nvlist(nv, "pass_issued",
9994 vrs->vrs_pass_bytes_issued,
9995 cb->cb_literal, cb->cb_json_as_int,
9996 ZFS_NICENUM_BYTES);
9997 nice_num_str_nvlist(nv, "pass_skipped",
9998 vrs->vrs_pass_bytes_skipped,
9999 cb->cb_literal, cb->cb_json_as_int,
10000 ZFS_NICENUM_BYTES);
10001 fnvlist_add_nvlist(rebuild, name, nv);
10002 free(name);
10003 }
10004 }
10005 }
10006 if (!nvlist_empty(rebuild))
10007 fnvlist_add_nvlist(scan, "rebuild_stats", rebuild);
10008 fnvlist_free(rebuild);
10009 }
10010
10011 if (!nvlist_empty(scan))
10012 fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan);
10013 fnvlist_free(scan);
10014 }
10015
10016 /*
10017 * Print the scan status.
10018 */
10019 static void
10020 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
10021 {
10022 uint64_t rebuild_end_time = 0, resilver_end_time = 0;
10023 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
10024 boolean_t have_errorscrub = B_FALSE;
10025 boolean_t active_resilver = B_FALSE;
10026 pool_checkpoint_stat_t *pcs = NULL;
10027 pool_scan_stat_t *ps = NULL;
10028 uint_t c;
10029 time_t scrub_start = 0, errorscrub_start = 0;
10030
10031 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
10032 (uint64_t **)&ps, &c) == 0) {
10033 if (ps->pss_func == POOL_SCAN_RESILVER) {
10034 resilver_end_time = ps->pss_end_time;
10035 active_resilver = (ps->pss_state == DSS_SCANNING);
10036 }
10037
10038 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
10039 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
10040 scrub_start = ps->pss_start_time;
10041 if (c > offsetof(pool_scan_stat_t,
10042 pss_pass_error_scrub_pause) / 8) {
10043 have_errorscrub = (ps->pss_error_scrub_func ==
10044 POOL_SCAN_ERRORSCRUB);
10045 errorscrub_start = ps->pss_error_scrub_start;
10046 }
10047 }
10048
10049 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
10050 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
10051
10052 /* Print whichever of the scrub or error scrub started most recently. */
10053 if (have_scrub && scrub_start > errorscrub_start)
10054 print_scan_scrub_resilver_status(ps);
10055 else if (have_errorscrub && errorscrub_start >= scrub_start)
10056 print_err_scrub_status(ps);
10057
10058 /*
10059 * When there is an active resilver or rebuild print its status.
10060 * Otherwise print the status of the last resilver or rebuild.
10061 */
10062 if (active_resilver || (!active_rebuild && have_resilver &&
10063 resilver_end_time && resilver_end_time > rebuild_end_time)) {
10064 print_scan_scrub_resilver_status(ps);
10065 } else if (active_rebuild || (!active_resilver && have_rebuild &&
10066 rebuild_end_time && rebuild_end_time > resilver_end_time)) {
10067 print_rebuild_status(zhp, nvroot);
10068 }
10069
10070 (void) nvlist_lookup_uint64_array(nvroot,
10071 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
10072 print_checkpoint_scan_warning(ps, pcs);
10073 }
10074
10075 /*
10076 * Print out detailed removal status.
10077 */
10078 static void
10079 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
10080 {
10081 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
10082 time_t start, end;
10083 nvlist_t *config, *nvroot;
10084 nvlist_t **child;
10085 uint_t children;
10086 char *vdev_name;
10087
10088 if (prs == NULL || prs->prs_state == DSS_NONE)
10089 return;
10090
10091 /*
10092 * Determine name of vdev.
10093 */
10094 config = zpool_get_config(zhp, NULL);
10095 nvroot = fnvlist_lookup_nvlist(config,
10096 ZPOOL_CONFIG_VDEV_TREE);
10097 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
10098 &child, &children) == 0);
10099 assert(prs->prs_removing_vdev < children);
10100 vdev_name = zpool_vdev_name(g_zfs, zhp,
10101 child[prs->prs_removing_vdev], B_TRUE);
10102
10103 printf_color(ANSI_BOLD, gettext("remove: "));
10104
10105 start = prs->prs_start_time;
10106 end = prs->prs_end_time;
10107 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
10108
10109 /*
10110 * Removal is finished or canceled.
10111 */
10112 if (prs->prs_state == DSS_FINISHED) {
10113 uint64_t minutes_taken = (end - start) / 60;
10114
10115 (void) printf(gettext("Removal of vdev %llu copied %s "
10116 "in %lluh%um, completed on %s"),
10117 (longlong_t)prs->prs_removing_vdev,
10118 copied_buf,
10119 (u_longlong_t)(minutes_taken / 60),
10120 (uint_t)(minutes_taken % 60),
10121 ctime((time_t *)&end));
10122 } else if (prs->prs_state == DSS_CANCELED) {
10123 (void) printf(gettext("Removal of %s canceled on %s"),
10124 vdev_name, ctime(&end));
10125 } else {
10126 uint64_t copied, total, elapsed, rate, mins_left, hours_left;
10127 double fraction_done;
10128
10129 assert(prs->prs_state == DSS_SCANNING);
10130
10131 /*
10132 * Removal is in progress.
10133 */
10134 (void) printf(gettext(
10135 "Evacuation of %s in progress since %s"),
10136 vdev_name, ctime(&start));
10137
10138 copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
10139 total = prs->prs_to_copy;
10140 fraction_done = (double)copied / total;
10141
10142 /* elapsed time for this pass */
10143 elapsed = time(NULL) - prs->prs_start_time;
10144 elapsed = elapsed > 0 ? elapsed : 1;
10145 rate = copied / elapsed;
10146 rate = rate > 0 ? rate : 1;
10147 mins_left = ((total - copied) / rate) / 60;
10148 hours_left = mins_left / 60;
10149
10150 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
10151 zfs_nicenum(total, total_buf, sizeof (total_buf));
10152 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
10153
10154 /*
10155 * do not print estimated time if hours_left is more than
10156 * 30 days
10157 */
10158 (void) printf(gettext(
10159 "\t%s copied out of %s at %s/s, %.2f%% done"),
10160 examined_buf, total_buf, rate_buf, 100 * fraction_done);
10161 if (hours_left < (30 * 24)) {
10162 (void) printf(gettext(", %lluh%um to go\n"),
10163 (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
10164 } else {
10165 (void) printf(gettext(
10166 ", (copy is slow, no estimated time)\n"));
10167 }
10168 }
10169 free(vdev_name);
10170
10171 if (prs->prs_mapping_memory > 0) {
10172 char mem_buf[7];
10173 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
10174 (void) printf(gettext(
10175 "\t%s memory used for removed device mappings\n"),
10176 mem_buf);
10177 }
10178 }
10179
10180 /*
10181 * Print out detailed raidz expansion status.
10182 */
10183 static void
10184 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
10185 {
10186 char copied_buf[7];
10187
10188 if (pres == NULL || pres->pres_state == DSS_NONE)
10189 return;
10190
10191 /*
10192 * Determine name of vdev.
10193 */
10194 nvlist_t *config = zpool_get_config(zhp, NULL);
10195 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
10196 ZPOOL_CONFIG_VDEV_TREE);
10197 nvlist_t **child;
10198 uint_t children;
10199 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
10200 &child, &children) == 0);
10201 assert(pres->pres_expanding_vdev < children);
10202
10203 printf_color(ANSI_BOLD, gettext("expand: "));
10204
10205 time_t start = pres->pres_start_time;
10206 time_t end = pres->pres_end_time;
10207 char *vname =
10208 zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
10209 zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));
10210
10211 /*
10212 * Expansion is finished or canceled.
10213 */
10214 if (pres->pres_state == DSS_FINISHED) {
10215 char time_buf[32];
10216 secs_to_dhms(end - start, time_buf);
10217
10218 (void) printf(gettext("expanded %s-%u copied %s in %s, "
10219 "on %s"), vname, (int)pres->pres_expanding_vdev,
10220 copied_buf, time_buf, ctime((time_t *)&end));
10221 } else {
10222 char examined_buf[7], total_buf[7], rate_buf[7];
10223 uint64_t copied, total, elapsed, rate, secs_left;
10224 double fraction_done;
10225
10226 assert(pres->pres_state == DSS_SCANNING);
10227
10228 /*
10229 * Expansion is in progress.
10230 */
10231 (void) printf(gettext(
10232 "expansion of %s-%u in progress since %s"),
10233 vname, (int)pres->pres_expanding_vdev, ctime(&start));
10234
10235 copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
10236 total = pres->pres_to_reflow;
10237 fraction_done = (double)copied / total;
10238
10239 /* elapsed time for this pass */
10240 elapsed = time(NULL) - pres->pres_start_time;
10241 elapsed = elapsed > 0 ? elapsed : 1;
10242 rate = copied / elapsed;
10243 rate = rate > 0 ? rate : 1;
10244 secs_left = (total - copied) / rate;
10245
10246 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
10247 zfs_nicenum(total, total_buf, sizeof (total_buf));
10248 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
10249
10250 /*
10251 * do not print estimated time if secs_left is more than
10252 * 30 days
10253 */
10254 (void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
10255 examined_buf, total_buf, rate_buf, 100 * fraction_done);
10256 if (pres->pres_waiting_for_resilver) {
10257 (void) printf(gettext(", paused for resilver or "
10258 "clear\n"));
10259 } else if (secs_left < (30 * 24 * 3600)) {
10260 char time_buf[32];
10261 secs_to_dhms(secs_left, time_buf);
10262 (void) printf(gettext(", %s to go\n"), time_buf);
10263 } else {
10264 (void) printf(gettext(
10265 ", (copy is slow, no estimated time)\n"));
10266 }
10267 }
10268 free(vname);
10269 }
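
/*
 * Print out the status of an existing or discarding pool checkpoint.
 */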
10270 static void
10271 print_checkpoint_status(pool_checkpoint_stat_t *pcs)
10272 {
10273 time_t start;
10274 char space_buf[7];
10275
10276 if (pcs == NULL || pcs->pcs_state == CS_NONE)
10277 return;
10278
10279 (void) printf(gettext("checkpoint: "));
10280
10281 start = pcs->pcs_start_time;
10282 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
10283
10284 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
10285 char *date = ctime(&start);
10286
10287 /*
10288 * ctime() adds a newline at the end of the generated
10289 * string, thus the weird format specifier and the
10290 * strlen() call used to chop it off from the output.
10291 */
10292 (void) printf(gettext("created %.*s, consumes %s\n"),
10293 (int)(strlen(date) - 1), date, space_buf);
10294 return;
10295 }
10296
10297 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
10298
10299 (void) printf(gettext("discarding, %s remaining.\n"),
10300 space_buf);
10301 }
10302
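/*
 * Print the list of files with permanent errors (the 'errors:' section
 * of verbose 'zpool status' output).
 */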
10303 static void
10304 print_error_log(zpool_handle_t *zhp)
10305 {
10306 nvlist_t *nverrlist = NULL;
10307 nvpair_t *elem;
10308 char *pathname;
10309 size_t len = MAXPATHLEN * 2;
10310
10311 if (zpool_get_errlog(zhp, &nverrlist) != 0)
10312 return;
10313
10314 (void) printf("errors: Permanent errors have been "
10315 "detected in the following files:\n\n");
10316
10317 pathname = safe_malloc(len);
10318 elem = NULL;
10319 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
10320 nvlist_t *nv;
10321 uint64_t dsobj, obj;
10322
10323 verify(nvpair_value_nvlist(elem, &nv) == 0);
10324 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
10325 &dsobj) == 0);
10326 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
10327 &obj) == 0);
10328 zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
10329 (void) printf("%7s %s\n", "", pathname);
10330 }
10331 free(pathname);
10332 nvlist_free(nverrlist);
10333 }
10334
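/*
 * Print the configured hot spares and their status.
 */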
10335 static void
10336 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
10337 uint_t nspares)
10338 {
10339 uint_t i;
10340 char *name;
10341
10342 if (nspares == 0)
10343 return;
10344
10345 (void) printf(gettext("\tspares\n"));
10346
10347 for (i = 0; i < nspares; i++) {
10348 name = zpool_vdev_name(g_zfs, zhp, spares[i],
10349 cb->cb_name_flags);
10350 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
10351 free(name);
10352 }
10353 }
10354
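/*
 * Print the configured cache (L2ARC) devices and their status.
 */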
10355 static void
10356 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
10357 uint_t nl2cache)
10358 {
10359 uint_t i;
10360 char *name;
10361
10362 if (nl2cache == 0)
10363 return;
10364
10365 (void) printf(gettext("\tcache\n"));
10366
10367 for (i = 0; i < nl2cache; i++) {
10368 name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
10369 cb->cb_name_flags);
10370 print_status_config(zhp, cb, name, l2cache[i], 2,
10371 B_FALSE, NULL);
10372 free(name);
10373 }
10374 }
10375
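/*
 * Print the DDT summary line and histogram for 'zpool status -D'.
 * The cached DDT size is clamped to the in-core size to hide the race
 * between reading the two values.
 */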
10376 static void
10377 print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal)
10378 {
10379 ddt_histogram_t *ddh;
10380 ddt_stat_t *dds;
10381 ddt_object_t *ddo;
10382 uint_t c;
10383 /* Extra space provided for literal display */
10384 char dspace[32], mspace[32], cspace[32];
10385 uint64_t cspace_prop;
10386 enum zfs_nicenum_format format;
10387 zprop_source_t src;
10388
10389 /*
10390 * If the pool was faulted then we may not have been able to
10391 * obtain the config. Otherwise, if we have anything in the dedup
10392 * table continue processing the stats.
10393 */
10394 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
10395 (uint64_t **)&ddo, &c) != 0)
10396 return;
10397
10398 (void) printf("\n");
10399 (void) printf(gettext(" dedup: "));
10400 if (ddo->ddo_count == 0) {
10401 (void) printf(gettext("no DDT entries\n"));
10402 return;
10403 }
10404
10405 /*
10406 * Squash cached size into in-core size to handle race.
10407 * Only include cached size if it is available.
10408 */
10409 cspace_prop = zpool_get_prop_int(zhp, ZPOOL_PROP_DEDUPCACHED, &src);
10410 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
10411 format = literal ? ZFS_NICENUM_RAW : ZFS_NICENUM_1024;
10412 zfs_nicenum_format(cspace_prop, cspace, sizeof (cspace), format);
10413 zfs_nicenum_format(ddo->ddo_dspace, dspace, sizeof (dspace), format);
10414 zfs_nicenum_format(ddo->ddo_mspace, mspace, sizeof (mspace), format);
10415 (void) printf("DDT entries %llu, size %s on disk, %s in core",
10416 (u_longlong_t)ddo->ddo_count,
10417 dspace,
10418 mspace);
10419 if (src != ZPROP_SRC_DEFAULT) {
10420 (void) printf(", %s cached (%.02f%%)",
10421 cspace,
10422 (double)cspace_prop / (double)ddo->ddo_mspace * 100.0);
10423 }
10424 (void) printf("\n");
10425
10426 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
10427 (uint64_t **)&dds, &c) == 0);
10428 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
10429 (uint64_t **)&ddh, &c) == 0);
10430 zpool_dump_ddt(dds, ddh);
10431 }
10432
10433 #define ST_SIZE 4096
10434 #define AC_SIZE 2048
10435
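/*
 * Fill in and emit the 'status:' and 'action:' text for the given
 * status code and errata. In JSON mode the strings are attached to
 * 'item' instead of being printed.
 */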
10436 static void
10437 print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
10438 zpool_status_t reason, zpool_errata_t errata, nvlist_t *item)
10439 {
10440 char status[ST_SIZE];
10441 char action[AC_SIZE];
10442 memset(status, 0, ST_SIZE);
10443 memset(action, 0, AC_SIZE);
10444
10445 switch (reason) {
10446 case ZPOOL_STATUS_MISSING_DEV_R:
10447 snprintf(status, ST_SIZE, gettext("One or more devices could "
10448 "not be opened. Sufficient replicas exist for\n\tthe pool "
10449 "to continue functioning in a degraded state.\n"));
10450 snprintf(action, AC_SIZE, gettext("Attach the missing device "
10451 "and online it using 'zpool online'.\n"));
10452 break;
10453
10454 case ZPOOL_STATUS_MISSING_DEV_NR:
10455 snprintf(status, ST_SIZE, gettext("One or more devices could "
10456 "not be opened. There are insufficient\n\treplicas for the"
10457 " pool to continue functioning.\n"));
10458 snprintf(action, AC_SIZE, gettext("Attach the missing device "
10459 "and online it using 'zpool online'.\n"));
10460 break;
10461
10462 case ZPOOL_STATUS_CORRUPT_LABEL_R:
10463 snprintf(status, ST_SIZE, gettext("One or more devices could "
10464 "not be used because the label is missing or\n\tinvalid. "
10465 "Sufficient replicas exist for the pool to continue\n\t"
10466 "functioning in a degraded state.\n"));
10467 snprintf(action, AC_SIZE, gettext("Replace the device using "
10468 "'zpool replace'.\n"));
10469 break;
10470
10471 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
10472 snprintf(status, ST_SIZE, gettext("One or more devices could "
10473 "not be used because the label is missing \n\tor invalid. "
10474 "There are insufficient replicas for the pool to "
10475 "continue\n\tfunctioning.\n"));
10476 zpool_explain_recover(zpool_get_handle(zhp),
10477 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
10478 action, AC_SIZE);
10479 break;
10480
10481 case ZPOOL_STATUS_FAILING_DEV:
10482 snprintf(status, ST_SIZE, gettext("One or more devices has "
10483 "experienced an unrecoverable error. An\n\tattempt was "
10484 "made to correct the error. Applications are "
10485 "unaffected.\n"));
10486 snprintf(action, AC_SIZE, gettext("Determine if the "
10487 "device needs to be replaced, and clear the errors\n\tusing"
10488 " 'zpool clear' or replace the device with 'zpool "
10489 "replace'.\n"));
10490 break;
10491
10492 case ZPOOL_STATUS_OFFLINE_DEV:
10493 snprintf(status, ST_SIZE, gettext("One or more devices has "
10494 "been taken offline by the administrator.\n\tSufficient "
10495 "replicas exist for the pool to continue functioning in "
10496 "a\n\tdegraded state.\n"));
10497 snprintf(action, AC_SIZE, gettext("Online the device "
10498 "using 'zpool online' or replace the device with\n\t'zpool "
10499 "replace'.\n"));
10500 break;
10501
10502 case ZPOOL_STATUS_REMOVED_DEV:
10503 snprintf(status, ST_SIZE, gettext("One or more devices have "
10504 "been removed.\n\tSufficient replicas exist for the pool "
10505 "to continue functioning in a\n\tdegraded state.\n"));
10506 snprintf(action, AC_SIZE, gettext("Online the device "
10507 "using zpool online' or replace the device with\n\t'zpool "
10508 "replace'.\n"));
10509 break;
10510
10511 case ZPOOL_STATUS_RESILVERING:
10512 case ZPOOL_STATUS_REBUILDING:
10513 snprintf(status, ST_SIZE, gettext("One or more devices is "
10514 "currently being resilvered. The pool will\n\tcontinue "
10515 "to function, possibly in a degraded state.\n"));
10516 snprintf(action, AC_SIZE, gettext("Wait for the resilver to "
10517 "complete.\n"));
10518 break;
10519
10520 case ZPOOL_STATUS_REBUILD_SCRUB:
10521 snprintf(status, ST_SIZE, gettext("One or more devices have "
10522 "been sequentially resilvered, scrubbing\n\tthe pool "
10523 "is recommended.\n"));
10524 snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
10525 "verify all data checksums.\n"));
10526 break;
10527
10528 case ZPOOL_STATUS_CORRUPT_DATA:
10529 snprintf(status, ST_SIZE, gettext("One or more devices has "
10530 "experienced an error resulting in data\n\tcorruption. "
10531 "Applications may be affected.\n"));
10532 snprintf(action, AC_SIZE, gettext("Restore the file in question"
10533 " if possible. Otherwise restore the\n\tentire pool from "
10534 "backup.\n"));
10535 break;
10536
10537 case ZPOOL_STATUS_CORRUPT_POOL:
10538 snprintf(status, ST_SIZE, gettext("The pool metadata is "
10539 "corrupted and the pool cannot be opened.\n"));
10540 zpool_explain_recover(zpool_get_handle(zhp),
10541 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
10542 action, AC_SIZE);
10543 break;
10544
10545 case ZPOOL_STATUS_VERSION_OLDER:
10546 snprintf(status, ST_SIZE, gettext("The pool is formatted using "
10547 "a legacy on-disk format. The pool can\n\tstill be used, "
10548 "but some features are unavailable.\n"));
10549 snprintf(action, AC_SIZE, gettext("Upgrade the pool using "
10550 "'zpool upgrade'. Once this is done, the\n\tpool will no "
10551 "longer be accessible on software that does not support\n\t"
10552 "feature flags.\n"));
10553 break;
10554
10555 case ZPOOL_STATUS_VERSION_NEWER:
10556 snprintf(status, ST_SIZE, gettext("The pool has been upgraded "
10557 "to a newer, incompatible on-disk version.\n\tThe pool "
10558 "cannot be accessed on this system.\n"));
10559 snprintf(action, AC_SIZE, gettext("Access the pool from a "
10560 "system running more recent software, or\n\trestore the "
10561 "pool from backup.\n"));
10562 break;
10563
10564 case ZPOOL_STATUS_FEAT_DISABLED:
10565 snprintf(status, ST_SIZE, gettext("Some supported and "
10566 "requested features are not enabled on the pool.\n\t"
10567 "The pool can still be used, but some features are "
10568 "unavailable.\n"));
10569 snprintf(action, AC_SIZE, gettext("Enable all features using "
10570 "'zpool upgrade'. Once this is done,\n\tthe pool may no "
10571 "longer be accessible by software that does not support\n\t"
10572 "the features. See zpool-features(7) for details.\n"));
10573 break;
10574
10575 case ZPOOL_STATUS_COMPATIBILITY_ERR:
10576 snprintf(status, ST_SIZE, gettext("This pool has a "
10577 "compatibility list specified, but it could not be\n\t"
10578 "read/parsed at this time. The pool can still be used, "
10579 "but this\n\tshould be investigated.\n"));
10580 snprintf(action, AC_SIZE, gettext("Check the value of the "
10581 "'compatibility' property against the\n\t"
10582 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
10583 ZPOOL_DATA_COMPAT_D ".\n"));
10584 break;
10585
10586 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
10587 snprintf(status, ST_SIZE, gettext("One or more features "
10588 "are enabled on the pool despite not being\n\t"
10589 "requested by the 'compatibility' property.\n"));
10590 snprintf(action, AC_SIZE, gettext("Consider setting "
10591 "'compatibility' to an appropriate value, or\n\t"
10592 "adding needed features to the relevant file in\n\t"
10593 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
10594 break;
10595
10596 case ZPOOL_STATUS_UNSUP_FEAT_READ:
10597 snprintf(status, ST_SIZE, gettext("The pool cannot be accessed "
10598 "on this system because it uses the\n\tfollowing feature(s)"
10599 " not supported on this system:\n"));
10600 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
10601 ST_SIZE);
10602 snprintf(action, AC_SIZE, gettext("Access the pool from a "
10603 "system that supports the required feature(s),\n\tor "
10604 "restore the pool from backup.\n"));
10605 break;
10606
10607 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
10608 snprintf(status, ST_SIZE, gettext("The pool can only be "
10609 "accessed in read-only mode on this system. It\n\tcannot be"
10610 " accessed in read-write mode because it uses the "
10611 "following\n\tfeature(s) not supported on this system:\n"));
10612 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
10613 ST_SIZE);
10614 snprintf(action, AC_SIZE, gettext("The pool cannot be accessed "
10615 "in read-write mode. Import the pool with\n"
10616 "\t\"-o readonly=on\", access the pool from a system that "
10617 "supports the\n\trequired feature(s), or restore the "
10618 "pool from backup.\n"));
10619 break;
10620
10621 case ZPOOL_STATUS_FAULTED_DEV_R:
10622 snprintf(status, ST_SIZE, gettext("One or more devices are "
10623 "faulted in response to persistent errors.\n\tSufficient "
10624 "replicas exist for the pool to continue functioning "
10625 "in a\n\tdegraded state.\n"));
10626 snprintf(action, AC_SIZE, gettext("Replace the faulted device, "
10627 "or use 'zpool clear' to mark the device\n\trepaired.\n"));
10628 break;
10629
10630 case ZPOOL_STATUS_FAULTED_DEV_NR:
10631 snprintf(status, ST_SIZE, gettext("One or more devices are "
10632 "faulted in response to persistent errors. There are "
10633 "insufficient replicas for the pool to\n\tcontinue "
10634 "functioning.\n"));
10635 snprintf(action, AC_SIZE, gettext("Destroy and re-create the "
10636 "pool from a backup source. Manually marking the device\n"
10637 "\trepaired using 'zpool clear' may allow some data "
10638 "to be recovered.\n"));
10639 break;
10640
10641 case ZPOOL_STATUS_IO_FAILURE_MMP:
10642 snprintf(status, ST_SIZE, gettext("The pool is suspended "
10643 "because multihost writes failed or were delayed;\n\t"
10644 "another system could import the pool undetected.\n"));
10645 snprintf(action, AC_SIZE, gettext("Make sure the pool's devices"
10646 " are connected, then reboot your system and\n\timport the "
10647 "pool or run 'zpool clear' to resume the pool.\n"));
10648 break;
10649
10650 case ZPOOL_STATUS_IO_FAILURE_WAIT:
10651 case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
10652 snprintf(status, ST_SIZE, gettext("One or more devices are "
10653 "faulted in response to IO failures.\n"));
10654 snprintf(action, AC_SIZE, gettext("Make sure the affected "
10655 "devices are connected, then run 'zpool clear'.\n"));
10656 break;
10657
10658 case ZPOOL_STATUS_BAD_LOG:
10659 snprintf(status, ST_SIZE, gettext("An intent log record "
10660 "could not be read.\n"
10661 "\tWaiting for administrator intervention to fix the "
10662 "faulted pool.\n"));
10663 snprintf(action, AC_SIZE, gettext("Either restore the affected "
10664 "device(s) and run 'zpool online',\n"
10665 "\tor ignore the intent log records by running "
10666 "'zpool clear'.\n"));
10667 break;
10668
10669 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
10670 snprintf(status, ST_SIZE, gettext("One or more devices are "
10671 "configured to use a non-native block size.\n"
10672 "\tExpect reduced performance.\n"));
10673 snprintf(action, AC_SIZE, gettext("Replace affected devices "
10674 "with devices that support the\n\tconfigured block size, "
10675 "or migrate data to a properly configured\n\tpool.\n"));
10676 break;
10677
10678 case ZPOOL_STATUS_HOSTID_MISMATCH:
10679 snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid"
10680 " and system hostid on imported pool.\n\tThis pool was "
10681 "previously imported into a system with a different "
10682 "hostid,\n\tand then was verbatim imported into this "
10683 "system.\n"));
10684 snprintf(action, AC_SIZE, gettext("Export this pool on all "
10685 "systems on which it is imported.\n"
10686 "\tThen import it to correct the mismatch.\n"));
10687 break;
10688
10689 case ZPOOL_STATUS_ERRATA:
10690 snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"),
10691 errata);
10692 switch (errata) {
10693 case ZPOOL_ERRATA_NONE:
10694 break;
10695
10696 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
10697 snprintf(action, AC_SIZE, gettext("To correct the issue"
10698 " run 'zpool scrub'.\n"));
10699 break;
10700
10701 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
10702 (void) strlcat(status, gettext("\tExisting encrypted "
10703 "datasets contain an on-disk incompatibility\n\t "
10704 "which needs to be corrected.\n"), ST_SIZE);
10705 snprintf(action, AC_SIZE, gettext("To correct the issue"
10706 " backup existing encrypted datasets to new\n\t"
10707 "encrypted datasets and destroy the old ones. "
10708 "'zfs mount -o ro' can\n\tbe used to temporarily "
10709 "mount existing encrypted datasets readonly.\n"));
10710 break;
10711
10712 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
10713 (void) strlcat(status, gettext("\tExisting encrypted "
10714 "snapshots and bookmarks contain an on-disk\n\t"
10715 "incompatibility. This may cause on-disk "
10716 "corruption if they are used\n\twith "
10717 "'zfs recv'.\n"), ST_SIZE);
10718 snprintf(action, AC_SIZE, gettext("To correct the"
10719 "issue, enable the bookmark_v2 feature. No "
10720 "additional\n\taction is needed if there are no "
10721 "encrypted snapshots or bookmarks.\n\tIf preserving"
10722 "the encrypted snapshots and bookmarks is required,"
10723 " use\n\ta non-raw send to backup and restore them."
10724 " Alternately, they may be\n\tremoved to resolve "
10725 "the incompatibility.\n"));
10726 break;
10727
10728 default:
10729 /*
10730 * All errata which allow the pool to be imported
10731 * must contain an action message.
10732 */
10733 assert(0);
10734 }
10735 break;
10736
10737 default:
10738 /*
10739 * The remaining errors can't actually be generated, yet.
10740 */
10741 assert(reason == ZPOOL_STATUS_OK);
10742 }
10743
10744 if (status[0] != 0) {
10745 if (cbp->cb_json)
10746 fnvlist_add_string(item, "status", status);
10747 else {
10748 printf_color(ANSI_BOLD, gettext("status: "));
10749 printf_color(ANSI_YELLOW, status);
10750 }
10751 }
10752
10753 if (action[0] != 0) {
10754 if (cbp->cb_json)
10755 fnvlist_add_string(item, "action", action);
10756 else {
10757 printf_color(ANSI_BOLD, gettext("action: "));
10758 printf_color(ANSI_YELLOW, action);
10759 }
10760 }
10761 }
10762
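/*
 * Display pool status in JSON format; the JSON counterpart of
 * status_callback(). Gathers pool info, vdev trees, scan, removal,
 * checkpoint, and RAIDZ expansion progress, dedup stats, and error
 * info under the "pools" object of cbp->cb_jsobj.
 */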
10763 static int
10764 status_callback_json(zpool_handle_t *zhp, void *data)
10765 {
10766 status_cbdata_t *cbp = data;
10767 nvlist_t *config, *nvroot;
10768 const char *msgid;
10769 char pool_guid[256];
10770 char msgbuf[256];
10771 uint64_t guid;
10772 zpool_status_t reason;
10773 zpool_errata_t errata;
10774 uint_t c;
10775 vdev_stat_t *vs;
10776 nvlist_t *item, *d, *load_info, *vds;
10777
10778 /* If dedup stats were requested, also fetch dedupcached. */
10779 if (cbp->cb_dedup_stats > 1)
10780 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
10781 reason = zpool_get_status(zhp, &msgid, &errata);
10782 /*
10783 * If we were given 'zpool status -x', only report those pools with
10784 * problems.
10785 */
10786 if (cbp->cb_explain &&
10787 (reason == ZPOOL_STATUS_OK ||
10788 reason == ZPOOL_STATUS_VERSION_OLDER ||
10789 reason == ZPOOL_STATUS_FEAT_DISABLED ||
10790 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
10791 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
10792 return (0);
10793 }
10794
10795 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
10796 item = fnvlist_alloc();
10797 vds = fnvlist_alloc();
10798 fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int);
10799 config = zpool_get_config(zhp, NULL);
10800
10801 if (config != NULL) {
10802 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
10803 verify(nvlist_lookup_uint64_array(nvroot,
10804 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);
10805 if (cbp->cb_json_pool_key_guid) {
10806 guid = fnvlist_lookup_uint64(config,
10807 ZPOOL_CONFIG_POOL_GUID);
10808 snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid);
10809 }
10810 cbp->cb_count++;
10811
10812 print_status_reason(zhp, cbp, reason, errata, item);
10813 if (msgid != NULL) {
10814 snprintf(msgbuf, 256,
10815 "https://openzfs.github.io/openzfs-docs/msg/%s",
10816 msgid);
10817 fnvlist_add_string(item, "msgid", msgid);
10818 fnvlist_add_string(item, "moreinfo", msgbuf);
10819 }
10820
10821 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
10822 &load_info) == 0) {
10823 fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO,
10824 load_info);
10825 }
10826
10827 scan_status_nvlist(zhp, cbp, nvroot, item);
10828 removal_status_nvlist(zhp, cbp, nvroot, item);
10829 checkpoint_status_nvlist(nvroot, cbp, item);
10830 raidz_expand_status_nvlist(zhp, cbp, nvroot, item);
10831 vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds);
10832 if (cbp->cb_flat_vdevs) {
10833 class_vdevs_nvlist(zhp, cbp, nvroot,
10834 VDEV_ALLOC_BIAS_DEDUP, vds);
10835 class_vdevs_nvlist(zhp, cbp, nvroot,
10836 VDEV_ALLOC_BIAS_SPECIAL, vds);
10837 class_vdevs_nvlist(zhp, cbp, nvroot,
10838 VDEV_ALLOC_CLASS_LOGS, vds);
10839 l2cache_nvlist(zhp, cbp, nvroot, vds);
10840 spares_nvlist(zhp, cbp, nvroot, vds);
10841
10842 fnvlist_add_nvlist(item, "vdevs", vds);
10843 fnvlist_free(vds);
10844 } else {
10845 fnvlist_add_nvlist(item, "vdevs", vds);
10846 fnvlist_free(vds);
10847
10848 class_vdevs_nvlist(zhp, cbp, nvroot,
10849 VDEV_ALLOC_BIAS_DEDUP, item);
10850 class_vdevs_nvlist(zhp, cbp, nvroot,
10851 VDEV_ALLOC_BIAS_SPECIAL, item);
10852 class_vdevs_nvlist(zhp, cbp, nvroot,
10853 VDEV_ALLOC_CLASS_LOGS, item);
10854 l2cache_nvlist(zhp, cbp, nvroot, item);
10855 spares_nvlist(zhp, cbp, nvroot, item);
10856 }
10857 dedup_stats_nvlist(zhp, cbp, item);
10858 errors_nvlist(zhp, cbp, item);
10859 }
10860 if (cbp->cb_json_pool_key_guid) {
10861 fnvlist_add_nvlist(d, pool_guid, item);
10862 } else {
10863 fnvlist_add_nvlist(d, zpool_get_name(zhp),
10864 item);
10865 }
10866 fnvlist_free(item);
10867 return (0);
10868 }
10869
10870 /*
10871 * Display a summary of pool status, such as:
10872 *
10873 * pool: tank
10874 * status: DEGRADED
10875 * reason: One or more devices ...
10876 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
10877 * config:
10878 * mirror DEGRADED
10879 * c1t0d0 OK
10880 * c2t0d0 UNAVAIL
10881 *
10882 * When given the '-v' option, we print out the complete config. If the '-e'
10883 * option is specified, then we print out error rate information as well.
10884 */
10885 static int
10886 status_callback(zpool_handle_t *zhp, void *data)
10887 {
10888 status_cbdata_t *cbp = data;
10889 nvlist_t *config, *nvroot;
10890 const char *msgid;
10891 zpool_status_t reason;
10892 zpool_errata_t errata;
10893 const char *health;
10894 uint_t c;
10895 vdev_stat_t *vs;
10896
10897 /* If dedup stats were requested, also fetch dedupcached. */
10898 if (cbp->cb_dedup_stats > 1)
10899 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
10900
10901 config = zpool_get_config(zhp, NULL);
10902 reason = zpool_get_status(zhp, &msgid, &errata);
10903
10904 cbp->cb_count++;
10905
10906 /*
10907 * If we were given 'zpool status -x', only report those pools with
10908 * problems.
10909 */
10910 if (cbp->cb_explain &&
10911 (reason == ZPOOL_STATUS_OK ||
10912 reason == ZPOOL_STATUS_VERSION_OLDER ||
10913 reason == ZPOOL_STATUS_FEAT_DISABLED ||
10914 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
10915 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
10916 if (!cbp->cb_allpools) {
10917 (void) printf(gettext("pool '%s' is healthy\n"),
10918 zpool_get_name(zhp));
10919 if (cbp->cb_first)
10920 cbp->cb_first = B_FALSE;
10921 }
10922 return (0);
10923 }
10924
10925 if (cbp->cb_first)
10926 cbp->cb_first = B_FALSE;
10927 else
10928 (void) printf("\n");
10929
10930 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
10931 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
10932 (uint64_t **)&vs, &c) == 0);
10933
10934 health = zpool_get_state_str(zhp);
10935
10936 printf(" ");
10937 printf_color(ANSI_BOLD, gettext("pool:"));
10938 printf(" %s\n", zpool_get_name(zhp));
10939 fputc(' ', stdout);
10940 printf_color(ANSI_BOLD, gettext("state: "));
10941
10942 printf_color(health_str_to_color(health), "%s", health);
10943
10944 fputc('\n', stdout);
10945 print_status_reason(zhp, cbp, reason, errata, NULL);
10946
10947 if (msgid != NULL) {
10948 printf(" ");
10949 printf_color(ANSI_BOLD, gettext("see:"));
10950 printf(gettext(
10951 " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
10952 msgid);
10953 }
10954
10955 if (config != NULL) {
10956 uint64_t nerr;
10957 nvlist_t **spares, **l2cache;
10958 uint_t nspares, nl2cache;
10959
10960 print_scan_status(zhp, nvroot);
10961
10962 pool_removal_stat_t *prs = NULL;
10963 (void) nvlist_lookup_uint64_array(nvroot,
10964 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
10965 print_removal_status(zhp, prs);
10966
10967 pool_checkpoint_stat_t *pcs = NULL;
10968 (void) nvlist_lookup_uint64_array(nvroot,
10969 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
10970 print_checkpoint_status(pcs);
10971
10972 pool_raidz_expand_stat_t *pres = NULL;
10973 (void) nvlist_lookup_uint64_array(nvroot,
10974 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
10975 print_raidz_expand_status(zhp, pres);
10976
10977 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
10978 cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
10979 if (cbp->cb_namewidth < 10)
10980 cbp->cb_namewidth = 10;
10981
10982 color_start(ANSI_BOLD);
10983 (void) printf(gettext("config:\n\n"));
10984 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
10985 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
10986 "CKSUM");
10987 color_end();
10988
10989 if (cbp->cb_print_slow_ios) {
10990 printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
10991 }
10992
10993 if (cbp->cb_print_power) {
10994 printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
10995 }
10996
10997 if (cbp->cb_print_dio_verify) {
10998 printf_color(ANSI_BOLD, " %5s", gettext("DIO"));
10999 }
11000
11001 if (cbp->vcdl != NULL)
11002 print_cmd_columns(cbp->vcdl, 0);
11003
11004 printf("\n");
11005
11006 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
11007 B_FALSE, NULL);
11008
11009 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
11010 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
11011 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
11012
11013 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
11014 &l2cache, &nl2cache) == 0)
11015 print_l2cache(zhp, cbp, l2cache, nl2cache);
11016
11017 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
11018 &spares, &nspares) == 0)
11019 print_spares(zhp, cbp, spares, nspares);
11020
11021 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
11022 &nerr) == 0) {
11023 (void) printf("\n");
11024 if (nerr == 0) {
11025 (void) printf(gettext(
11026 "errors: No known data errors\n"));
11027 } else if (!cbp->cb_verbose) {
11028 color_start(ANSI_RED);
11029 (void) printf(gettext("errors: %llu data "
11030 "errors, use '-v' for a list\n"),
11031 (u_longlong_t)nerr);
11032 color_end();
11033 } else {
11034 print_error_log(zhp);
11035 }
11036 }
11037
11038 if (cbp->cb_dedup_stats)
11039 print_dedup_stats(zhp, config, cbp->cb_literal);
11040 } else {
11041 (void) printf(gettext("config: The configuration cannot be "
11042 "determined.\n"));
11043 }
11044
11045 return (0);
11046 }
11047
11048 /*
11049 * zpool status [-dDegiLpPstvx] [-c [script1,script2,...]] ...
11050 * [-j|--json [--json-flat-vdevs] [--json-int] ...
11051 * [--json-pool-key-guid]] [--power] [-T d|u] ...
11052 * [pool] [interval [count]]
11053 *
11054 * -c CMD For each vdev, run command CMD
11055 * -D Display dedup status (undocumented)
11056 * -d Display Direct I/O write verify errors
11057 * -e Display only unhealthy vdevs
11058 * -g Display guid for individual vdev name.
11059 * -i Display vdev initialization status.
11060 * -j [...] Display output in JSON format
11061 * --json-flat-vdevs Display vdevs in flat hierarchy
11062 * --json-int Display numbers in integer format instead of string
11063 * --json-pool-key-guid Use pool GUID as key for pool objects
11064 * -L Follow links when resolving vdev path name.
11065 * -P Display full path for vdev name.
11066 * -p Display values in parsable (exact) format.
11067 * --power Display vdev enclosure slot power status
11068 * -s Display slow IOs column.
11069 * -T Display a timestamp in date(1) or Unix format
11070 * -t Display vdev TRIM status.
11071 * -v Display complete error logs
11072 * -x Display only pools with potential problems
11073 *
11074 * Describes the health status of all pools or some subset.
11075 */
11076 int
11077 zpool_do_status(int argc, char **argv)
11078 {
11079 int c;
11080 int ret;
11081 float interval = 0;
11082 unsigned long count = 0;
11083 status_cbdata_t cb = { 0 };
11084 nvlist_t *data;
11085 char *cmd = NULL;
11086
11087 struct option long_options[] = {
11088 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
11089 {"json", no_argument, NULL, 'j'},
11090 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
11091 {"json-flat-vdevs", no_argument, NULL,
11092 ZPOOL_OPTION_JSON_FLAT_VDEVS},
11093 {"json-pool-key-guid", no_argument, NULL,
11094 ZPOOL_OPTION_POOL_KEY_GUID},
11095 {0, 0, 0, 0}
11096 };
11097
11098 /* check options */
11099 while ((c = getopt_long(argc, argv, "c:jdDegiLpPstT:vx", long_options,
11100 NULL)) != -1) {
11101 switch (c) {
11102 case 'c':
11103 if (cmd != NULL) {
11104 fprintf(stderr,
11105 gettext("Can't set -c flag twice\n"));
11106 exit(1);
11107 }
11108
11109 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
11110 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
11111 fprintf(stderr, gettext(
11112 "Can't run -c, disabled by "
11113 "ZPOOL_SCRIPTS_ENABLED.\n"));
11114 exit(1);
11115 }
11116
11117 if ((getuid() <= 0 || geteuid() <= 0) &&
11118 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
11119 fprintf(stderr, gettext(
11120 "Can't run -c with root privileges "
11121 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
11122 exit(1);
11123 }
11124 cmd = optarg;
11125 break;
11126 case 'd':
11127 cb.cb_print_dio_verify = B_TRUE;
11128 break;
11129 case 'D':
11130 if (++cb.cb_dedup_stats > 2)
11131 cb.cb_dedup_stats = 2;
11132 break;
11133 case 'e':
11134 cb.cb_print_unhealthy = B_TRUE;
11135 break;
11136 case 'g':
11137 cb.cb_name_flags |= VDEV_NAME_GUID;
11138 break;
11139 case 'i':
11140 cb.cb_print_vdev_init = B_TRUE;
11141 break;
11142 case 'L':
11143 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
11144 break;
11145 case 'p':
11146 cb.cb_literal = B_TRUE;
11147 break;
11148 case 'P':
11149 cb.cb_name_flags |= VDEV_NAME_PATH;
11150 break;
11151 case 's':
11152 cb.cb_print_slow_ios = B_TRUE;
11153 break;
11154 case 't':
11155 cb.cb_print_vdev_trim = B_TRUE;
11156 break;
11157 case 'T':
11158 get_timestamp_arg(*optarg);
11159 break;
11160 case 'v':
11161 cb.cb_verbose = B_TRUE;
11162 break;
11163 case 'j':
11164 cb.cb_json = B_TRUE;
11165 break;
11166 case 'x':
11167 cb.cb_explain = B_TRUE;
11168 break;
11169 case ZPOOL_OPTION_POWER:
11170 cb.cb_print_power = B_TRUE;
11171 break;
11172 case ZPOOL_OPTION_JSON_FLAT_VDEVS:
11173 cb.cb_flat_vdevs = B_TRUE;
11174 break;
11175 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
11176 cb.cb_json_as_int = B_TRUE;
11177 cb.cb_literal = B_TRUE;
11178 break;
11179 case ZPOOL_OPTION_POOL_KEY_GUID:
11180 cb.cb_json_pool_key_guid = B_TRUE;
11181 break;
11182 case '?':
11183 if (optopt == 'c') {
11184 print_zpool_script_list("status");
11185 exit(0);
11186 } else {
11187 fprintf(stderr,
11188 gettext("invalid option '%c'\n"), optopt);
11189 }
11190 usage(B_FALSE);
11191 }
11192 }
11193
11194 argc -= optind;
11195 argv += optind;
11196
11197 get_interval_count(&argc, argv, &interval, &count);
11198
11199 if (argc == 0)
11200 cb.cb_allpools = B_TRUE;
11201
11202 cb.cb_first = B_TRUE;
11203 cb.cb_print_status = B_TRUE;
11204
11205 if (cb.cb_flat_vdevs && !cb.cb_json) {
11206 fprintf(stderr, gettext("'--json-flat-vdevs' only works with"
11207 " '-j' option\n"));
11208 usage(B_FALSE);
11209 }
11210
11211 if (cb.cb_json_as_int && !cb.cb_json) {
11212 (void) fprintf(stderr, gettext("'--json-int' only works with"
11213 " '-j' option\n"));
11214 usage(B_FALSE);
11215 }
11216
11217 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
11218 (void) fprintf(stderr, gettext("'json-pool-key-guid' only"
11219 " works with '-j' option\n"));
11220 usage(B_FALSE);
11221 }
11222
11223 for (;;) {
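		/* One status pass per iteration; repeats when an interval was given. */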
11224 if (cb.cb_json) {
11225 cb.cb_jsobj = zpool_json_schema(0, 1);
11226 data = fnvlist_alloc();
11227 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
11228 fnvlist_free(data);
11229 }
11230
11231 if (timestamp_fmt != NODATE) {
11232 if (cb.cb_json) {
11233 if (cb.cb_json_as_int) {
11234 fnvlist_add_uint64(cb.cb_jsobj, "time",
11235 time(NULL));
11236 } else {
11237 char ts[128];
11238 get_timestamp(timestamp_fmt, ts, 128);
11239 fnvlist_add_string(cb.cb_jsobj, "time",
11240 ts);
11241 }
11242 } else
11243 print_timestamp(timestamp_fmt);
11244 }
11245
11246 if (cmd != NULL)
11247 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
11248 NULL, NULL, 0, 0);
11249
11250 if (cb.cb_json) {
11251 ret = for_each_pool(argc, argv, B_TRUE, NULL,
11252 ZFS_TYPE_POOL, cb.cb_literal,
11253 status_callback_json, &cb);
11254 } else {
11255 ret = for_each_pool(argc, argv, B_TRUE, NULL,
11256 ZFS_TYPE_POOL, cb.cb_literal,
11257 status_callback, &cb);
11258 }
11259
11260 if (cb.vcdl != NULL)
11261 free_vdev_cmd_data_list(cb.vcdl);
11262
11263 if (cb.cb_json) {
11264 if (ret == 0)
11265 zcmd_print_json(cb.cb_jsobj);
11266 else
11267 nvlist_free(cb.cb_jsobj);
11268 } else {
11269 if (argc == 0 && cb.cb_count == 0) {
11270 (void) fprintf(stderr, "%s",
11271 gettext("no pools available\n"));
11272 } else if (cb.cb_explain && cb.cb_first &&
11273 cb.cb_allpools) {
11274 (void) printf("%s",
11275 gettext("all pools are healthy\n"));
11276 }
11277 }
11278
11279 if (ret != 0)
11280 return (ret);
11281
11282 if (interval == 0)
11283 break;
11284
11285 if (count != 0 && --count == 0)
11286 break;
11287
11288 (void) fflush(stdout);
11289 (void) fsleep(interval);
11290 }
11291
11292 return (0);
11293 }
11294
11295 typedef struct upgrade_cbdata {
11296 int cb_first;
11297 int cb_argc;
11298 uint64_t cb_version;
11299 char **cb_argv;
11300 } upgrade_cbdata_t;
11301
11302 static int
11303 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
11304 {
11305 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
11306 int *count = (int *)unsupp_fs;
11307
11308 if (zfs_version > ZPL_VERSION) {
11309 (void) printf(gettext("%s (v%d) is not supported by this "
11310 "implementation of ZFS.\n"),
11311 zfs_get_name(zhp), zfs_version);
11312 (*count)++;
11313 }
11314
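	/* Recurse into child filesystems to count any unsupported versions. */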
11315 zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
11316
11317 zfs_close(zhp);
11318
11319 return (0);
11320 }
11321
11322 static int
11323 upgrade_version(zpool_handle_t *zhp, uint64_t version)
11324 {
11325 int ret;
11326 nvlist_t *config;
11327 uint64_t oldversion;
11328 int unsupp_fs = 0;
11329
11330 config = zpool_get_config(zhp, NULL);
11331 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11332 &oldversion) == 0);
11333
11334 char compat[ZFS_MAXPROPLEN];
11335 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
11336 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
11337 compat[0] = '\0';
11338
11339 assert(SPA_VERSION_IS_SUPPORTED(oldversion));
11340 assert(oldversion < version);
11341
11342 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
11343 if (ret != 0)
11344 return (ret);
11345
11346 if (unsupp_fs) {
11347 (void) fprintf(stderr, gettext("Upgrade not performed due "
11348 "to %d unsupported filesystems (max v%d).\n"),
11349 unsupp_fs, (int)ZPL_VERSION);
11350 return (1);
11351 }
11352
11353 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
11354 (void) fprintf(stderr, gettext("Upgrade not performed because "
11355 "'compatibility' property set to '"
11356 ZPOOL_COMPAT_LEGACY "'.\n"));
11357 return (1);
11358 }
11359
11360 ret = zpool_upgrade(zhp, version);
11361 if (ret != 0)
11362 return (ret);
11363
11364 if (version >= SPA_VERSION_FEATURES) {
11365 (void) printf(gettext("Successfully upgraded "
11366 "'%s' from version %llu to feature flags.\n"),
11367 zpool_get_name(zhp), (u_longlong_t)oldversion);
11368 } else {
11369 (void) printf(gettext("Successfully upgraded "
11370 "'%s' from version %llu to version %llu.\n"),
11371 zpool_get_name(zhp), (u_longlong_t)oldversion,
11372 (u_longlong_t)version);
11373 }
11374
11375 return (0);
11376 }
11377
11378 static int
11379 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
11380 {
11381 int i, ret, count;
11382 boolean_t firstff = B_TRUE;
11383 nvlist_t *enabled = zpool_get_features(zhp);
11384
11385 char compat[ZFS_MAXPROPLEN];
11386 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
11387 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
11388 compat[0] = '\0';
11389
11390 boolean_t requested_features[SPA_FEATURES];
11391 if (zpool_do_load_compat(compat, requested_features) !=
11392 ZPOOL_COMPATIBILITY_OK)
11393 return (-1);
11394
11395 count = 0;
11396 for (i = 0; i < SPA_FEATURES; i++) {
11397 const char *fname = spa_feature_table[i].fi_uname;
11398 const char *fguid = spa_feature_table[i].fi_guid;
11399
11400 if (!spa_feature_table[i].fi_zfs_mod_supported ||
11401 (spa_feature_table[i].fi_flags & ZFEATURE_FLAG_NO_UPGRADE))
11402 continue;
11403
11404 if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
11405 char *propname;
11406 verify(-1 != asprintf(&propname, "feature@%s", fname));
11407 ret = zpool_set_prop(zhp, propname,
11408 ZFS_FEATURE_ENABLED);
11409 if (ret != 0) {
11410 free(propname);
11411 return (ret);
11412 }
11413 count++;
11414
11415 if (firstff) {
11416 (void) printf(gettext("Enabled the "
11417 "following features on '%s':\n"),
11418 zpool_get_name(zhp));
11419 firstff = B_FALSE;
11420 }
11421 (void) printf(gettext(" %s\n"), fname);
11422 free(propname);
11423 }
11424 }
11425
11426 if (countp != NULL)
11427 *countp = count;
11428 return (0);
11429 }
11430
11431 static int
11432 upgrade_cb(zpool_handle_t *zhp, void *arg)
11433 {
11434 upgrade_cbdata_t *cbp = arg;
11435 nvlist_t *config;
11436 uint64_t version;
11437 boolean_t modified_pool = B_FALSE;
11438 int ret;
11439
11440 config = zpool_get_config(zhp, NULL);
11441 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11442 &version) == 0);
11443
11444 assert(SPA_VERSION_IS_SUPPORTED(version));
11445
11446 if (version < cbp->cb_version) {
11447 cbp->cb_first = B_FALSE;
11448 ret = upgrade_version(zhp, cbp->cb_version);
11449 if (ret != 0)
11450 return (ret);
11451 modified_pool = B_TRUE;
11452
11453 /*
11454 * If they did "zpool upgrade -a", then we could
11455 * be doing ioctls to different pools. We need
11456 * to log this history once to each pool, and bypass
11457 * the normal history logging that happens in main().
11458 */
11459 (void) zpool_log_history(g_zfs, history_str);
11460 log_history = B_FALSE;
11461 }
11462
11463 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
11464 int count;
11465 ret = upgrade_enable_all(zhp, &count);
11466 if (ret != 0)
11467 return (ret);
11468
11469 if (count > 0) {
11470 cbp->cb_first = B_FALSE;
11471 modified_pool = B_TRUE;
11472 }
11473 }
11474
11475 if (modified_pool) {
11476 (void) printf("\n");
11477 (void) after_zpool_upgrade(zhp);
11478 }
11479
11480 return (0);
11481 }
11482
11483 static int
11484 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
11485 {
11486 upgrade_cbdata_t *cbp = arg;
11487 nvlist_t *config;
11488 uint64_t version;
11489
11490 config = zpool_get_config(zhp, NULL);
11491 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11492 &version) == 0);
11493
11494 assert(SPA_VERSION_IS_SUPPORTED(version));
11495
11496 if (version < SPA_VERSION_FEATURES) {
11497 if (cbp->cb_first) {
11498 (void) printf(gettext("The following pools are "
11499 "formatted with legacy version numbers and can\n"
11500 "be upgraded to use feature flags. After "
11501 "being upgraded, these pools\nwill no "
11502 "longer be accessible by software that does not "
11503 "support feature\nflags.\n\n"
11504 "Note that setting a pool's 'compatibility' "
11505 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
11506 "inhibit upgrades.\n\n"));
11507 (void) printf(gettext("VER POOL\n"));
11508 (void) printf(gettext("--- ------------\n"));
11509 cbp->cb_first = B_FALSE;
11510 }
11511
11512 (void) printf("%2llu %s\n", (u_longlong_t)version,
11513 zpool_get_name(zhp));
11514 }
11515
11516 return (0);
11517 }
11518
11519 static int
11520 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
11521 {
11522 upgrade_cbdata_t *cbp = arg;
11523 nvlist_t *config;
11524 uint64_t version;
11525
11526 config = zpool_get_config(zhp, NULL);
11527 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
11528 &version) == 0);
11529
11530 if (version >= SPA_VERSION_FEATURES) {
11531 int i;
11532 boolean_t poolfirst = B_TRUE;
11533 nvlist_t *enabled = zpool_get_features(zhp);
11534
11535 for (i = 0; i < SPA_FEATURES; i++) {
11536 const char *fguid = spa_feature_table[i].fi_guid;
11537 const char *fname = spa_feature_table[i].fi_uname;
11538
11539 if (!spa_feature_table[i].fi_zfs_mod_supported)
11540 continue;
11541
11542 if (!nvlist_exists(enabled, fguid)) {
11543 if (cbp->cb_first) {
11544 (void) printf(gettext("\nSome "
11545 "supported features are not "
11546 "enabled on the following pools. "
11547 "Once a\nfeature is enabled the "
11548 "pool may become incompatible with "
11549 "software\nthat does not support "
11550 "the feature. See "
11551 "zpool-features(7) for "
11552 "details.\n\n"
11553 "Note that the pool "
11554 "'compatibility' feature can be "
11555 "used to inhibit\nfeature "
11556 "upgrades.\n\n"
11557 "Features marked with (*) are not "
11558 "applied automatically on upgrade, "
11559 "and\nmust be applied explicitly "
11560 "with zpool-set(7).\n\n"));
11561 (void) printf(gettext("POOL "
11562 "FEATURE\n"));
11563 (void) printf(gettext("------"
11564 "---------\n"));
11565 cbp->cb_first = B_FALSE;
11566 }
11567
11568 if (poolfirst) {
11569 (void) printf(gettext("%s\n"),
11570 zpool_get_name(zhp));
11571 poolfirst = B_FALSE;
11572 }
11573
11574 (void) printf(gettext(" %s%s\n"), fname,
11575 spa_feature_table[i].fi_flags &
11576 ZFEATURE_FLAG_NO_UPGRADE ? "(*)" : "");
11577 }
11578 /*
11579 * If they did "zpool upgrade -a", then we could
11580 * be doing ioctls to different pools. We need
11581 * to log this history once to each pool, and bypass
11582 * the normal history logging that happens in main().
11583 */
11584 (void) zpool_log_history(g_zfs, history_str);
11585 log_history = B_FALSE;
11586 }
11587 }
11588
11589 return (0);
11590 }
11591
11592 static int
11593 upgrade_one(zpool_handle_t *zhp, void *data)
11594 {
11595 boolean_t modified_pool = B_FALSE;
11596 upgrade_cbdata_t *cbp = data;
11597 uint64_t cur_version;
11598 int ret;
11599
11600 if (strcmp("log", zpool_get_name(zhp)) == 0) {
11601 (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
11602 "Pool 'log' must be renamed using export and import"
11603 " to upgrade.\n"));
11604 return (1);
11605 }
11606
11607 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
11608 if (cur_version > cbp->cb_version) {
11609 (void) printf(gettext("Pool '%s' is already formatted "
11610 "using more current version '%llu'.\n\n"),
11611 zpool_get_name(zhp), (u_longlong_t)cur_version);
11612 return (0);
11613 }
11614
11615 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
11616 (void) printf(gettext("Pool '%s' is already formatted "
11617 "using version %llu.\n\n"), zpool_get_name(zhp),
11618 (u_longlong_t)cbp->cb_version);
11619 return (0);
11620 }
11621
11622 if (cur_version != cbp->cb_version) {
11623 modified_pool = B_TRUE;
11624 ret = upgrade_version(zhp, cbp->cb_version);
11625 if (ret != 0)
11626 return (ret);
11627 }
11628
11629 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
11630 int count = 0;
11631 ret = upgrade_enable_all(zhp, &count);
11632 if (ret != 0)
11633 return (ret);
11634
11635 if (count != 0) {
11636 modified_pool = B_TRUE;
11637 } else if (cur_version == SPA_VERSION) {
11638 (void) printf(gettext("Pool '%s' already has all "
11639 "supported and requested features enabled.\n"),
11640 zpool_get_name(zhp));
11641 }
11642 }
11643
11644 if (modified_pool) {
11645 (void) printf("\n");
11646 (void) after_zpool_upgrade(zhp);
11647 }
11648
11649 return (0);
11650 }
11651
11652 /*
11653 * zpool upgrade
11654 * zpool upgrade -v
11655 * zpool upgrade [-V version] <-a | pool ...>
11656 *
11657 * With no arguments, display downrev'd ZFS pools available for upgrade.
11658 * Individual pools can be upgraded by specifying the pool, and '-a' will
11659 * upgrade all pools.
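 *
 * Illustrative invocations ('tank' is a placeholder pool name):
 *
 *	zpool upgrade             # list pools running legacy versions
 *	zpool upgrade -V 28 tank  # upgrade 'tank' to legacy version 28
 *	zpool upgrade -a          # upgrade every eligible pool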
11660 */
11661 int
11662 zpool_do_upgrade(int argc, char **argv)
11663 {
11664 int c;
11665 upgrade_cbdata_t cb = { 0 };
11666 int ret = 0;
11667 boolean_t showversions = B_FALSE;
11668 boolean_t upgradeall = B_FALSE;
11669 char *end;
11670
11671
11672 /* check options */
11673 while ((c = getopt(argc, argv, ":avV:")) != -1) {
11674 switch (c) {
11675 case 'a':
11676 upgradeall = B_TRUE;
11677 break;
11678 case 'v':
11679 showversions = B_TRUE;
11680 break;
11681 case 'V':
11682 cb.cb_version = strtoll(optarg, &end, 10);
11683 if (*end != '\0' ||
11684 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
11685 (void) fprintf(stderr,
11686 gettext("invalid version '%s'\n"), optarg);
11687 usage(B_FALSE);
11688 }
11689 break;
11690 case ':':
11691 (void) fprintf(stderr, gettext("missing argument for "
11692 "'%c' option\n"), optopt);
11693 usage(B_FALSE);
11694 break;
11695 case '?':
11696 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
11697 optopt);
11698 usage(B_FALSE);
11699 }
11700 }
11701
11702 cb.cb_argc = argc;
11703 cb.cb_argv = argv;
11704 argc -= optind;
11705 argv += optind;
11706
11707 if (cb.cb_version == 0) {
11708 cb.cb_version = SPA_VERSION;
11709 } else if (!upgradeall && argc == 0) {
11710 (void) fprintf(stderr, gettext("-V option is "
11711 "incompatible with other arguments\n"));
11712 usage(B_FALSE);
11713 }
11714
11715 if (showversions) {
11716 if (upgradeall || argc != 0) {
11717 (void) fprintf(stderr, gettext("-v option is "
11718 "incompatible with other arguments\n"));
11719 usage(B_FALSE);
11720 }
11721 } else if (upgradeall) {
11722 if (argc != 0) {
11723 (void) fprintf(stderr, gettext("-a option should not "
11724 "be used along with a pool name\n"));
11725 usage(B_FALSE);
11726 }
11727 }
11728
11729 (void) printf("%s", gettext("This system supports ZFS pool feature "
11730 "flags.\n\n"));
11731 if (showversions) {
11732 int i;
11733
11734 (void) printf(gettext("The following features are "
11735 "supported:\n\n"));
11736 (void) printf(gettext("FEAT DESCRIPTION\n"));
11737 (void) printf("----------------------------------------------"
11738 "---------------\n");
11739 for (i = 0; i < SPA_FEATURES; i++) {
11740 zfeature_info_t *fi = &spa_feature_table[i];
11741 if (!fi->fi_zfs_mod_supported)
11742 continue;
11743 const char *ro =
11744 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
11745 " (read-only compatible)" : "";
11746
11747 (void) printf("%-37s%s\n", fi->fi_uname, ro);
11748 (void) printf(" %s\n", fi->fi_desc);
11749 }
11750 (void) printf("\n");
11751
11752 (void) printf(gettext("The following legacy versions are also "
11753 "supported:\n\n"));
11754 (void) printf(gettext("VER DESCRIPTION\n"));
11755 (void) printf("--- -----------------------------------------"
11756 "---------------\n");
11757 (void) printf(gettext(" 1 Initial ZFS version\n"));
11758 (void) printf(gettext(" 2 Ditto blocks "
11759 "(replicated metadata)\n"));
11760 (void) printf(gettext(" 3 Hot spares and double parity "
11761 "RAID-Z\n"));
11762 (void) printf(gettext(" 4 zpool history\n"));
11763 (void) printf(gettext(" 5 Compression using the gzip "
11764 "algorithm\n"));
11765 (void) printf(gettext(" 6 bootfs pool property\n"));
11766 (void) printf(gettext(" 7 Separate intent log devices\n"));
11767 (void) printf(gettext(" 8 Delegated administration\n"));
11768 (void) printf(gettext(" 9 refquota and refreservation "
11769 "properties\n"));
11770 (void) printf(gettext(" 10 Cache devices\n"));
11771 (void) printf(gettext(" 11 Improved scrub performance\n"));
11772 (void) printf(gettext(" 12 Snapshot properties\n"));
11773 (void) printf(gettext(" 13 snapused property\n"));
11774 (void) printf(gettext(" 14 passthrough-x aclinherit\n"));
11775 (void) printf(gettext(" 15 user/group space accounting\n"));
11776 (void) printf(gettext(" 16 stmf property support\n"));
11777 (void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
11778 (void) printf(gettext(" 18 Snapshot user holds\n"));
11779 (void) printf(gettext(" 19 Log device removal\n"));
11780 (void) printf(gettext(" 20 Compression using zle "
11781 "(zero-length encoding)\n"));
11782 (void) printf(gettext(" 21 Deduplication\n"));
11783 (void) printf(gettext(" 22 Received properties\n"));
11784 (void) printf(gettext(" 23 Slim ZIL\n"));
11785 (void) printf(gettext(" 24 System attributes\n"));
11786 (void) printf(gettext(" 25 Improved scrub stats\n"));
11787 (void) printf(gettext(" 26 Improved snapshot deletion "
11788 "performance\n"));
11789 (void) printf(gettext(" 27 Improved snapshot creation "
11790 "performance\n"));
11791 (void) printf(gettext(" 28 Multiple vdev replacements\n"));
11792 (void) printf(gettext("\nFor more information on a particular "
11793 "version, including supported releases,\n"));
11794 (void) printf(gettext("see the ZFS Administration Guide.\n\n"));
11795 } else if (argc == 0 && upgradeall) {
11796 cb.cb_first = B_TRUE;
11797 ret = zpool_iter(g_zfs, upgrade_cb, &cb);
11798 if (ret == 0 && cb.cb_first) {
11799 if (cb.cb_version == SPA_VERSION) {
11800 (void) printf(gettext("All pools are already "
11801 "formatted using feature flags.\n\n"));
11802 (void) printf(gettext("Every feature flags "
11803 "pool already has all supported and "
11804 "requested features enabled.\n"));
11805 } else {
11806 (void) printf(gettext("All pools are already "
11807 "formatted with version %llu or higher.\n"),
11808 (u_longlong_t)cb.cb_version);
11809 }
11810 }
11811 } else if (argc == 0) {
11812 cb.cb_first = B_TRUE;
11813 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
11814 assert(ret == 0);
11815
11816 if (cb.cb_first) {
11817 (void) printf(gettext("All pools are formatted "
11818 "using feature flags.\n\n"));
11819 } else {
11820 (void) printf(gettext("\nUse 'zpool upgrade -v' "
11821 "for a list of available legacy versions.\n"));
11822 }
11823
11824 cb.cb_first = B_TRUE;
11825 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
11826 assert(ret == 0);
11827
11828 if (cb.cb_first) {
11829 (void) printf(gettext("Every feature flags pool has "
11830 "all supported and requested features enabled.\n"));
11831 } else {
11832 (void) printf(gettext("\n"));
11833 }
11834 } else {
11835 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
11836 B_FALSE, upgrade_one, &cb);
11837 }
11838
11839 return (ret);
11840 }
11841
11842 typedef struct hist_cbdata {
11843 boolean_t first;
11844 boolean_t longfmt;
11845 boolean_t internal;
11846 } hist_cbdata_t;
11847
11848 static void
11849 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
11850 {
11851 nvlist_t **records;
11852 uint_t numrecords;
11853 int i;
11854
11855 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
11856 &records, &numrecords) == 0);
11857 for (i = 0; i < numrecords; i++) {
11858 nvlist_t *rec = records[i];
11859 char tbuf[64] = "";
11860
11861 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
11862 time_t tsec;
11863 struct tm t;
11864
11865 tsec = fnvlist_lookup_uint64(records[i],
11866 ZPOOL_HIST_TIME);
11867 (void) localtime_r(&tsec, &t);
11868 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
11869 }
11870
11871 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
11872 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
11873 ZPOOL_HIST_ELAPSED_NS);
11874 (void) snprintf(tbuf + strlen(tbuf),
11875 sizeof (tbuf) - strlen(tbuf),
11876 " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
11877 }
11878
11879 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
11880 (void) printf("%s %s", tbuf,
11881 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
11882 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
11883 int ievent =
11884 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
11885 if (!cb->internal)
11886 continue;
11887 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
11888 (void) printf("%s unrecognized record:\n",
11889 tbuf);
11890 dump_nvlist(rec, 4);
11891 continue;
11892 }
11893 (void) printf("%s [internal %s txg:%lld] %s", tbuf,
11894 zfs_history_event_names[ievent],
11895 (longlong_t)fnvlist_lookup_uint64(
11896 rec, ZPOOL_HIST_TXG),
11897 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
11898 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
11899 if (!cb->internal)
11900 continue;
11901 (void) printf("%s [txg:%lld] %s", tbuf,
11902 (longlong_t)fnvlist_lookup_uint64(
11903 rec, ZPOOL_HIST_TXG),
11904 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
11905 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
11906 (void) printf(" %s (%llu)",
11907 fnvlist_lookup_string(rec,
11908 ZPOOL_HIST_DSNAME),
11909 (u_longlong_t)fnvlist_lookup_uint64(rec,
11910 ZPOOL_HIST_DSID));
11911 }
11912 (void) printf(" %s", fnvlist_lookup_string(rec,
11913 ZPOOL_HIST_INT_STR));
11914 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
11915 if (!cb->internal)
11916 continue;
11917 (void) printf("%s ioctl %s\n", tbuf,
11918 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
11919 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
11920 (void) printf(" input:\n");
11921 dump_nvlist(fnvlist_lookup_nvlist(rec,
11922 ZPOOL_HIST_INPUT_NVL), 8);
11923 }
11924 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
11925 (void) printf(" output:\n");
11926 dump_nvlist(fnvlist_lookup_nvlist(rec,
11927 ZPOOL_HIST_OUTPUT_NVL), 8);
11928 }
11929 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
11930 (void) printf(" output nvlist omitted; "
11931 "original size: %lldKB\n",
11932 (longlong_t)fnvlist_lookup_int64(rec,
11933 ZPOOL_HIST_OUTPUT_SIZE) / 1024);
11934 }
11935 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
11936 (void) printf(" errno: %lld\n",
11937 (longlong_t)fnvlist_lookup_int64(rec,
11938 ZPOOL_HIST_ERRNO));
11939 }
11940 } else {
11941 if (!cb->internal)
11942 continue;
11943 (void) printf("%s unrecognized record:\n", tbuf);
11944 dump_nvlist(rec, 4);
11945 }
11946
11947 if (!cb->longfmt) {
11948 (void) printf("\n");
11949 continue;
11950 }
11951 (void) printf(" [");
11952 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
11953 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
11954 struct passwd *pwd = getpwuid(who);
11955 (void) printf("user %d ", (int)who);
11956 if (pwd != NULL)
11957 (void) printf("(%s) ", pwd->pw_name);
11958 }
11959 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
11960 (void) printf("on %s",
11961 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
11962 }
11963 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
11964 (void) printf(":%s",
11965 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
11966 }
11967
11968 (void) printf("]");
11969 (void) printf("\n");
11970 }
11971 }
11972
11973 /*
11974 * Print out the command history for a specific pool.
11975 */
11976 static int
11977 get_history_one(zpool_handle_t *zhp, void *data)
11978 {
11979 nvlist_t *nvhis;
11980 int ret;
11981 hist_cbdata_t *cb = (hist_cbdata_t *)data;
11982 uint64_t off = 0;
11983 boolean_t eof = B_FALSE;
11984
11985 cb->first = B_FALSE;
11986
11987 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
11988
11989 while (!eof) {
11990 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
11991 return (ret);
11992
11993 print_history_records(nvhis, cb);
11994 nvlist_free(nvhis);
11995 }
11996 (void) printf("\n");
11997
11998 return (ret);
11999 }
12000
12001 /*
12002 * zpool history <pool>
12003 *
12004 * Displays the history of commands that modified pools.
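 *
 * Illustrative invocation ('tank' is a placeholder pool name):
 *
 *	zpool history -il tank    # long format, including internal events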
12005 */
12006 int
12007 zpool_do_history(int argc, char **argv)
12008 {
12009 hist_cbdata_t cbdata = { 0 };
12010 int ret;
12011 int c;
12012
12013 cbdata.first = B_TRUE;
12014 /* check options */
12015 while ((c = getopt(argc, argv, "li")) != -1) {
12016 switch (c) {
12017 case 'l':
12018 cbdata.longfmt = B_TRUE;
12019 break;
12020 case 'i':
12021 cbdata.internal = B_TRUE;
12022 break;
12023 case '?':
12024 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12025 optopt);
12026 usage(B_FALSE);
12027 }
12028 }
12029 argc -= optind;
12030 argv += optind;
12031
12032 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
12033 B_FALSE, get_history_one, &cbdata);
12034
12035 if (argc == 0 && cbdata.first == B_TRUE) {
12036 (void) fprintf(stderr, gettext("no pools available\n"));
12037 return (0);
12038 }
12039
12040 return (ret);
12041 }
12042
12043 typedef struct ev_opts {
12044 int verbose;
12045 int scripted;
12046 int follow;
12047 int clear;
12048 char poolname[ZFS_MAX_DATASET_NAME_LEN];
12049 } ev_opts_t;
12050
12051 static void
12052 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
12053 {
12054 char ctime_str[26], str[32];
12055 const char *ptr;
12056 int64_t *tv;
12057 uint_t n;
12058
12059 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
12060 memset(str, ' ', 32);
12061 (void) ctime_r((const time_t *)&tv[0], ctime_str);
12062 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
12063 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
12064 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
12065 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
12066 if (opts->scripted)
12067 (void) printf(gettext("%s\t"), str);
12068 else
12069 (void) printf(gettext("%s "), str);
12070
12071 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
12072 (void) printf(gettext("%s\n"), ptr);
12073 }
12074
12075 static void
12076 zpool_do_events_nvprint(nvlist_t *nvl, int depth)
12077 {
12078 nvpair_t *nvp;
12079 static char flagstr[256];
12080
12081 for (nvp = nvlist_next_nvpair(nvl, NULL);
12082 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
12083
12084 data_type_t type = nvpair_type(nvp);
12085 const char *name = nvpair_name(nvp);
12086
12087 boolean_t b;
12088 uint8_t i8;
12089 uint16_t i16;
12090 uint32_t i32;
12091 uint64_t i64;
12092 const char *str;
12093 nvlist_t *cnv;
12094
12095 printf(gettext("%*s%s = "), depth, "", name);
12096
12097 switch (type) {
12098 case DATA_TYPE_BOOLEAN:
12099 printf(gettext("%s"), "1");
12100 break;
12101
12102 case DATA_TYPE_BOOLEAN_VALUE:
12103 (void) nvpair_value_boolean_value(nvp, &b);
12104 printf(gettext("%s"), b ? "1" : "0");
12105 break;
12106
12107 case DATA_TYPE_BYTE:
12108 (void) nvpair_value_byte(nvp, &i8);
12109 printf(gettext("0x%x"), i8);
12110 break;
12111
12112 case DATA_TYPE_INT8:
12113 (void) nvpair_value_int8(nvp, (void *)&i8);
12114 printf(gettext("0x%x"), i8);
12115 break;
12116
12117 case DATA_TYPE_UINT8:
12118 (void) nvpair_value_uint8(nvp, &i8);
12119 printf(gettext("0x%x"), i8);
12120 break;
12121
12122 case DATA_TYPE_INT16:
12123 (void) nvpair_value_int16(nvp, (void *)&i16);
12124 printf(gettext("0x%x"), i16);
12125 break;
12126
12127 case DATA_TYPE_UINT16:
12128 (void) nvpair_value_uint16(nvp, &i16);
12129 printf(gettext("0x%x"), i16);
12130 break;
12131
12132 case DATA_TYPE_INT32:
12133 (void) nvpair_value_int32(nvp, (void *)&i32);
12134 printf(gettext("0x%x"), i32);
12135 break;
12136
12137 case DATA_TYPE_UINT32:
12138 (void) nvpair_value_uint32(nvp, &i32);
12139 if (strcmp(name,
12140 FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 ||
12141 strcmp(name,
12142 FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) {
12143 zfs_valstr_zio_stage(i32, flagstr,
12144 sizeof (flagstr));
12145 printf(gettext("0x%x [%s]"), i32, flagstr);
12146 } else if (strcmp(name,
12147 FM_EREPORT_PAYLOAD_ZFS_ZIO_TYPE) == 0) {
12148 zfs_valstr_zio_type(i32, flagstr,
12149 sizeof (flagstr));
12150 printf(gettext("0x%x [%s]"), i32, flagstr);
12151 } else if (strcmp(name,
12152 FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
12153 zfs_valstr_zio_priority(i32, flagstr,
12154 sizeof (flagstr));
12155 printf(gettext("0x%x [%s]"), i32, flagstr);
12156 } else {
12157 printf(gettext("0x%x"), i32);
12158 }
12159 break;
12160
12161 case DATA_TYPE_INT64:
12162 (void) nvpair_value_int64(nvp, (void *)&i64);
12163 printf(gettext("0x%llx"), (u_longlong_t)i64);
12164 break;
12165
12166 case DATA_TYPE_UINT64:
12167 (void) nvpair_value_uint64(nvp, &i64);
12168 /*
12169 * translate vdev state values to readable
12170 * strings to aid zpool events consumers
12171 */
12172 if (strcmp(name,
12173 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
12174 strcmp(name,
12175 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
12176 printf(gettext("\"%s\" (0x%llx)"),
12177 zpool_state_to_name(i64, VDEV_AUX_NONE),
12178 (u_longlong_t)i64);
12179 } else if (strcmp(name,
12180 FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) {
12181 zfs_valstr_zio_flag(i64, flagstr,
12182 sizeof (flagstr));
12183 printf(gettext("0x%llx [%s]"),
12184 (u_longlong_t)i64, flagstr);
12185 } else {
12186 printf(gettext("0x%llx"), (u_longlong_t)i64);
12187 }
12188 break;
12189
12190 case DATA_TYPE_HRTIME:
12191 (void) nvpair_value_hrtime(nvp, (void *)&i64);
12192 printf(gettext("0x%llx"), (u_longlong_t)i64);
12193 break;
12194
12195 case DATA_TYPE_STRING:
12196 (void) nvpair_value_string(nvp, &str);
12197 printf(gettext("\"%s\""), str ? str : "<NULL>");
12198 break;
12199
12200 case DATA_TYPE_NVLIST:
12201 printf(gettext("(embedded nvlist)\n"));
12202 (void) nvpair_value_nvlist(nvp, &cnv);
12203 zpool_do_events_nvprint(cnv, depth + 8);
12204 printf(gettext("%*s(end %s)"), depth, "", name);
12205 break;
12206
12207 case DATA_TYPE_NVLIST_ARRAY: {
12208 nvlist_t **val;
12209 uint_t i, nelem;
12210
12211 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
12212 printf(gettext("(%d embedded nvlists)\n"), nelem);
12213 for (i = 0; i < nelem; i++) {
12214 printf(gettext("%*s%s[%d] = %s\n"),
12215 depth, "", name, i, "(embedded nvlist)");
12216 zpool_do_events_nvprint(val[i], depth + 8);
12217 printf(gettext("%*s(end %s[%i])\n"),
12218 depth, "", name, i);
12219 }
12220 printf(gettext("%*s(end %s)\n"), depth, "", name);
12221 }
12222 break;
12223
12224 case DATA_TYPE_INT8_ARRAY: {
12225 int8_t *val;
12226 uint_t i, nelem;
12227
12228 (void) nvpair_value_int8_array(nvp, &val, &nelem);
12229 for (i = 0; i < nelem; i++)
12230 printf(gettext("0x%x "), val[i]);
12231
12232 break;
12233 }
12234
12235 case DATA_TYPE_UINT8_ARRAY: {
12236 uint8_t *val;
12237 uint_t i, nelem;
12238
12239 (void) nvpair_value_uint8_array(nvp, &val, &nelem);
12240 for (i = 0; i < nelem; i++)
12241 printf(gettext("0x%x "), val[i]);
12242
12243 break;
12244 }
12245
12246 case DATA_TYPE_INT16_ARRAY: {
12247 int16_t *val;
12248 uint_t i, nelem;
12249
12250 (void) nvpair_value_int16_array(nvp, &val, &nelem);
12251 for (i = 0; i < nelem; i++)
12252 printf(gettext("0x%x "), val[i]);
12253
12254 break;
12255 }
12256
12257 case DATA_TYPE_UINT16_ARRAY: {
12258 uint16_t *val;
12259 uint_t i, nelem;
12260
12261 (void) nvpair_value_uint16_array(nvp, &val, &nelem);
12262 for (i = 0; i < nelem; i++)
12263 printf(gettext("0x%x "), val[i]);
12264
12265 break;
12266 }
12267
12268 case DATA_TYPE_INT32_ARRAY: {
12269 int32_t *val;
12270 uint_t i, nelem;
12271
12272 (void) nvpair_value_int32_array(nvp, &val, &nelem);
12273 for (i = 0; i < nelem; i++)
12274 printf(gettext("0x%x "), val[i]);
12275
12276 break;
12277 }
12278
12279 case DATA_TYPE_UINT32_ARRAY: {
12280 uint32_t *val;
12281 uint_t i, nelem;
12282
12283 (void) nvpair_value_uint32_array(nvp, &val, &nelem);
12284 for (i = 0; i < nelem; i++)
12285 printf(gettext("0x%x "), val[i]);
12286
12287 break;
12288 }
12289
12290 case DATA_TYPE_INT64_ARRAY: {
12291 int64_t *val;
12292 uint_t i, nelem;
12293
12294 (void) nvpair_value_int64_array(nvp, &val, &nelem);
12295 for (i = 0; i < nelem; i++)
12296 printf(gettext("0x%llx "),
12297 (u_longlong_t)val[i]);
12298
12299 break;
12300 }
12301
12302 case DATA_TYPE_UINT64_ARRAY: {
12303 uint64_t *val;
12304 uint_t i, nelem;
12305
12306 (void) nvpair_value_uint64_array(nvp, &val, &nelem);
12307 for (i = 0; i < nelem; i++)
12308 printf(gettext("0x%llx "),
12309 (u_longlong_t)val[i]);
12310
12311 break;
12312 }
12313
12314 case DATA_TYPE_STRING_ARRAY: {
12315 const char **str;
12316 uint_t i, nelem;
12317
12318 (void) nvpair_value_string_array(nvp, &str, &nelem);
12319 for (i = 0; i < nelem; i++)
12320 printf(gettext("\"%s\" "),
12321 str[i] ? str[i] : "<NULL>");
12322
12323 break;
12324 }
12325
12326 case DATA_TYPE_BOOLEAN_ARRAY:
12327 case DATA_TYPE_BYTE_ARRAY:
12328 case DATA_TYPE_DOUBLE:
12329 case DATA_TYPE_DONTCARE:
12330 case DATA_TYPE_UNKNOWN:
12331 printf(gettext("<unknown>"));
12332 break;
12333 }
12334
12335 printf(gettext("\n"));
12336 }
12337 }
12338
12339 static int
12340 zpool_do_events_next(ev_opts_t *opts)
12341 {
12342 nvlist_t *nvl;
12343 int zevent_fd, ret, dropped;
12344 const char *pool;
12345
12346 zevent_fd = open(ZFS_DEV, O_RDWR);
12347 VERIFY(zevent_fd >= 0);
12348
12349 if (!opts->scripted)
12350 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
12351
12352 while (1) {
12353 ret = zpool_events_next(g_zfs, &nvl, &dropped,
12354 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
12355 if (ret || nvl == NULL)
12356 break;
12357
12358 if (dropped > 0)
12359 (void) printf(gettext("dropped %d events\n"), dropped);
12360
12361 if (strlen(opts->poolname) > 0 &&
12362 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
12363 strcmp(opts->poolname, pool) != 0)
12364 continue;
12365
12366 zpool_do_events_short(nvl, opts);
12367
12368 if (opts->verbose) {
12369 zpool_do_events_nvprint(nvl, 8);
12370 printf(gettext("\n"));
12371 }
12372 (void) fflush(stdout);
12373
12374 nvlist_free(nvl);
12375 }
12376
12377 VERIFY(0 == close(zevent_fd));
12378
12379 return (ret);
12380 }
12381
12382 static int
12383 zpool_do_events_clear(void)
12384 {
12385 int count, ret;
12386
12387 ret = zpool_events_clear(g_zfs, &count);
12388 if (!ret)
12389 (void) printf(gettext("cleared %d events\n"), count);
12390
12391 return (ret);
12392 }
12393
12394 /*
12395 * zpool events [-vHf [pool] | -c]
12396 *
12397 * Displays event logs generated by ZFS.
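 *
 * Illustrative invocations ('tank' is a placeholder pool name):
 *
 *	zpool events -v tank    # verbose payloads for events from 'tank'
 *	zpool events -f         # follow the event stream
 *	zpool events -c         # clear the kernel event buffer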
12398 */
12399 int
12400 zpool_do_events(int argc, char **argv)
12401 {
12402 ev_opts_t opts = { 0 };
12403 int ret;
12404 int c;
12405
12406 /* check options */
12407 while ((c = getopt(argc, argv, "vHfc")) != -1) {
12408 switch (c) {
12409 case 'v':
12410 opts.verbose = 1;
12411 break;
12412 case 'H':
12413 opts.scripted = 1;
12414 break;
12415 case 'f':
12416 opts.follow = 1;
12417 break;
12418 case 'c':
12419 opts.clear = 1;
12420 break;
12421 case '?':
12422 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12423 optopt);
12424 usage(B_FALSE);
12425 }
12426 }
12427 argc -= optind;
12428 argv += optind;
12429
12430 if (argc > 1) {
12431 (void) fprintf(stderr, gettext("too many arguments\n"));
12432 usage(B_FALSE);
12433 } else if (argc == 1) {
12434 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
12435 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
12436 (void) fprintf(stderr,
12437 gettext("invalid pool name '%s'\n"), opts.poolname);
12438 usage(B_FALSE);
12439 }
12440 }
12441
12442 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
12443 opts.clear) {
12444 (void) fprintf(stderr,
12445 gettext("invalid options combined with -c\n"));
12446 usage(B_FALSE);
12447 }
12448
12449 if (opts.clear)
12450 ret = zpool_do_events_clear();
12451 else
12452 ret = zpool_do_events_next(&opts);
12453
12454 return (ret);
12455 }
12456
12457 static int
12458 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
12459 {
12460 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12461 char value[ZFS_MAXPROPLEN];
12462 zprop_source_t srctype;
12463 nvlist_t *props, *item, *d;
12464 props = item = d = NULL;
12465
12466 if (cbp->cb_json) {
12467 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs");
12468 if (d == NULL) {
12469 fprintf(stderr, "vdevs obj not found.\n");
12470 exit(1);
12471 }
12472 props = fnvlist_alloc();
12473 }
12474
12475 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
12476 pl = pl->pl_next) {
12477 char *prop_name;
12478 /*
12479 * If the first property is pool name, it is a special
12480 * placeholder that we can skip. This will also skip
12481 * over the name property when 'all' is specified.
12482 */
12483 if (pl->pl_prop == ZPOOL_PROP_NAME &&
12484 pl == cbp->cb_proplist)
12485 continue;
12486
12487 if (pl->pl_prop == ZPROP_INVAL) {
12488 prop_name = pl->pl_user_prop;
12489 } else {
12490 prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
12491 }
12492 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
12493 prop_name, value, sizeof (value), &srctype,
12494 cbp->cb_literal) == 0) {
12495 zprop_collect_property(vdevname, cbp, prop_name,
12496 value, srctype, NULL, NULL, props);
12497 }
12498 }
12499
12500 if (cbp->cb_json) {
12501 if (!nvlist_empty(props)) {
12502 item = fnvlist_alloc();
12503 fill_vdev_info(item, zhp, vdevname, B_TRUE,
12504 cbp->cb_json_as_int);
12505 fnvlist_add_nvlist(item, "properties", props);
12506 fnvlist_add_nvlist(d, vdevname, item);
12507 fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d);
12508 fnvlist_free(item);
12509 }
12510 fnvlist_free(props);
12511 }
12512
12513 return (0);
12514 }
12515
12516 static int
12517 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
12518 {
12519 zpool_handle_t *zhp = zhp_data;
12520 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12521 char *vdevname;
12522 const char *type;
12523 int ret;
12524
12525 /*
12526 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
12527 * pool name for display purposes, which is not desired here. Use "root-0"
12528 * directly for the root vdev, and fall back to zpool_vdev_name() otherwise.
12529 */
12530 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
12531 if (zhp != NULL && strcmp(type, "root") == 0)
12532 vdevname = strdup("root-0");
12533 else
12534 vdevname = zpool_vdev_name(g_zfs, zhp, nv,
12535 cbp->cb_vdevs.cb_name_flags);
12536
12537 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
12538
12539 ret = get_callback_vdev(zhp, vdevname, data);
12540
12541 free(vdevname);
12542
12543 return (ret);
12544 }
12545
12546 static int
12547 get_callback(zpool_handle_t *zhp, void *data)
12548 {
12549 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
12550 char value[ZFS_MAXPROPLEN];
12551 zprop_source_t srctype;
12552 zprop_list_t *pl;
12553 int vid;
12554 int err = 0;
12555 nvlist_t *props, *item, *d;
12556 props = item = d = NULL;
12557
12558 if (cbp->cb_type == ZFS_TYPE_VDEV) {
12559 if (cbp->cb_json) {
12560 nvlist_t *pool = fnvlist_alloc();
12561 fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int);
12562 fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool);
12563 fnvlist_free(pool);
12564 }
12565
12566 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
12567 for_each_vdev(zhp, get_callback_vdev_cb, data);
12568 } else {
12569 /* Adjust column widths for vdev properties */
12570 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
12571 vid++) {
12572 vdev_expand_proplist(zhp,
12573 cbp->cb_vdevs.cb_names[vid],
12574 &cbp->cb_proplist);
12575 }
12576 /* Display the properties */
12577 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
12578 vid++) {
12579 get_callback_vdev(zhp,
12580 cbp->cb_vdevs.cb_names[vid], data);
12581 }
12582 }
12583 } else {
12584 assert(cbp->cb_type == ZFS_TYPE_POOL);
12585 if (cbp->cb_json) {
12586 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
12587 if (d == NULL) {
12588 fprintf(stderr, "pools obj not found.\n");
12589 exit(1);
12590 }
12591 props = fnvlist_alloc();
12592 }
12593 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
12594 /*
12595 * Skip the special fake placeholder. This will also
12596 * skip over the name property when 'all' is specified.
12597 */
12598 if (pl->pl_prop == ZPOOL_PROP_NAME &&
12599 pl == cbp->cb_proplist)
12600 continue;
12601
12602 if (pl->pl_prop == ZPROP_INVAL &&
12603 zfs_prop_user(pl->pl_user_prop)) {
12604 srctype = ZPROP_SRC_LOCAL;
12605
12606 if (zpool_get_userprop(zhp, pl->pl_user_prop,
12607 value, sizeof (value), &srctype) != 0)
12608 continue;
12609
12610 err = zprop_collect_property(
12611 zpool_get_name(zhp), cbp, pl->pl_user_prop,
12612 value, srctype, NULL, NULL, props);
12613 } else if (pl->pl_prop == ZPROP_INVAL &&
12614 (zpool_prop_feature(pl->pl_user_prop) ||
12615 zpool_prop_unsupported(pl->pl_user_prop))) {
12616 srctype = ZPROP_SRC_LOCAL;
12617
12618 if (zpool_prop_get_feature(zhp,
12619 pl->pl_user_prop, value,
12620 sizeof (value)) == 0) {
12621 err = zprop_collect_property(
12622 zpool_get_name(zhp), cbp,
12623 pl->pl_user_prop, value, srctype,
12624 NULL, NULL, props);
12625 }
12626 } else {
12627 if (zpool_get_prop(zhp, pl->pl_prop, value,
12628 sizeof (value), &srctype,
12629 cbp->cb_literal) != 0)
12630 continue;
12631
12632 err = zprop_collect_property(
12633 zpool_get_name(zhp), cbp,
12634 zpool_prop_to_name(pl->pl_prop),
12635 value, srctype, NULL, NULL, props);
12636 }
12637 if (err != 0)
12638 return (err);
12639 }
12640
12641 if (cbp->cb_json) {
12642 if (!nvlist_empty(props)) {
12643 item = fnvlist_alloc();
12644 fill_pool_info(item, zhp, B_TRUE,
12645 cbp->cb_json_as_int);
12646 fnvlist_add_nvlist(item, "properties", props);
12647 if (cbp->cb_json_pool_key_guid) {
12648 char buf[256];
12649 uint64_t guid = fnvlist_lookup_uint64(
12650 zpool_get_config(zhp, NULL),
12651 ZPOOL_CONFIG_POOL_GUID);
12652 snprintf(buf, 256, "%llu",
12653 (u_longlong_t)guid);
12654 fnvlist_add_nvlist(d, buf, item);
12655 } else {
12656 const char *name = zpool_get_name(zhp);
12657 fnvlist_add_nvlist(d, name, item);
12658 }
12659 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
12660 fnvlist_free(item);
12661 }
12662 fnvlist_free(props);
12663 }
12664 }
12665
12666 return (0);
12667 }
12668
12669 /*
12670 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
12671 *
12672 * -H Scripted mode. Don't display headers, and separate properties
12673 * by a single tab.
12674 * -o List of columns to display. Defaults to
12675 * "name,property,value,source".
12676 * -p Display values in parsable (exact) format.
12677 * -j Display output in JSON format.
12678 * --json-int Display numbers as integers instead of strings.
12679 * --json-pool-key-guid Set pool GUID as key for pool objects.
12680 *
12681 * Get properties of pools in the system. Output space statistics
12682 * for each one as well as other attributes.
12683 */
12684 int
12685 zpool_do_get(int argc, char **argv)
12686 {
12687 zprop_get_cbdata_t cb = { 0 };
12688 zprop_list_t fake_name = { 0 };
12689 int ret;
12690 int c, i;
12691 char *propstr = NULL;
12692 char *vdev = NULL;
12693 nvlist_t *data = NULL;
12694
12695 cb.cb_first = B_TRUE;
12696
12697 /*
12698 * Set up default columns and sources.
12699 */
12700 cb.cb_sources = ZPROP_SRC_ALL;
12701 cb.cb_columns[0] = GET_COL_NAME;
12702 cb.cb_columns[1] = GET_COL_PROPERTY;
12703 cb.cb_columns[2] = GET_COL_VALUE;
12704 cb.cb_columns[3] = GET_COL_SOURCE;
12705 cb.cb_type = ZFS_TYPE_POOL;
12706 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
12707 current_prop_type = cb.cb_type;
12708
12709 struct option long_options[] = {
12710 {"json", no_argument, NULL, 'j'},
12711 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
12712 {"json-pool-key-guid", no_argument, NULL,
12713 ZPOOL_OPTION_POOL_KEY_GUID},
12714 {0, 0, 0, 0}
12715 };
12716
12717 /* check options */
12718 while ((c = getopt_long(argc, argv, ":jHpo:", long_options,
12719 NULL)) != -1) {
12720 switch (c) {
12721 case 'p':
12722 cb.cb_literal = B_TRUE;
12723 break;
12724 case 'H':
12725 cb.cb_scripted = B_TRUE;
12726 break;
12727 case 'j':
12728 cb.cb_json = B_TRUE;
12729 cb.cb_jsobj = zpool_json_schema(0, 1);
12730 data = fnvlist_alloc();
12731 break;
12732 case ZPOOL_OPTION_POOL_KEY_GUID:
12733 cb.cb_json_pool_key_guid = B_TRUE;
12734 break;
12735 case ZPOOL_OPTION_JSON_NUMS_AS_INT:
12736 cb.cb_json_as_int = B_TRUE;
12737 cb.cb_literal = B_TRUE;
12738 break;
12739 case 'o':
12740 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
12741 i = 0;
12742
12743 for (char *tok; (tok = strsep(&optarg, ",")); ) {
12744 static const char *const col_opts[] =
12745 { "name", "property", "value", "source",
12746 "all" };
12747 static const zfs_get_column_t col_cols[] =
12748 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
12749 GET_COL_SOURCE };
12750
12751 if (i == ZFS_GET_NCOLS - 1) {
12752 (void) fprintf(stderr, gettext("too "
12753 "many fields given to -o "
12754 "option\n"));
12755 usage(B_FALSE);
12756 }
12757
12758 for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
12759 if (strcmp(tok, col_opts[c]) == 0)
12760 goto found;
12761
12762 (void) fprintf(stderr,
12763 gettext("invalid column name '%s'\n"), tok);
12764 usage(B_FALSE);
12765
12766 found:
12767 if (c >= 4) {
12768 if (i > 0) {
12769 (void) fprintf(stderr,
12770 gettext("\"all\" conflicts "
12771 "with specific fields "
12772 "given to -o option\n"));
12773 usage(B_FALSE);
12774 }
12775
12776 memcpy(cb.cb_columns, col_cols,
12777 sizeof (col_cols));
12778 i = ZFS_GET_NCOLS - 1;
12779 } else
12780 cb.cb_columns[i++] = col_cols[c];
12781 }
12782 break;
12783 case '?':
12784 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
12785 optopt);
12786 usage(B_FALSE);
12787 }
12788 }
12789
12790 argc -= optind;
12791 argv += optind;
12792
12793 if (!cb.cb_json && cb.cb_json_as_int) {
12794 (void) fprintf(stderr, gettext("'--json-int' only works with"
12795 " '-j' option\n"));
12796 usage(B_FALSE);
12797 }
12798
12799 if (!cb.cb_json && cb.cb_json_pool_key_guid) {
12800 (void) fprintf(stderr, gettext("'json-pool-key-guid' only"
12801 " works with '-j' option\n"));
12802 usage(B_FALSE);
12803 }
12804
12805 if (argc < 1) {
12806 (void) fprintf(stderr, gettext("missing property "
12807 "argument\n"));
12808 usage(B_FALSE);
12809 }
12810
12811 /* Properties list is needed later by zprop_get_list() */
12812 propstr = argv[0];
12813
12814 argc--;
12815 argv++;
12816
12817 if (argc == 0) {
12818 /* No args, so just print the defaults. */
12819 } else if (are_all_pools(argc, argv)) {
12820 /* All the args are pool names */
12821 } else if (are_all_pools(1, argv)) {
12822 /* The first arg is a pool name */
12823 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
12824 (argc == 2 && strcmp(argv[1], "root") == 0) ||
12825 are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
12826 &cb.cb_vdevs)) {
12827
12828 if (strcmp(argv[1], "root") == 0)
12829 vdev = strdup("root-0");
12830
12831 /* ... and the rest are vdev names */
12832 if (vdev == NULL)
12833 cb.cb_vdevs.cb_names = argv + 1;
12834 else
12835 cb.cb_vdevs.cb_names = &vdev;
12836
12837 cb.cb_vdevs.cb_names_count = argc - 1;
12838 cb.cb_type = ZFS_TYPE_VDEV;
12839 argc = 1; /* One pool to process */
12840 } else {
12841 if (cb.cb_json) {
12842 nvlist_free(cb.cb_jsobj);
12843 nvlist_free(data);
12844 }
12845 fprintf(stderr, gettext("Expected a list of vdevs in"
12846 " \"%s\", but got:\n"), argv[0]);
12847 error_list_unresolved_vdevs(argc - 1, argv + 1,
12848 argv[0], &cb.cb_vdevs);
12849 fprintf(stderr, "\n");
12850 usage(B_FALSE);
12851 return (1);
12852 }
12853 } else {
12854 if (cb.cb_json) {
12855 nvlist_free(cb.cb_jsobj);
12856 nvlist_free(data);
12857 }
12858 /*
12859 * The first arg isn't the name of a valid pool.
12860 */
12861 fprintf(stderr, gettext("Cannot get properties of %s: "
12862 "no such pool available.\n"), argv[0]);
12863 return (1);
12864 }
12865
12866 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
12867 cb.cb_type) != 0) {
12868 /* Use correct list of valid properties (pool or vdev) */
12869 current_prop_type = cb.cb_type;
12870 usage(B_FALSE);
12871 }
12872
12873 if (cb.cb_proplist != NULL) {
12874 fake_name.pl_prop = ZPOOL_PROP_NAME;
12875 fake_name.pl_width = strlen(gettext("NAME"));
12876 fake_name.pl_next = cb.cb_proplist;
12877 cb.cb_proplist = &fake_name;
12878 }
12879
12880 if (cb.cb_json) {
12881 if (cb.cb_type == ZFS_TYPE_VDEV)
12882 fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data);
12883 else
12884 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
12885 fnvlist_free(data);
12886 }
12887
12888 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
12889 cb.cb_literal, get_callback, &cb);
12890
12891 if (ret == 0 && cb.cb_json)
12892 zcmd_print_json(cb.cb_jsobj);
12893 else if (ret != 0 && cb.cb_json)
12894 nvlist_free(cb.cb_jsobj);
12895
12896 if (cb.cb_proplist == &fake_name)
12897 zprop_free_list(fake_name.pl_next);
12898 else
12899 zprop_free_list(cb.cb_proplist);
12900
12901 if (vdev != NULL)
12902 free(vdev);
12903
12904 return (ret);
12905 }
12906
12907 typedef struct set_cbdata {
12908 char *cb_propname;
12909 char *cb_value;
12910 zfs_type_t cb_type;
12911 vdev_cbdata_t cb_vdevs;
12912 boolean_t cb_any_successful;
12913 } set_cbdata_t;
12914
12915 static int
12916 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
12917 {
12918 int error;
12919
12920 /* Warn about enabled features not present in the new compatibility set */
12921 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
12922 boolean_t features[SPA_FEATURES];
12923 if (zpool_do_load_compat(cb->cb_value, features) !=
12924 ZPOOL_COMPATIBILITY_OK)
12925 return (-1);
12926
12927 nvlist_t *enabled = zpool_get_features(zhp);
12928 spa_feature_t i;
12929 for (i = 0; i < SPA_FEATURES; i++) {
12930 const char *fguid = spa_feature_table[i].fi_guid;
12931 if (nvlist_exists(enabled, fguid) && !features[i])
12932 break;
12933 }
12934 if (i < SPA_FEATURES)
12935 (void) fprintf(stderr, gettext("Warning: one or "
12936 "more features already enabled on pool '%s'\n"
12937 "are not present in this compatibility set.\n"),
12938 zpool_get_name(zhp));
12939 }
12940
12941 /* if we're setting a feature, check that it's in the compatibility set */
12942 if (zpool_prop_feature(cb->cb_propname) &&
12943 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
12944 char *fname = strchr(cb->cb_propname, '@') + 1;
12945 spa_feature_t f;
12946
12947 if (zfeature_lookup_name(fname, &f) == 0) {
12948 char compat[ZFS_MAXPROPLEN];
12949 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
12950 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
12951 compat[0] = '\0';
12952
12953 boolean_t features[SPA_FEATURES];
12954 if (zpool_do_load_compat(compat, features) !=
12955 ZPOOL_COMPATIBILITY_OK) {
12956 (void) fprintf(stderr, gettext("Error: "
12957 "cannot enable feature '%s' on pool '%s'\n"
12958 "because the pool's 'compatibility' "
12959 "property cannot be parsed.\n"),
12960 fname, zpool_get_name(zhp));
12961 return (-1);
12962 }
12963
12964 if (!features[f]) {
12965 (void) fprintf(stderr, gettext("Error: "
12966 "cannot enable feature '%s' on pool '%s'\n"
12967 "as it is not specified in this pool's "
12968 "current compatibility set.\n"
12969 "Consider setting 'compatibility' to a "
12970 "less restrictive set, or to 'off'.\n"),
12971 fname, zpool_get_name(zhp));
12972 return (-1);
12973 }
12974 }
12975 }
12976
12977 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
12978
12979 return (error);
12980 }
12981
12982 static int
12983 set_callback(zpool_handle_t *zhp, void *data)
12984 {
12985 int error;
12986 set_cbdata_t *cb = (set_cbdata_t *)data;
12987
12988 if (cb->cb_type == ZFS_TYPE_VDEV) {
12989 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
12990 cb->cb_propname, cb->cb_value);
12991 } else {
12992 assert(cb->cb_type == ZFS_TYPE_POOL);
12993 error = set_pool_callback(zhp, cb);
12994 }
12995
12996 cb->cb_any_successful = !error;
12997 return (error);
12998 }
12999
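/*
 * zpool set property=value <pool>
 * zpool set property=value <pool> <vdev>
 *
 * Sets the given property on the named pool, or on one of its vdevs when a
 * vdev is also named. Illustrative example ('tank' is a placeholder):
 *
 *	zpool set autotrim=on tank
 */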
13000 int
13001 zpool_do_set(int argc, char **argv)
13002 {
13003 set_cbdata_t cb = { 0 };
13004 int error;
13005 char *vdev = NULL;
13006
13007 current_prop_type = ZFS_TYPE_POOL;
13008 if (argc > 1 && argv[1][0] == '-') {
13009 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
13010 argv[1][1]);
13011 usage(B_FALSE);
13012 }
13013
13014 if (argc < 2) {
13015 (void) fprintf(stderr, gettext("missing property=value "
13016 "argument\n"));
13017 usage(B_FALSE);
13018 }
13019
13020 if (argc < 3) {
13021 (void) fprintf(stderr, gettext("missing pool name\n"));
13022 usage(B_FALSE);
13023 }
13024
13025 if (argc > 4) {
13026 (void) fprintf(stderr, gettext("too many pool names\n"));
13027 usage(B_FALSE);
13028 }
13029
13030 cb.cb_propname = argv[1];
13031 cb.cb_type = ZFS_TYPE_POOL;
13032 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
13033 cb.cb_value = strchr(cb.cb_propname, '=');
13034 if (cb.cb_value == NULL) {
13035 (void) fprintf(stderr, gettext("missing value in "
13036 "property=value argument\n"));
13037 usage(B_FALSE);
13038 }
13039
13040 *(cb.cb_value) = '\0';
13041 cb.cb_value++;
13042 argc -= 2;
13043 argv += 2;
13044
13045 /* argv[0] is pool name */
13046 if (!is_pool(argv[0])) {
13047 (void) fprintf(stderr,
13048 gettext("cannot open '%s': is not a pool\n"), argv[0]);
13049 return (EINVAL);
13050 }
13051
13052 /* argv[1], when supplied, is vdev name */
13053 if (argc == 2) {
13054
13055 if (strcmp(argv[1], "root") == 0)
13056 vdev = strdup("root-0");
13057 else
13058 vdev = strdup(argv[1]);
13059
13060 if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
13061 (void) fprintf(stderr, gettext(
13062 "cannot find '%s' in '%s': device not in pool\n"),
13063 vdev, argv[0]);
13064 free(vdev);
13065 return (EINVAL);
13066 }
13067 cb.cb_vdevs.cb_names = &vdev;
13068 cb.cb_vdevs.cb_names_count = 1;
13069 cb.cb_type = ZFS_TYPE_VDEV;
13070 }
13071
13072 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
13073 B_FALSE, set_callback, &cb);
13074
13075 if (vdev != NULL)
13076 free(vdev);
13077
13078 return (error);
13079 }
13080
13081 /* Add up the total number of bytes left to initialize/trim across all vdevs */
13082 static uint64_t
13083 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
13084 {
13085 uint64_t bytes_remaining;
13086 nvlist_t **child;
13087 uint_t c, children;
13088 vdev_stat_t *vs;
13089
13090 assert(activity == ZPOOL_WAIT_INITIALIZE ||
13091 activity == ZPOOL_WAIT_TRIM);
13092
13093 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
13094 (uint64_t **)&vs, &c) == 0);
13095
13096 if (activity == ZPOOL_WAIT_INITIALIZE &&
13097 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
13098 bytes_remaining = vs->vs_initialize_bytes_est -
13099 vs->vs_initialize_bytes_done;
13100 else if (activity == ZPOOL_WAIT_TRIM &&
13101 vs->vs_trim_state == VDEV_TRIM_ACTIVE)
13102 bytes_remaining = vs->vs_trim_bytes_est -
13103 vs->vs_trim_bytes_done;
13104 else
13105 bytes_remaining = 0;
13106
13107 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13108 &child, &children) != 0)
13109 children = 0;
13110
13111 for (c = 0; c < children; c++)
13112 bytes_remaining += vdev_activity_remaining(child[c], activity);
13113
13114 return (bytes_remaining);
13115 }
13116
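/*
 * Illustrative sketch (not part of the original source): one way a caller
 * might total the TRIM work remaining across a pool's vdev tree using the
 * helper above. 'zhp' is assumed to be a valid, open zpool_handle_t, and
 * 'example_trim_bytes_left' is a hypothetical name.
 */
#if 0	/* example only */
static uint64_t
example_trim_bytes_left(zpool_handle_t *zhp)
{
	nvlist_t *config = zpool_get_config(zhp, NULL);
	nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE);

	/* Recursively sums vs_trim_bytes_est - vs_trim_bytes_done. */
	return (vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM));
}
#endif
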
13117 /* Add up the total number of bytes left to rebuild across top-level vdevs */
13118 static uint64_t
13119 vdev_activity_top_remaining(nvlist_t *nv)
13120 {
13121 uint64_t bytes_remaining = 0;
13122 nvlist_t **child;
13123 uint_t children;
13124 int error;
13125
13126 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13127 &child, &children) != 0)
13128 children = 0;
13129
13130 for (uint_t c = 0; c < children; c++) {
13131 vdev_rebuild_stat_t *vrs;
13132 uint_t i;
13133
13134 error = nvlist_lookup_uint64_array(child[c],
13135 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
13136 if (error == 0) {
13137 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
13138 bytes_remaining += (vrs->vrs_bytes_est -
13139 vrs->vrs_bytes_rebuilt);
13140 }
13141 }
13142 }
13143
13144 return (bytes_remaining);
13145 }
13146
13147 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
13148 static boolean_t
13149 vdev_any_spare_replacing(nvlist_t *nv)
13150 {
13151 nvlist_t **child;
13152 uint_t c, children;
13153 const char *vdev_type;
13154
13155 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
13156
13157 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
13158 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
13159 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
13160 return (B_TRUE);
13161 }
13162
13163 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
13164 &child, &children) != 0)
13165 children = 0;
13166
13167 for (c = 0; c < children; c++) {
13168 if (vdev_any_spare_replacing(child[c]))
13169 return (B_TRUE);
13170 }
13171
13172 return (B_FALSE);
13173 }
13174
13175 typedef struct wait_data {
13176 char *wd_poolname;
13177 boolean_t wd_scripted;
13178 boolean_t wd_exact;
13179 boolean_t wd_headers_once;
13180 boolean_t wd_should_exit;
13181 /* Which activities to wait for */
13182 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
13183 float wd_interval;
13184 pthread_cond_t wd_cv;
13185 pthread_mutex_t wd_mutex;
13186 } wait_data_t;
13187
13188 /*
13189 * Print to stdout a single line, containing one column for each activity that
13190 * we are waiting for, specifying how many bytes of work are left for that
13191 * activity.
13192 */
13193 static void
13194 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
13195 {
13196 nvlist_t *config, *nvroot;
13197 uint_t c;
13198 int i;
13199 pool_checkpoint_stat_t *pcs = NULL;
13200 pool_scan_stat_t *pss = NULL;
13201 pool_removal_stat_t *prs = NULL;
13202 pool_raidz_expand_stat_t *pres = NULL;
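/* One header per ZPOOL_WAIT_* activity, in enum order */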
13203 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
13204 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
13205 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
13206
13207 /* Calculate the width of each column */
13208 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13209 /*
13210 * Make sure we have enough space in the col for pretty-printed
13211 * numbers and for the column header, and then leave a couple
13212 * spaces between cols for readability.
13213 */
13214 col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
13215 }
13216
13217 if (timestamp_fmt != NODATE)
13218 print_timestamp(timestamp_fmt);
13219
13220 /* Print header if appropriate */
13221 int term_height = terminal_height();
13222 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
13223 row % (term_height-1) == 0);
13224 if (!wd->wd_scripted && (row == 0 || reprint_header)) {
13225 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13226 if (wd->wd_enabled[i])
13227 (void) printf("%*s", col_widths[i], headers[i]);
13228 }
13229 (void) fputc('\n', stdout);
13230 }
13231
13232 /* Bytes of work remaining in each activity */
13233 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
13234
13235 bytes_rem[ZPOOL_WAIT_FREE] =
13236 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
13237
13238 config = zpool_get_config(zhp, NULL);
13239 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
13240
13241 (void) nvlist_lookup_uint64_array(nvroot,
13242 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
13243 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
13244 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
13245
13246 (void) nvlist_lookup_uint64_array(nvroot,
13247 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
13248 if (prs != NULL && prs->prs_state == DSS_SCANNING)
13249 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
13250 prs->prs_copied;
13251
13252 (void) nvlist_lookup_uint64_array(nvroot,
13253 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
13254 if (pss != NULL && pss->pss_state == DSS_SCANNING &&
13255 pss->pss_pass_scrub_pause == 0) {
13256 int64_t rem = pss->pss_to_examine - pss->pss_issued;
13257 if (pss->pss_func == POOL_SCAN_SCRUB)
13258 bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
13259 else
13260 bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
13261 } else if (check_rebuilding(nvroot, NULL)) {
13262 bytes_rem[ZPOOL_WAIT_RESILVER] =
13263 vdev_activity_top_remaining(nvroot);
13264 }
13265
13266 (void) nvlist_lookup_uint64_array(nvroot,
13267 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
13268 if (pres != NULL && pres->pres_state == DSS_SCANNING) {
13269 int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
13270 bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
13271 }
13272
13273 bytes_rem[ZPOOL_WAIT_INITIALIZE] =
13274 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
13275 bytes_rem[ZPOOL_WAIT_TRIM] =
13276 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
13277
13278 /*
13279 * A replace finishes after resilvering finishes, so the amount of work
13280 * left for a replace is the same as for resilvering.
13281 *
13282 * It isn't quite correct to say that if we have any 'spare' or
13283 * 'replacing' vdevs and a resilver is happening, then a replace is in
13284 * progress, like we do here. When a hot spare is used, the faulted vdev
13285 * is not removed after the hot spare is resilvered, so the parent 'spare'
13286 * vdev is not removed either. Thus we could have a 'spare' vdev, but be
13287 * resilvering for a different reason. However, we use it as a heuristic
13288 * because we don't have access to the DTLs, which could tell us whether
13289 * or not we have really finished resilvering a hot spare.
13290 */
13291 if (vdev_any_spare_replacing(nvroot))
13292 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
13293
13294 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13295 char buf[64];
13296 if (!wd->wd_enabled[i])
13297 continue;
13298
13299 if (wd->wd_exact) {
13300 (void) snprintf(buf, sizeof (buf), "%" PRIi64,
13301 bytes_rem[i]);
13302 } else {
13303 zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
13304 }
13305
13306 if (wd->wd_scripted)
13307 (void) printf(i == 0 ? "%s" : "\t%s", buf);
13308 else
13309 (void) printf(" %*s", col_widths[i] - 1, buf);
13310 }
13311 (void) printf("\n");
13312 (void) fflush(stdout);
13313 }
13314
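/*
 * Thread body for the 'zpool wait' status display: periodically refresh the
 * pool's stats and print one row of remaining work per interval, until the
 * main thread signals wd_cv or the pool goes away.
 */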
13315 static void *
13316 wait_status_thread(void *arg)
13317 {
13318 wait_data_t *wd = (wait_data_t *)arg;
13319 zpool_handle_t *zhp;
13320
13321 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
13322 return (void *)(uintptr_t)(1);
13323
13324 for (int row = 0; ; row++) {
13325 boolean_t missing;
13326 struct timespec timeout;
13327 int ret = 0;
13328 (void) clock_gettime(CLOCK_REALTIME, &timeout);
13329
13330 if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
13331 zpool_props_refresh(zhp) != 0) {
13332 zpool_close(zhp);
13333 return (void *)(uintptr_t)(missing ? 0 : 1);
13334 }
13335
13336 print_wait_status_row(wd, zhp, row);
13337
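/*
 * Compute the absolute wakeup time: the whole seconds of the (possibly
 * fractional) interval go into tv_sec and the remainder into tv_nsec,
 * normalizing tv_nsec into [0, NANOSEC).
 */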
13338 timeout.tv_sec += floor(wd->wd_interval);
13339 long nanos = timeout.tv_nsec +
13340 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
13341 if (nanos >= NANOSEC) {
13342 timeout.tv_sec++;
13343 timeout.tv_nsec = nanos - NANOSEC;
13344 } else {
13345 timeout.tv_nsec = nanos;
13346 }
13347 pthread_mutex_lock(&wd->wd_mutex);
13348 if (!wd->wd_should_exit)
13349 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
13350 &timeout);
13351 pthread_mutex_unlock(&wd->wd_mutex);
13352 if (ret == 0) {
13353 break; /* signaled by main thread */
13354 } else if (ret != ETIMEDOUT) {
13355 (void) fprintf(stderr, gettext("pthread_cond_timedwait "
13356 "failed: %s\n"), strerror(ret));
13357 zpool_close(zhp);
13358 return (void *)(uintptr_t)(1);
13359 }
13360 }
13361
13362 zpool_close(zhp);
13363 return (void *)(0);
13364 }
13365
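/*
 * zpool wait [-Hpn] [-T d|u] [-t <activity>[,...]] <pool> [interval]
 *
 *	-H	Scripted mode, suppress headers
 *	-n	Print headers only once, not with every status update
 *	-p	Print exact (parsable) byte counts
 *	-T	Display a timestamp in date(1) or Unix format
 *	-t	Only wait for the comma-separated list of activities
 *
 * Block until none of the selected activities are in progress in the pool.
 * If an interval is supplied, print the remaining work for each enabled
 * activity every interval seconds until the wait completes.
 */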
13366 int
13367 zpool_do_wait(int argc, char **argv)
13368 {
13369 boolean_t verbose = B_FALSE;
13370 int c, i;
13371 unsigned long count;
13372 pthread_t status_thr;
13373 int error = 0;
13374 zpool_handle_t *zhp;
13375
13376 wait_data_t wd;
13377 wd.wd_scripted = B_FALSE;
13378 wd.wd_exact = B_FALSE;
13379 wd.wd_headers_once = B_FALSE;
13380 wd.wd_should_exit = B_FALSE;
13381
13382 pthread_mutex_init(&wd.wd_mutex, NULL);
13383 pthread_cond_init(&wd.wd_cv, NULL);
13384
13385 /* By default, wait for all types of activity. */
13386 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
13387 wd.wd_enabled[i] = B_TRUE;
13388
13389 while ((c = getopt(argc, argv, "HnpT:t:")) != -1) {
13390 switch (c) {
13391 case 'H':
13392 wd.wd_scripted = B_TRUE;
13393 break;
13394 case 'n':
13395 wd.wd_headers_once = B_TRUE;
13396 break;
13397 case 'p':
13398 wd.wd_exact = B_TRUE;
13399 break;
13400 case 'T':
13401 get_timestamp_arg(*optarg);
13402 break;
13403 case 't':
13404 /* Reset activities array */
13405 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
13406
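/*
 * The position of each name in col_opts must match the corresponding
 * ZPOOL_WAIT_* enum value, since the match index is used directly as
 * the activity index.
 */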
13407 for (char *tok; (tok = strsep(&optarg, ",")); ) {
13408 static const char *const col_opts[] = {
13409 "discard", "free", "initialize", "replace",
13410 "remove", "resilver", "scrub", "trim",
13411 "raidz_expand" };
13412
13413 for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
13414 if (strcmp(tok, col_opts[i]) == 0) {
13415 wd.wd_enabled[i] = B_TRUE;
13416 goto found;
13417 }
13418
13419 (void) fprintf(stderr,
13420 gettext("invalid activity '%s'\n"), tok);
13421 usage(B_FALSE);
13422 found:;
13423 }
13424 break;
13425 case '?':
13426 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
13427 optopt);
13428 usage(B_FALSE);
13429 }
13430 }
13431
13432 argc -= optind;
13433 argv += optind;
13434
13435 get_interval_count(&argc, argv, &wd.wd_interval, &count);
13436 if (count != 0) {
13437 /* This subcmd only accepts an interval, not a count */
13438 (void) fprintf(stderr, gettext("too many arguments\n"));
13439 usage(B_FALSE);
13440 }
13441
13442 if (wd.wd_interval != 0)
13443 verbose = B_TRUE;
13444
13445 if (argc < 1) {
13446 (void) fprintf(stderr, gettext("missing 'pool' argument\n"));
13447 usage(B_FALSE);
13448 }
13449 if (argc > 1) {
13450 (void) fprintf(stderr, gettext("too many arguments\n"));
13451 usage(B_FALSE);
13452 }
13453
13454 wd.wd_poolname = argv[0];
13455
13456 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
13457 return (1);
13458
13459 if (verbose) {
13460 /*
13461 * We use a separate thread for printing status updates because
13462 * the main thread will call lzc_wait(), which blocks as long
13463 * as an activity is in progress, which can be a long time.
13464 */
13465 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
13466 != 0) {
13467 (void) fprintf(stderr, gettext("failed to create status"
13468 "thread: %s\n"), strerror(errno));
13469 zpool_close(zhp);
13470 return (1);
13471 }
13472 }
13473
13474 /*
13475 * Loop over all activities that we are supposed to wait for until none
13476 * of them are in progress. Note that this means we can end up waiting
13477 * for more activities to complete than just those that were in progress
13478 * when we began waiting; if an activity we are interested in begins
13479 * while we are waiting for another activity, we will wait for both to
13480 * complete before exiting.
13481 */
13482 for (;;) {
13483 boolean_t missing = B_FALSE;
13484 boolean_t any_waited = B_FALSE;
13485
13486 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
13487 boolean_t waited;
13488
13489 if (!wd.wd_enabled[i])
13490 continue;
13491
13492 error = zpool_wait_status(zhp, i, &missing, &waited);
13493 if (error != 0 || missing)
13494 break;
13495
13496 any_waited = (any_waited || waited);
13497 }
13498
13499 if (error != 0 || missing || !any_waited)
13500 break;
13501 }
13502
13503 zpool_close(zhp);
13504
13505 if (verbose) {
13506 uintptr_t status;
13507 pthread_mutex_lock(&wd.wd_mutex);
13508 wd.wd_should_exit = B_TRUE;
13509 pthread_cond_signal(&wd.wd_cv);
13510 pthread_mutex_unlock(&wd.wd_mutex);
13511 (void) pthread_join(status_thr, (void *)&status);
13512 if (status != 0)
13513 error = status;
13514 }
13515
13516 pthread_mutex_destroy(&wd.wd_mutex);
13517 pthread_cond_destroy(&wd.wd_cv);
13518 return (error);
13519 }
13520
13521 /*
13522 * zpool ddtprune -d|-p <amount> <pool>
13523 *
13524 * -d <days> Prune entries <days> old and older
13525 * -p <percent> Prune <percent> amount of entries
13526 *
13527 * Prune single-reference entries from the DDT to satisfy the amount specified.
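 *
 * For example, 'zpool ddtprune -p 10 <pool>' prunes 10% of the eligible
 * single-reference entries from the pool's dedup table.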
13528 */
13529 int
13530 zpool_do_ddt_prune(int argc, char **argv)
13531 {
13532 zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE;
13533 uint64_t amount = 0;
13534 zpool_handle_t *zhp;
13535 char *endptr;
13536 int c;
13537
13538 while ((c = getopt(argc, argv, "d:p:")) != -1) {
13539 switch (c) {
13540 case 'd':
13541 if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) {
13542 (void) fprintf(stderr, gettext("-d cannot be "
13543 "combined with -p option\n"));
13544 usage(B_FALSE);
13545 }
13546 errno = 0;
13547 amount = strtoull(optarg, &endptr, 0);
13548 if (errno != 0 || *endptr != '\0' || amount == 0) {
13549 (void) fprintf(stderr,
13550 gettext("invalid days value\n"));
13551 usage(B_FALSE);
13552 }
13553 amount *= 86400; /* convert days to seconds */
13554 unit = ZPOOL_DDT_PRUNE_AGE;
13555 break;
13556 case 'p':
13557 if (unit == ZPOOL_DDT_PRUNE_AGE) {
13558 (void) fprintf(stderr, gettext("-p cannot be "
13559 "combined with -d option\n"));
13560 usage(B_FALSE);
13561 }
13562 errno = 0;
13563 amount = strtoull(optarg, &endptr, 0);
13564 if (errno != 0 || *endptr != '\0' ||
13565 amount == 0 || amount > 100) {
13566 (void) fprintf(stderr,
13567 gettext("invalid percentage value\n"));
13568 usage(B_FALSE);
13569 }
13570 unit = ZPOOL_DDT_PRUNE_PERCENTAGE;
13571 break;
13572 case '?':
13573 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
13574 optopt);
13575 usage(B_FALSE);
13576 }
13577 }
13578 argc -= optind;
13579 argv += optind;
13580
13581 if (unit == ZPOOL_DDT_PRUNE_NONE) {
13582 (void) fprintf(stderr,
13583 gettext("missing amount option (-d|-p <value>)\n"));
13584 usage(B_FALSE);
13585 } else if (argc < 1) {
13586 (void) fprintf(stderr, gettext("missing pool argument\n"));
13587 usage(B_FALSE);
13588 } else if (argc > 1) {
13589 (void) fprintf(stderr, gettext("too many arguments\n"));
13590 usage(B_FALSE);
13591 }
13592 zhp = zpool_open(g_zfs, argv[0]);
13593 if (zhp == NULL)
13594 return (-1);
13595
13596 int error = zpool_ddt_prune(zhp, unit, amount);
13597
13598 zpool_close(zhp);
13599
13600 return (error);
13601 }
13602
13603 static int
13604 find_command_idx(const char *command, int *idx)
13605 {
13606 for (int i = 0; i < NCOMMAND; ++i) {
13607 if (command_table[i].name == NULL)
13608 continue;
13609
13610 if (strcmp(command, command_table[i].name) == 0) {
13611 *idx = i;
13612 return (0);
13613 }
13614 }
13615 return (1);
13616 }
13617
13618 /*
13619 * Display version message
13620 */
13621 static int
13622 zpool_do_version(int argc, char **argv)
13623 {
13624 int c;
13625 nvlist_t *jsobj = NULL, *zfs_ver = NULL;
13626 boolean_t json = B_FALSE;
13627
13628 struct option long_options[] = {
13629 {"json", no_argument, NULL, 'j'},
{0, 0, 0, 0}
13630 };
13631
13632 while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
13633 switch (c) {
13634 case 'j':
13635 json = B_TRUE;
13636 jsobj = zpool_json_schema(0, 1);
13637 break;
13638 case '?':
13639 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
13640 optopt);
13641 usage(B_FALSE);
13642 }
13643 }
13644
13645 argc -= optind;
13646 if (argc != 0) {
13647 (void) fprintf(stderr, "too many arguments\n");
13648 usage(B_FALSE);
13649 }
13650
13651 if (json) {
13652 zfs_ver = zfs_version_nvlist();
13653 if (zfs_ver) {
13654 fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);
13655 zcmd_print_json(jsobj);
13656 fnvlist_free(zfs_ver);
13657 return (0);
13658 } else
13659 return (-1);
13660 } else
13661 return (zfs_version_print() != 0);
13662 }
13663
13664 /* Display documentation */
13665 static int
13666 zpool_do_help(int argc, char **argv)
13667 {
13668 char page[MAXNAMELEN];
13669 if (argc < 3 || strcmp(argv[2], "zpool") == 0)
13670 (void) strlcpy(page, "zpool", sizeof (page));
13671 else if (strcmp(argv[2], "concepts") == 0 ||
13672 strcmp(argv[2], "props") == 0)
13673 snprintf(page, sizeof (page), "zpool%s", argv[2]);
13674 else
13675 snprintf(page, sizeof (page), "zpool-%s", argv[2]);
13676
13677 execlp("man", "man", page, NULL);
13678
13679 fprintf(stderr, "couldn't run man program: %s", strerror(errno));
13680 return (-1);
13681 }
13682
13683 /*
13684 * Do zpool_load_compat() and print error message on failure
13685 */
13686 static zpool_compat_status_t
13687 zpool_do_load_compat(const char *compat, boolean_t *list)
13688 {
13689 char report[1024];
13690
13691 zpool_compat_status_t ret;
13692
13693 ret = zpool_load_compat(compat, list, report, sizeof (report));
13694 switch (ret) {
13695
13696 case ZPOOL_COMPATIBILITY_OK:
13697 break;
13698
13699 case ZPOOL_COMPATIBILITY_NOFILES:
13700 case ZPOOL_COMPATIBILITY_BADFILE:
13701 case ZPOOL_COMPATIBILITY_BADTOKEN:
13702 (void) fprintf(stderr, "Error: %s\n", report);
13703 break;
13704
13705 case ZPOOL_COMPATIBILITY_WARNTOKEN:
13706 (void) fprintf(stderr, "Warning: %s\n", report);
13707 ret = ZPOOL_COMPATIBILITY_OK;
13708 break;
13709 }
13710 return (ret);
13711 }
13712
13713 int
13714 main(int argc, char **argv)
13715 {
13716 int ret = 0;
13717 int i = 0;
13718 char *cmdname;
13719 char **newargv;
13720
13721 (void) setlocale(LC_ALL, "");
13722 (void) setlocale(LC_NUMERIC, "C");
13723 (void) textdomain(TEXT_DOMAIN);
13724 srand(time(NULL));
13725
13726 opterr = 0;
13727
13728 /*
13729 * Make sure the user has specified some command.
13730 */
13731 if (argc < 2) {
13732 (void) fprintf(stderr, gettext("missing command\n"));
13733 usage(B_FALSE);
13734 }
13735
13736 cmdname = argv[1];
13737
13738 /*
13739 * Special case '-?'
13740 */
13741 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
13742 usage(B_TRUE);
13743
13744 /*
13745 * Special case '-V|--version'
13746 */
13747 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
13748 return (zfs_version_print() != 0);
13749
13750 /*
13751 * Special case 'help'
13752 */
13753 if (strcmp(cmdname, "help") == 0)
13754 return (zpool_do_help(argc, argv));
13755
13756 if ((g_zfs = libzfs_init()) == NULL) {
13757 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
13758 return (1);
13759 }
13760
13761 libzfs_print_on_error(g_zfs, B_TRUE);
13762
13763 zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
13764
13765 /*
13766 * Many commands modify input strings for string parsing reasons.
13767 * We create a copy to protect the original argv.
13768 */
13769 newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
13770 for (i = 0; i < argc; i++)
13771 newargv[i] = strdup(argv[i]);
13772 newargv[argc] = NULL;
13773
13774 /*
13775 * Run the appropriate command.
13776 */
13777 if (find_command_idx(cmdname, &i) == 0) {
13778 current_command = &command_table[i];
13779 ret = command_table[i].func(argc - 1, newargv + 1);
13780 } else if (strchr(cmdname, '=')) {
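/*
 * A bare property=value argument is shorthand for
 * 'zpool set property=value ...'; dispatch to the 'set' handler.
 */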
13781 verify(find_command_idx("set", &i) == 0);
13782 current_command = &command_table[i];
13783 ret = command_table[i].func(argc, newargv);
13784 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
13785 /*
13786 * 'freeze' is a vile debugging abomination, so we treat
13787 * it as such.
13788 */
13789 zfs_cmd_t zc = {"\0"};
13790
13791 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
13792 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
13793 if (ret != 0) {
13794 (void) fprintf(stderr,
13795 gettext("failed to freeze pool: %d\n"), errno);
13796 ret = 1;
13797 }
13798
13799 log_history = 0;
13800 } else {
13801 (void) fprintf(stderr, gettext("unrecognized "
13802 "command '%s'\n"), cmdname);
13803 usage(B_FALSE);
13804 ret = 1;
13805 }
13806
13807 for (i = 0; i < argc; i++)
13808 free(newargv[i]);
13809 free(newargv);
13810
13811 if (ret == 0 && log_history)
13812 (void) zpool_log_history(g_zfs, history_str);
13813
13814 libzfs_fini(g_zfs);
13815
13816 /*
13817 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
13818 * for the purposes of running ::findleaks.
13819 */
13820 if (getenv("ZFS_ABORT") != NULL) {
13821 (void) printf("dumping core by request\n");
13822 abort();
13823 }
13824
13825 return (ret);
13826 }
13827