/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2021, 2023, Klara Inc.
 * Copyright [2021] Hewlett Packard Enterprise Development LP
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>

#include <libzfs.h>
#include <libzutil.h>

#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "zfs_valstr.h"

#include "statcommon.h"

libzfs_handle_t *g_zfs;

static int mount_tp_nthr = 512;	/* tpool threads for multi-threaded mounting */

static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);
static int zpool_do_prefetch(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static int zpool_do_ddt_prune(int, char **);

static int zpool_do_help(int argc, char **argv);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

enum zpool_options {
	ZPOOL_OPTION_POWER = 1024,
	ZPOOL_OPTION_ALLOW_INUSE,
	ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
	ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
	ZPOOL_OPTION_POOL_KEY_GUID,
	ZPOOL_OPTION_JSON_NUMS_AS_INT,
	ZPOOL_OPTION_JSON_FLAT_VDEVS
};

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif

typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DDT_PRUNE,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_PREFETCH,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;


/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
	IOS_DEFAULT = 0,
	IOS_LATENCY = 1,
	IOS_QUEUES = 2,
	IOS_L_HISTO = 3,
	IOS_RQ_HISTO = 4,
	IOS_COUNT,	/* always last element */
};

/* iostat_type entries as bitmasks */
#define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
#define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
#define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
#define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
#define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define	IOS_ANYHISTO_M	(IOS_L_HISTO_M | IOS_RQ_HISTO_M)

/*
 * Lookup table for iostat flags to nvlist names.  Basically a list
 * of all the nvlists a flag requires.  Also specifies the order in
 * which data gets printed in zpool iostat.
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
	[IOS_L_HISTO] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_LATENCY] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_QUEUES] = {
	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    NULL},
	[IOS_RQ_HISTO] = {
	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    NULL},
};
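
/*
 * Human-readable names for the scan, rebuild, checkpoint, and vdev state
 * values reported in nvlists; each array is indexed by the corresponding
 * kernel enum value.
 */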
static const char *pool_scan_func_str[] = {
	"NONE",
	"SCRUB",
	"RESILVER",
	"ERRORSCRUB"
};

static const char *pool_scan_state_str[] = {
	"NONE",
	"SCANNING",
	"FINISHED",
	"CANCELED",
	"ERRORSCRUBBING"
};

static const char *vdev_rebuild_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"COMPLETE"
};

static const char *checkpoint_state_str[] = {
	"NONE",
	"EXISTS",
	"DISCARDING"
};

static const char *vdev_state_str[] = {
	"UNKNOWN",
	"CLOSED",
	"OFFLINE",
	"REMOVED",
	"CANT_OPEN",
	"FAULTED",
	"DEGRADED",
	"ONLINE"
};

static const char *vdev_aux_str[] = {
	"NONE",
	"OPEN_FAILED",
	"CORRUPT_DATA",
	"NO_REPLICAS",
	"BAD_GUID_SUM",
	"TOO_SMALL",
	"BAD_LABEL",
	"VERSION_NEWER",
	"VERSION_OLDER",
	"UNSUP_FEAT",
	"SPARED",
	"ERR_EXCEEDED",
	"IO_FAILURE",
	"BAD_LOG",
	"EXTERNAL",
	"SPLIT_POOL",
	"BAD_ASHIFT",
	"EXTERNAL_PERSIST",
	"ACTIVE",
	"CHILDREN_OFFLINE",
	"ASHIFT_TOO_BIG"
};

static const char *vdev_init_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

static const char *vdev_trim_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};
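
/*
 * Additional format selector understood by nice_num_str_nvlist(), alongside
 * the ZFS_NICENUM_* formats, used to request a formatted timestamp.
 */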
#define	ZFS_NICE_TIMESTAMP	100

/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at one time, so we can
 * just do a highbit64(a)
 */
#define	IOS_HISTO_IDX(a)	(highbit64(a & IOS_ANYHISTO_M) - 1)

typedef struct zpool_command {
	const char	*name;
	int		(*func)(int, char **);
	zpool_help_t	usage;
} zpool_command_t;

/*
 * Master command table.  Each ZFS command has a name, associated function, and
 * usage message.  The usage messages need to be internationalized, so we have
 * to have a function to return the usage message based on a command index.
 *
 * These commands are organized according to how they are displayed in the
 * usage message.  An empty command (one with a NULL name) indicates an empty
 * line in the generic usage message.
 */
static zpool_command_t command_table[] = {
	{ "version",	zpool_do_version,	HELP_VERSION	},
	{ NULL },
	{ "create",	zpool_do_create,	HELP_CREATE	},
	{ "destroy",	zpool_do_destroy,	HELP_DESTROY	},
	{ NULL },
	{ "add",	zpool_do_add,		HELP_ADD	},
	{ "remove",	zpool_do_remove,	HELP_REMOVE	},
	{ NULL },
	{ "labelclear",	zpool_do_labelclear,	HELP_LABELCLEAR	},
	{ NULL },
	{ "checkpoint",	zpool_do_checkpoint,	HELP_CHECKPOINT	},
	{ "prefetch",	zpool_do_prefetch,	HELP_PREFETCH	},
	{ NULL },
	{ "list",	zpool_do_list,		HELP_LIST	},
	{ "iostat",	zpool_do_iostat,	HELP_IOSTAT	},
	{ "status",	zpool_do_status,	HELP_STATUS	},
	{ NULL },
	{ "online",	zpool_do_online,	HELP_ONLINE	},
	{ "offline",	zpool_do_offline,	HELP_OFFLINE	},
	{ "clear",	zpool_do_clear,		HELP_CLEAR	},
	{ "reopen",	zpool_do_reopen,	HELP_REOPEN	},
	{ NULL },
	{ "attach",	zpool_do_attach,	HELP_ATTACH	},
	{ "detach",	zpool_do_detach,	HELP_DETACH	},
	{ "replace",	zpool_do_replace,	HELP_REPLACE	},
	{ "split",	zpool_do_split,		HELP_SPLIT	},
	{ NULL },
	{ "initialize",	zpool_do_initialize,	HELP_INITIALIZE	},
	{ "resilver",	zpool_do_resilver,	HELP_RESILVER	},
	{ "scrub",	zpool_do_scrub,		HELP_SCRUB	},
	{ "trim",	zpool_do_trim,		HELP_TRIM	},
	{ NULL },
	{ "import",	zpool_do_import,	HELP_IMPORT	},
	{ "export",	zpool_do_export,	HELP_EXPORT	},
	{ "upgrade",	zpool_do_upgrade,	HELP_UPGRADE	},
	{ "reguid",	zpool_do_reguid,	HELP_REGUID	},
	{ NULL },
	{ "history",	zpool_do_history,	HELP_HISTORY	},
	{ "events",	zpool_do_events,	HELP_EVENTS	},
	{ NULL },
	{ "get",	zpool_do_get,		HELP_GET	},
	{ "set",	zpool_do_set,		HELP_SET	},
	{ "sync",	zpool_do_sync,		HELP_SYNC	},
	{ NULL },
	{ "wait",	zpool_do_wait,		HELP_WAIT	},
	{ NULL },
	{ "ddtprune",	zpool_do_ddt_prune,	HELP_DDT_PRUNE	},
};

#define	NCOMMAND	(ARRAY_SIZE(command_table))

#define	VDEV_ALLOC_CLASS_LOGS	"logs"

#define	MAX_CMD_LEN	256

static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;

static const char *
get_usage(zpool_help_t idx)
{
	switch (idx) {
	case HELP_ADD:
		return (gettext("\tadd [-afgLnP] [-o property=value] "
		    "<pool> <vdev> ...\n"));
	case HELP_ATTACH:
		return (gettext("\tattach [-fsw] [-o property=value] "
		    "<pool> <device> <new-device>\n"));
	case HELP_CLEAR:
		return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
	case HELP_CREATE:
		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
		    "\t    [-O file-system-property=value] ... \n"
		    "\t    [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
	case HELP_CHECKPOINT:
		return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
	case HELP_DESTROY:
		return (gettext("\tdestroy [-f] <pool>\n"));
	case HELP_DETACH:
		return (gettext("\tdetach <pool> <device>\n"));
	case HELP_EXPORT:
		return (gettext("\texport [-af] <pool> ...\n"));
	case HELP_HISTORY:
		return (gettext("\thistory [-il] [<pool>] ...\n"));
	case HELP_IMPORT:
		return (gettext("\timport [-d dir] [-D]\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]] -a\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]]\n"
		    "\t    [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
	case HELP_IOSTAT:
		return (gettext("\tiostat [[[-c [script1,script2,...]"
		    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
		    "\t    [[pool ...]|[pool vdev ...]|[vdev ...]]"
		    " [[-n] interval [count]]\n"));
	case HELP_LABELCLEAR:
		return (gettext("\tlabelclear [-f] <vdev>\n"));
	case HELP_LIST:
		return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "
		    "[--json-int, --json-pool-key-guid]] ...\n"
		    "\t    [-T d|u] [pool] [interval [count]]\n"));
	case HELP_PREFETCH:
		return (gettext("\tprefetch -t <type> [<type opts>] <pool>\n"
		    "\t    -t ddt <pool>\n"));
	case HELP_OFFLINE:
		return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
		    "<device> ...\n"));
	case HELP_ONLINE:
		return (gettext("\tonline [--power][-e] <pool> <device> "
		    "...\n"));
	case HELP_REPLACE:
		return (gettext("\treplace [-fsw] [-o property=value] "
		    "<pool> <device> [new-device]\n"));
	case HELP_REMOVE:
		return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
	case HELP_REOPEN:
		return (gettext("\treopen [-n] <pool>\n"));
	case HELP_INITIALIZE:
		return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
		    "[<device> ...]\n"));
	case HELP_SCRUB:
		return (gettext("\tscrub [-s | -p] [-w] [-e] <pool> ...\n"));
	case HELP_RESILVER:
		return (gettext("\tresilver <pool> ...\n"));
	case HELP_TRIM:
		return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
		    "[<device> ...]\n"));
	case HELP_STATUS:
		return (gettext("\tstatus [--power] [-j [--json-int, "
		    "--json-flat-vdevs, ...\n"
		    "\t    --json-pool-key-guid]] [-c [script1,script2,...]] "
		    "[-DegiLpPstvx] ...\n"
		    "\t    [-T d|u] [pool] [interval [count]]\n"));
	case HELP_UPGRADE:
		return (gettext("\tupgrade\n"
		    "\tupgrade -v\n"
		    "\tupgrade [-V version] <-a | pool ...>\n"));
	case HELP_EVENTS:
		return (gettext("\tevents [-vHf [pool] | -c]\n"));
	case HELP_GET:
		return (gettext("\tget [-Hp] [-j [--json-int, "
		    "--json-pool-key-guid]] ...\n"
		    "\t    [-o \"all\" | field[,...]] "
		    "<\"all\" | property[,...]> <pool> ...\n"));
	case HELP_SET:
		return (gettext("\tset <property=value> <pool>\n"
		    "\tset <vdev_property=value> <pool> <vdev>\n"));
	case HELP_SPLIT:
		return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
		    "\t    [-o property=value] <pool> <newpool> "
		    "[<device> ...]\n"));
	case HELP_REGUID:
		return (gettext("\treguid [-g guid] <pool>\n"));
	case HELP_SYNC:
		return (gettext("\tsync [pool] ...\n"));
	case HELP_VERSION:
		return (gettext("\tversion [-j]\n"));
	case HELP_WAIT:
		return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
		    "<pool> [interval]\n"));
	case HELP_DDT_PRUNE:
		return (gettext("\tddtprune -d|-p <amount> <pool>\n"));
	default:
		__builtin_unreachable();
	}
}
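
/*
 * Recursively collect the names of all leaf vdevs under 'nvroot' into the
 * 'res' nvlist, skipping indirect and hole vdevs.
 */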
static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
	uint_t children = 0;
	nvlist_t **child;
	uint_t i;

	(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (children == 0) {
		char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
		    VDEV_NAME_PATH);

		if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
		    strcmp(path, VDEV_TYPE_HOLE) != 0)
			fnvlist_add_boolean(res, path);

		free(path);
		return;
	}

	for (i = 0; i < children; i++) {
		zpool_collect_leaves(zhp, child[i], res);
	}
}

/*
 * Callback routine that will print out a pool property value.
 */
static int
print_pool_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s  ", zpool_prop_to_name(prop));

	if (zpool_prop_readonly(prop))
		(void) fprintf(fp, "  NO   ");
	else
		(void) fprintf(fp, " YES   ");

	if (zpool_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", zpool_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Callback routine that will print out a vdev property value.
 */
static int
print_vdev_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s  ", vdev_prop_to_name(prop));

	if (vdev_prop_readonly(prop))
		(void) fprintf(fp, "  NO   ");
	else
		(void) fprintf(fp, " YES   ");

	if (vdev_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", vdev_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
 * '/dev/disk/by-vdev/L5'.
 */
static const char *
vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
{
	nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
	if (vdev_nv == NULL) {
		return (NULL);
	}
	return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
}

static int
zpool_power_on(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_TRUE));
}

static int
zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
{
	int rc;

	rc = zpool_power_on(zhp, vdev);
	if (rc != 0)
		return (rc);

	zpool_disk_wait(vdev_name_to_path(zhp, vdev));

	return (0);
}
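
/*
 * Power on every real leaf vdev in the pool, then wait for all of the
 * underlying disk devices to reappear.
 */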
static int
zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
{
	nvlist_t *nv;
	const char *path = NULL;
	int rc;

	/* Power up all the devices first */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		if (path != NULL) {
			rc = zpool_power_on(zhp, (char *)path);
			if (rc != 0) {
				return (rc);
			}
		}
	}

	/*
	 * Wait for their devices to show up.  Since we powered them on
	 * at roughly the same time, they should all come online around
	 * the same time.
	 */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		zpool_disk_wait(path);
	}

	return (0);
}

static int
zpool_power_off(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_FALSE));
}

/*
 * Display usage message.  If we're inside a command, display only the usage
 * for that command.  Otherwise, iterate over the entire command table and
 * display a complete usage message.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	FILE *fp = requested ? stdout : stderr;

	if (current_command == NULL) {
		int i;

		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
		(void) fprintf(fp,
		    gettext("where 'command' is one of the following:\n\n"));

		for (i = 0; i < NCOMMAND; i++) {
			if (command_table[i].name == NULL)
				(void) fprintf(fp, "\n");
			else
				(void) fprintf(fp, "%s",
				    get_usage(command_table[i].usage));
		}

		(void) fprintf(fp,
		    gettext("\nFor further help on a command or topic, "
		    "run: %s\n"), "zpool help [<topic>]");
	} else {
		(void) fprintf(fp, gettext("usage:\n"));
		(void) fprintf(fp, "%s", get_usage(current_command->usage));
	}

	if (current_command != NULL &&
	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
	    ((strcmp(current_command->name, "set") == 0) ||
	    (strcmp(current_command->name, "get") == 0) ||
	    (strcmp(current_command->name, "list") == 0))) {

		(void) fprintf(fp, "%s",
		    gettext("\nthe following properties are supported:\n"));

		(void) fprintf(fp, "\n\t%-19s  %s   %s\n\n",
		    "PROPERTY", "EDIT", "VALUES");

		/* Iterate over all properties */
		if (current_prop_type == ZFS_TYPE_POOL) {
			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);

			(void) fprintf(fp, "\t%-19s  ", "feature@...");
			(void) fprintf(fp, " YES   "
			    "disabled | enabled | active\n");

			(void) fprintf(fp, gettext("\nThe feature@ properties "
			    "must be appended with a feature name.\n"
			    "See zpool-features(7).\n"));
		} else if (current_prop_type == ZFS_TYPE_VDEV) {
			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);
		}
	}

	/*
	 * See comments at end of main().
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	exit(requested ? 0 : 2);
}

/*
 * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
 * Initialize all unused blocks in the specified vdevs, or all vdevs in the
 * pool if none specified.
 *
 * -c	Cancel. Ends active initializing.
 * -s	Suspend. Initializing can then be restarted with no flags.
 * -u	Uninitialize. Clears initialization state.
 * -w	Wait. Blocks until initializing has completed.
 */
int
zpool_do_initialize(int argc, char **argv)
{
	int c;
	char *poolname;
	zpool_handle_t *zhp;
	nvlist_t *vdevs;
	int err = 0;
	boolean_t wait = B_FALSE;

	struct option long_options[] = {
		{"cancel",	no_argument,	NULL, 'c'},
		{"suspend",	no_argument,	NULL, 's'},
		{"uninit",	no_argument,	NULL, 'u'},
		{"wait",	no_argument,	NULL, 'w'},
		{0, 0, 0, 0}
	};

	pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
	while ((c = getopt_long(argc, argv, "csuw", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'c':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_CANCEL;
			break;
		case 's':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_SUSPEND;
			break;
		case 'u':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_UNINIT) {
				(void) fprintf(stderr, gettext("-u cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_UNINIT;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
		return (-1);
	}

	if (wait && (cmd_type != POOL_INITIALIZE_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c, -s, "
		    "or -u\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];
	zhp = zpool_open(g_zfs, poolname);
	if (zhp == NULL)
		return (-1);

	vdevs = fnvlist_alloc();
	if (argc == 1) {
		/* no individual leaf vdevs specified, so add them all */
		nvlist_t *config = zpool_get_config(zhp, NULL);
		nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
		    ZPOOL_CONFIG_VDEV_TREE);
		zpool_collect_leaves(zhp, nvroot, vdevs);
	} else {
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
	}

	if (wait)
		err = zpool_initialize_wait(zhp, cmd_type, vdevs);
	else
		err = zpool_initialize(zhp, cmd_type, vdevs);

	fnvlist_free(vdevs);
	zpool_close(zhp);

	return (err);
}

/*
 * print a pool vdev config for dry runs
 */
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
    const char *match, int name_flags)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	boolean_t printed = B_FALSE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (name != NULL)
			(void) printf("\t%*s%s\n", indent, "", name);
		return;
	}

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		const char *class = "";

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);

		if (is_hole == B_TRUE) {
			continue;
		}

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			class = VDEV_ALLOC_BIAS_LOG;
		(void) nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
		if (strcmp(match, class) != 0)
			continue;

		if (!printed && name != NULL) {
			(void) printf("\t%*s%s\n", indent, "", name);
			printed = B_TRUE;
		}
		vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
		print_vdev_tree(zhp, vname, child[c], indent + 2, "",
		    name_flags);
		free(vname);
	}
}

/*
 * Print the list of l2cache devices for dry runs.
 */
static void
print_cache_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "cache");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

/*
 * Print the list of spares for dry runs.
 */
static void
print_spare_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "spares");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

typedef struct spare_cbdata {
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} spare_cbdata_t;
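
/*
 * Recursively search the vdev tree rooted at 'nv' for a vdev with the GUID
 * 'search'.  Together with find_spare() this is used to locate the pool
 * (if any) that is currently using a given spare device.
 */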
static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
	    search == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev(child[c], search))
				return (B_TRUE);
	}

	return (B_FALSE);
}

static int
find_spare(zpool_handle_t *zhp, void *data)
{
	spare_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (find_vdev(nvroot, cbp->cb_guid)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}
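
/*
 * Format 'value' according to 'format' and add it to the 'item' nvlist under
 * 'key': as the raw integer when 'as_int' is set, otherwise as a string,
 * either literal ("%llu") or nicenum/timestamp formatted.
 */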
static void
nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
    boolean_t literal, boolean_t as_int, int format)
{
	char buf[256];
	if (literal) {
		if (!as_int)
			snprintf(buf, 256, "%llu", (u_longlong_t)value);
	} else {
		switch (format) {
		case ZFS_NICENUM_1024:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
			break;
		case ZFS_NICENUM_BYTES:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
			break;
		case ZFS_NICENUM_TIME:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
			break;
		case ZFS_NICE_TIMESTAMP:
			format_timestamp(value, buf, 256);
			break;
		default:
			fprintf(stderr, "Invalid number format");
			exit(1);
		}
	}
	if (as_int)
		fnvlist_add_uint64(item, key, value);
	else
		fnvlist_add_string(item, key, buf);
}

/*
 * Generates an nvlist with the output version for every command, based on
 * the parameters passed.  The purpose of this is to version the JSON output
 * schema, since the format may be updated for each command in the future.
 *
 * Schema:
 *
 * "output_version": {
 *     "command": string,
 *     "vers_major": integer,
 *     "vers_minor": integer,
 * }
 */
static nvlist_t *
zpool_json_schema(int maj_v, int min_v)
{
	char cmd[MAX_CMD_LEN];
	nvlist_t *sch = fnvlist_alloc();
	nvlist_t *ov = fnvlist_alloc();

	snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
	fnvlist_add_string(ov, "command", cmd);
	fnvlist_add_uint32(ov, "vers_major", maj_v);
	fnvlist_add_uint32(ov, "vers_minor", min_v);
	fnvlist_add_nvlist(sch, "output_version", ov);
	fnvlist_free(ov);
	return (sch);
}

static void
fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
    boolean_t as_int)
{
	nvlist_t *config = zpool_get_config(zhp, NULL);
	uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
	uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);

	fnvlist_add_string(list, "name", zpool_get_name(zhp));
	if (addtype)
		fnvlist_add_string(list, "type", "POOL");
	fnvlist_add_string(list, "state", zpool_get_state_str(zhp));
	if (as_int) {
		if (guid)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);
		if (txg)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);
		fnvlist_add_uint64(list, "spa_version", SPA_VERSION);
		fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);
	} else {
		char value[ZFS_MAXPROPLEN];
		if (guid) {
			snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)guid);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
		}
		if (txg) {
			snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)txg);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
		}
		fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);
		fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);
	}
}

static void
used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
{
	spare_cbdata_t spare_cb;
	verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
	    &spare_cb.cb_guid) == 0);
	if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
		if (strcmp(zpool_get_name(spare_cb.cb_zhp),
		    zpool_get_name(zhp)) != 0) {
			fnvlist_add_string(list, "used_by",
			    zpool_get_name(spare_cb.cb_zhp));
		}
		zpool_close(spare_cb.cb_zhp);
	}
}
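
/*
 * Add the identity of the vdev 'name' (type, GUID, path, devid, allocation
 * class, and current state) to the 'list' nvlist for JSON output.
 */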
static void
fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
    boolean_t addtype, boolean_t as_int)
{
	boolean_t l2c = B_FALSE;
	const char *path, *phys, *devid, *bias = NULL;
	uint64_t hole = 0, log = 0, spare = 0;
	vdev_stat_t *vs;
	uint_t c;
	nvlist_t *nvdev;
	nvlist_t *nvdev_parent = NULL;
	char *_name;

	if (strcmp(name, zpool_get_name(zhp)) != 0)
		_name = name;
	else
		_name = (char *)"root-0";

	nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);

	fnvlist_add_string(list, "name", name);
	if (addtype)
		fnvlist_add_string(list, "type", "VDEV");
	if (nvdev) {
		const char *type = fnvlist_lookup_string(nvdev,
		    ZPOOL_CONFIG_TYPE);
		if (type)
			fnvlist_add_string(list, "vdev_type", type);
		uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);
		if (guid) {
			if (as_int) {
				fnvlist_add_uint64(list, "guid", guid);
			} else {
				char buf[ZFS_MAXPROPLEN];
				snprintf(buf, ZFS_MAXPROPLEN, "%llu",
				    (u_longlong_t)guid);
				fnvlist_add_string(list, "guid", buf);
			}
		}
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
			fnvlist_add_string(list, "path", path);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
		    &phys) == 0)
			fnvlist_add_string(list, "phys_path", phys);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
		    &devid) == 0)
			fnvlist_add_string(list, "devid", devid);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
		    &spare);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
		if (hole)
			fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
		else if (l2c)
			fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
		else if (spare)
			fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
		else if (log)
			fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
		else {
			(void) nvlist_lookup_string(nvdev,
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			if (bias != NULL)
				fnvlist_add_string(list, "class", bias);
			else {
				nvdev_parent = NULL;
				nvdev_parent = zpool_find_parent_vdev(zhp,
				    _name, NULL, NULL, NULL);

				/*
				 * With a mirrored special device, the parent
				 * "mirror" vdev will have
				 * ZPOOL_CONFIG_ALLOCATION_BIAS set to "special"
				 * not the leaf vdevs.  If we're a leaf vdev
				 * in that case we need to look at our parent
				 * to see if they're "special" to know if we
				 * are "special" too.
				 */
				if (nvdev_parent) {
					(void) nvlist_lookup_string(
					    nvdev_parent,
					    ZPOOL_CONFIG_ALLOCATION_BIAS,
					    &bias);
				}
				if (bias != NULL)
					fnvlist_add_string(list, "class", bias);
				else
					fnvlist_add_string(list, "class",
					    "normal");
			}
		}
		if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0) {
			fnvlist_add_string(list, "state",
			    vdev_state_str[vs->vs_state]);
		}
	}
}
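
/*
 * Return B_TRUE if 'proplist' contains at least one feature@ property.
 */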
static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
	nvpair_t *nvp;
	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
	    nvp = nvlist_next_nvpair(proplist, nvp)) {
		if (zpool_prop_feature(nvpair_name(nvp)))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Add a property pair (name, string-value) into a property nvlist.
 */
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
	zpool_prop_t prop = ZPOOL_PROP_INVAL;
	nvlist_t *proplist;
	const char *normnm;
	const char *strval;

	if (*props == NULL &&
	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		return (1);
	}

	proplist = *props;

	if (poolprop) {
		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
		const char *cname =
		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
		    (!zpool_prop_feature(propname) &&
		    !zpool_prop_vdev(propname))) {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid pool or vdev property\n"), propname);
			return (2);
		}

		/*
		 * feature@ properties and version should not be specified
		 * at the same time.
		 */
		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    prop_list_contains_feature(proplist))) {
			(void) fprintf(stderr, gettext("'feature@' and "
			    "'version' properties cannot be specified "
			    "together\n"));
			return (2);
		}

		/*
		 * if version is specified, only "legacy" compatibility
		 * may be requested
		 */
		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    nvlist_exists(proplist, cname) &&
		    strcmp(fnvlist_lookup_string(proplist, cname),
		    ZPOOL_COMPAT_LEGACY) != 0)) {
			(void) fprintf(stderr, gettext("when 'version' is "
			    "specified, the 'compatibility' feature may only "
			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
			return (2);
		}

		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
			normnm = propname;
		else
			normnm = zpool_prop_to_name(prop);
	} else {
		zfs_prop_t fsprop = zfs_name_to_prop(propname);

		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
		    B_FALSE)) {
			normnm = zfs_prop_to_name(fsprop);
		} else if (zfs_prop_user(propname) ||
		    zfs_prop_userquota(propname)) {
			normnm = propname;
		} else {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid filesystem property\n"), propname);
			return (2);
		}
	}

	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
	    prop != ZPOOL_PROP_CACHEFILE) {
		(void) fprintf(stderr, gettext("property '%s' "
		    "specified multiple times\n"), propname);
		return (2);
	}

	if (nvlist_add_string(proplist, normnm, propval) != 0) {
		(void) fprintf(stderr, gettext("internal "
		    "error: out of memory\n"));
		return (1);
	}

	return (0);
}

/*
 * Set a default property pair (name, string-value) in a property nvlist
 */
static int
add_prop_list_default(const char *propname, const char *propval,
    nvlist_t **props)
{
	const char *pval;

	if (nvlist_lookup_string(*props, propname, &pval) == 0)
		return (0);

	return (add_prop_list(propname, propval, props, B_TRUE));
}

/*
 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
 *
 * -a	Disable the ashift validation checks
 * -f	Force addition of devices, even if they appear in use
 * -g	Display guid for individual vdev name.
 * -L	Follow links when resolving vdev path name.
 * -n	Do not add the devices, but display the resulting layout if
 *	they were to be added.
 * -o	Set property=value.
 * -P	Display full path for vdev name.
 *
 * Adds the given vdevs to 'pool'.  As with create, the bulk of this work is
 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
 * libzfs.
 */
int
zpool_do_add(int argc, char **argv)
{
	boolean_t check_replication = B_TRUE;
	boolean_t check_inuse = B_TRUE;
	boolean_t dryrun = B_FALSE;
	boolean_t check_ashift = B_TRUE;
	boolean_t force = B_FALSE;
	int name_flags = 0;
	int c;
	nvlist_t *nvroot;
	char *poolname;
	int ret;
	zpool_handle_t *zhp;
	nvlist_t *config;
	nvlist_t *props = NULL;
	char *propval;

	struct option long_options[] = {
		{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
		{"allow-replication-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
		{"allow-ashift-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'g':
			name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 'P':
			name_flags |= VDEV_NAME_PATH;
			break;
		case ZPOOL_OPTION_ALLOW_INUSE:
			check_inuse = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
			check_replication = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
			check_ashift = B_FALSE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		usage(B_FALSE);
	}

	if (force) {
		if (!check_inuse || !check_replication || !check_ashift) {
			(void) fprintf(stderr, gettext("'-f' option is not "
			    "allowed with '--allow-replication-mismatch', "
			    "'--allow-ashift-mismatch', or "
			    "'--allow-in-use'\n"));
			usage(B_FALSE);
		}
		check_inuse = B_FALSE;
		check_replication = B_FALSE;
		check_ashift = B_FALSE;
	}

	poolname = argv[0];

	argc--;
	argv++;

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}

	/* pass off to make_root_vdev for processing */
	nvroot = make_root_vdev(zhp, props, !check_inuse,
	    check_replication, B_FALSE, dryrun, argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		return (1);
	}

	if (dryrun) {
		nvlist_t *poolnvroot;
		nvlist_t **l2child, **sparechild;
		uint_t l2children, sparechildren, c;
		char *vname;
		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &poolnvroot) == 0);

		(void) printf(gettext("would update '%s' to the following "
		    "configuration:\n\n"), zpool_get_name(zhp));

		/* print original main pool and new tree */
		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
		    name_flags | VDEV_NAME_TYPE_ID);
		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

		/* print other classes: 'dedup', 'special', and 'log' */
		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		}

		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
 */
int
zpool_do_remove(int argc, char **argv)
{
	char *poolname;
	int i, ret = 0;
	zpool_handle_t *zhp = NULL;
	boolean_t stop = B_FALSE;
	int c;
	boolean_t noop = B_FALSE;
	boolean_t parsable = B_FALSE;
	boolean_t wait = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "npsw")) != -1) {
		switch (c) {
		case 'n':
			noop = B_TRUE;
			break;
		case 'p':
			parsable = B_TRUE;
			break;
		case 's':
			stop = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if (stop && noop) {
		zpool_close(zhp);
		(void) fprintf(stderr, gettext("stop request ignored\n"));
		return (0);
	}

	if (stop) {
		if (argc > 1) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
		if (zpool_vdev_remove_cancel(zhp) != 0)
			ret = 1;
		if (wait) {
			(void) fprintf(stderr, gettext("invalid option "
			    "combination: -w cannot be used with -s\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc < 2) {
			(void) fprintf(stderr, gettext("missing device\n"));
			usage(B_FALSE);
		}

		for (i = 1; i < argc; i++) {
			if (noop) {
				uint64_t size;

				if (zpool_vdev_indirect_size(zhp, argv[i],
				    &size) != 0) {
					ret = 1;
					break;
				}
				if (parsable) {
					(void) printf("%s %llu\n",
					    argv[i], (unsigned long long)size);
				} else {
					char valstr[32];
					zfs_nicenum(size, valstr,
					    sizeof (valstr));
					(void) printf("Memory that will be "
					    "used after removing %s: %s\n",
					    argv[i], valstr);
				}
			} else {
				if (zpool_vdev_remove(zhp, argv[i]) != 0)
					ret = 1;
			}
		}

		if (ret == 0 && wait)
			ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
	}
	zpool_close(zhp);

	return (ret);
}

/*
 * Return 1 if a vdev is active (being used in a pool)
 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
 *
 * This is useful for checking if a disk in an active pool is offlined or
 * faulted.
 */
static int
vdev_is_active(char *vdev_path)
{
	int fd;
	fd = open(vdev_path, O_EXCL);
	if (fd < 0) {
		return (1);	/* can't open O_EXCL - disk is active */
	}

	close(fd);
	return (0);	/* disk is inactive in the pool */
}

/*
 * zpool labelclear [-f] <vdev>
 *
 * -f	Force clearing the label for the vdevs which are members of
 *	the exported or foreign pools.
 *
 * Verifies that the vdev is not active and zeros out the label information
 * on the device.
 */
int
zpool_do_labelclear(int argc, char **argv)
{
	char vdev[MAXPATHLEN];
	char *name = NULL;
	int c, fd = -1, ret = 0;
	nvlist_t *config;
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	boolean_t force = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		default:
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get vdev name */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing vdev name\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	(void) strlcpy(vdev, argv[0], sizeof (vdev));

	/*
	 * If we cannot open an absolute path, we quit.
	 * Otherwise if the provided vdev name doesn't point to a file,
	 * try prepending expected disk paths and partition numbers.
	 */
	if ((fd = open(vdev, O_RDWR)) < 0) {
		int error;
		if (vdev[0] == '/') {
			(void) fprintf(stderr, gettext("failed to open "
			    "%s: %s\n"), vdev, strerror(errno));
			return (1);
		}

		error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
		if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
			if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
				error = ENOENT;
		}

		if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
			if (errno == ENOENT) {
				(void) fprintf(stderr, gettext(
				    "failed to find device %s, try "
				    "specifying absolute path instead\n"),
				    argv[0]);
				return (1);
			}

			(void) fprintf(stderr, gettext("failed to open %s:"
			    " %s\n"), vdev, strerror(errno));
			return (1);
		}
	}

	/*
	 * Flush all dirty pages for the block device.  This should not be
	 * fatal when the device does not support BLKFLSBUF as would be the
	 * case for a file vdev.
	 */
	if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
		(void) fprintf(stderr, gettext("failed to invalidate "
		    "cache for %s: %s\n"), vdev, strerror(errno));

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) fprintf(stderr,
		    gettext("failed to read label from %s\n"), vdev);
		ret = 1;
		goto errout;
	}
	nvlist_free(config);

	ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to check state for %s\n"), vdev);
		ret = 1;
		goto errout;
	}

	if (!inuse)
		goto wipe_label;

	switch (state) {
	default:
	case POOL_STATE_ACTIVE:
	case POOL_STATE_SPARE:
	case POOL_STATE_L2CACHE:
		/*
		 * We allow the user to call 'zpool offline -f'
		 * on an offlined disk in an active pool.  We can check if
		 * the disk is online by calling vdev_is_active().
		 */
		if (force && !vdev_is_active(vdev))
			break;

		(void) fprintf(stderr, gettext(
		    "%s is a member (%s) of pool \"%s\""),
		    vdev, zpool_pool_state_to_name(state), name);

		if (force) {
			(void) fprintf(stderr, gettext(
			    ". Offline the disk first to clear its label."));
		}
		printf("\n");
		ret = 1;
		goto errout;

	case POOL_STATE_EXPORTED:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of exported pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_POTENTIALLY_ACTIVE:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of potentially active pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_DESTROYED:
		/* inuse should never be set for a destroyed pool */
		assert(0);
		break;
	}

wipe_label:
	ret = zpool_clear_label(fd);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to clear label for %s\n"), vdev);
	}

errout:
	free(name);
	(void) close(fd);

	return (ret);
}

/*
 * zpool create [-fnd] [-o property=value] ...
 *		[-O file-system-property=value] ...
 *		[-R root] [-m mountpoint] <pool> <dev> ...
 *
 * -f	Force creation, even if devices appear in use
 * -n	Do not create the pool, but display the resulting layout if it
 *	were to be created.
 * -R	Create a pool under an alternate root
 * -m	Set default mountpoint for the root dataset.  By default it's
 *	'/<pool>'
 * -o	Set property=value.
 * -o	Set feature@feature=enabled|disabled.
 * -d	Don't automatically enable all supported pool features
 *	(individual features can be enabled with -o).
 * -O	Set fsproperty=value in the pool's root file system
 *
 * Creates the named pool according to the given vdev specification.  The
 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
 * Once we get the nvlist back from make_root_vdev(), we either print out the
 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
 */
int
zpool_do_create(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	boolean_t dryrun = B_FALSE;
	boolean_t enable_pool_features = B_TRUE;

	int c;
	nvlist_t *nvroot = NULL;
	char *poolname;
	char *tname = NULL;
	int ret = 1;
	char *altroot = NULL;
	char *compat = NULL;
	char *mountpoint = NULL;
	nvlist_t *fsprops = NULL;
	nvlist_t *props = NULL;
	char *propval;

	/* check options */
	while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'd':
			enable_pool_features = B_FALSE;
			break;
		case 'R':
			altroot = optarg;
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			break;
		case 'm':
			/* Equivalent to -O mountpoint=optarg */
			mountpoint = optarg;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			if (add_prop_list(optarg, propval, &props, B_TRUE))
				goto errout;

			/*
			 * If the user is creating a pool that doesn't support
			 * feature flags, don't enable any features.
			 */
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
				char *end;
				u_longlong_t ver;

				ver = strtoull(propval, &end, 0);
				if (*end == '\0' &&
				    ver < SPA_VERSION_FEATURES) {
					enable_pool_features = B_FALSE;
				}
			}
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
				altroot = propval;
			if (zpool_name_to_prop(optarg) ==
			    ZPOOL_PROP_COMPATIBILITY)
				compat = propval;
			break;
		case 'O':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -O option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			/*
			 * Mountpoints are checked and then added later.
			 * Uniquely among properties, they can be specified
			 * more than once, to avoid conflict with -m.
			 */
			if (0 == strcmp(optarg,
			    zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
				mountpoint = propval;
			} else if (add_prop_list(optarg, propval, &fsprops,
			    B_FALSE)) {
				goto errout;
			}
			break;
		case 't':
			/*
			 * Sanity check temporary pool name.
2073 */ 2074 if (strchr(optarg, '/') != NULL) { 2075 (void) fprintf(stderr, gettext("cannot create " 2076 "'%s': invalid character '/' in temporary " 2077 "name\n"), optarg); 2078 (void) fprintf(stderr, gettext("use 'zfs " 2079 "create' to create a dataset\n")); 2080 goto errout; 2081 } 2082 2083 if (add_prop_list(zpool_prop_to_name( 2084 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE)) 2085 goto errout; 2086 if (add_prop_list_default(zpool_prop_to_name( 2087 ZPOOL_PROP_CACHEFILE), "none", &props)) 2088 goto errout; 2089 tname = optarg; 2090 break; 2091 case ':': 2092 (void) fprintf(stderr, gettext("missing argument for " 2093 "'%c' option\n"), optopt); 2094 goto badusage; 2095 case '?': 2096 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 2097 optopt); 2098 goto badusage; 2099 } 2100 } 2101 2102 argc -= optind; 2103 argv += optind; 2104 2105 /* get pool name and check number of arguments */ 2106 if (argc < 1) { 2107 (void) fprintf(stderr, gettext("missing pool name argument\n")); 2108 goto badusage; 2109 } 2110 if (argc < 2) { 2111 (void) fprintf(stderr, gettext("missing vdev specification\n")); 2112 goto badusage; 2113 } 2114 2115 poolname = argv[0]; 2116 2117 /* 2118 * As a special case, check for use of '/' in the name, and direct the 2119 * user to use 'zfs create' instead. 2120 */ 2121 if (strchr(poolname, '/') != NULL) { 2122 (void) fprintf(stderr, gettext("cannot create '%s': invalid " 2123 "character '/' in pool name\n"), poolname); 2124 (void) fprintf(stderr, gettext("use 'zfs create' to " 2125 "create a dataset\n")); 2126 goto errout; 2127 } 2128 2129 /* pass off to make_root_vdev for bulk processing */ 2130 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun, 2131 argc - 1, argv + 1); 2132 if (nvroot == NULL) 2133 goto errout; 2134 2135 /* make_root_vdev() allows 0 toplevel children if there are spares */ 2136 if (!zfs_allocatable_devs(nvroot)) { 2137 (void) fprintf(stderr, gettext("invalid vdev " 2138 "specification: at least one toplevel vdev must be " 2139 "specified\n")); 2140 goto errout; 2141 } 2142 2143 if (altroot != NULL && altroot[0] != '/') { 2144 (void) fprintf(stderr, gettext("invalid alternate root '%s': " 2145 "must be an absolute path\n"), altroot); 2146 goto errout; 2147 } 2148 2149 /* 2150 * Check the validity of the mountpoint and direct the user to use the 2151 * '-m' mountpoint option if it looks like it's in use.
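 * For example (illustrative values): with '-R /mnt' and '-m /export/tank',
 * the directory probed below is '/mnt/export/tank'; with neither option it
 * is simply '/<pool>', e.g. '/tank'.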
2152 */ 2153 if (mountpoint == NULL || 2154 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 && 2155 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) { 2156 char buf[MAXPATHLEN]; 2157 DIR *dirp; 2158 2159 if (mountpoint && mountpoint[0] != '/') { 2160 (void) fprintf(stderr, gettext("invalid mountpoint " 2161 "'%s': must be an absolute path, 'legacy', or " 2162 "'none'\n"), mountpoint); 2163 goto errout; 2164 } 2165 2166 if (mountpoint == NULL) { 2167 if (altroot != NULL) 2168 (void) snprintf(buf, sizeof (buf), "%s/%s", 2169 altroot, poolname); 2170 else 2171 (void) snprintf(buf, sizeof (buf), "/%s", 2172 poolname); 2173 } else { 2174 if (altroot != NULL) 2175 (void) snprintf(buf, sizeof (buf), "%s%s", 2176 altroot, mountpoint); 2177 else 2178 (void) snprintf(buf, sizeof (buf), "%s", 2179 mountpoint); 2180 } 2181 2182 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) { 2183 (void) fprintf(stderr, gettext("mountpoint '%s' : " 2184 "%s\n"), buf, strerror(errno)); 2185 (void) fprintf(stderr, gettext("use '-m' " 2186 "option to provide a different default\n")); 2187 goto errout; 2188 } else if (dirp) { 2189 int count = 0; 2190 2191 while (count < 3 && readdir(dirp) != NULL) 2192 count++; 2193 (void) closedir(dirp); 2194 2195 if (count > 2) { 2196 (void) fprintf(stderr, gettext("mountpoint " 2197 "'%s' exists and is not empty\n"), buf); 2198 (void) fprintf(stderr, gettext("use '-m' " 2199 "option to provide a " 2200 "different default\n")); 2201 goto errout; 2202 } 2203 } 2204 } 2205 2206 /* 2207 * Now that the mountpoint's validity has been checked, ensure that 2208 * the property is set appropriately prior to creating the pool. 2209 */ 2210 if (mountpoint != NULL) { 2211 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 2212 mountpoint, &fsprops, B_FALSE); 2213 if (ret != 0) 2214 goto errout; 2215 } 2216 2217 ret = 1; 2218 if (dryrun) { 2219 /* 2220 * For a dry run invocation, print out a basic message and run 2221 * through all the vdevs in the list and print out in an 2222 * appropriate hierarchy. 2223 */ 2224 (void) printf(gettext("would create '%s' with the " 2225 "following layout:\n\n"), poolname); 2226 2227 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0); 2228 print_vdev_tree(NULL, "dedup", nvroot, 0, 2229 VDEV_ALLOC_BIAS_DEDUP, 0); 2230 print_vdev_tree(NULL, "special", nvroot, 0, 2231 VDEV_ALLOC_BIAS_SPECIAL, 0); 2232 print_vdev_tree(NULL, "logs", nvroot, 0, 2233 VDEV_ALLOC_BIAS_LOG, 0); 2234 print_cache_list(nvroot, 0); 2235 print_spare_list(nvroot, 0); 2236 2237 ret = 0; 2238 } else { 2239 /* 2240 * Load in feature set. 2241 * Note: if compatibility property not given, we'll have 2242 * NULL, which means 'all features'. 2243 */ 2244 boolean_t requested_features[SPA_FEATURES]; 2245 if (zpool_do_load_compat(compat, requested_features) != 2246 ZPOOL_COMPATIBILITY_OK) 2247 goto errout; 2248 2249 /* 2250 * props contains list of features to enable. 
2251 * For each feature: 2252 * - remove it if feature@name=disabled 2253 * - leave it there if feature@name=enabled 2254 * - add it if: 2255 * - enable_pool_features (ie: no '-d' or '-o version') 2256 * - it's supported by the kernel module 2257 * - it's in the requested feature set 2258 * - warn if it's enabled but not in compat 2259 */ 2260 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 2261 char propname[MAXPATHLEN]; 2262 const char *propval; 2263 zfeature_info_t *feat = &spa_feature_table[i]; 2264 2265 (void) snprintf(propname, sizeof (propname), 2266 "feature@%s", feat->fi_uname); 2267 2268 if (!nvlist_lookup_string(props, propname, &propval)) { 2269 if (strcmp(propval, 2270 ZFS_FEATURE_DISABLED) == 0) { 2271 (void) nvlist_remove_all(props, 2272 propname); 2273 } else if (strcmp(propval, 2274 ZFS_FEATURE_ENABLED) == 0 && 2275 !requested_features[i]) { 2276 (void) fprintf(stderr, gettext( 2277 "Warning: feature \"%s\" enabled " 2278 "but is not in specified " 2279 "'compatibility' feature set.\n"), 2280 feat->fi_uname); 2281 } 2282 } else if ( 2283 enable_pool_features && 2284 feat->fi_zfs_mod_supported && 2285 requested_features[i]) { 2286 ret = add_prop_list(propname, 2287 ZFS_FEATURE_ENABLED, &props, B_TRUE); 2288 if (ret != 0) 2289 goto errout; 2290 } 2291 } 2292 2293 ret = 1; 2294 if (zpool_create(g_zfs, poolname, 2295 nvroot, props, fsprops) == 0) { 2296 zfs_handle_t *pool = zfs_open(g_zfs, 2297 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM); 2298 if (pool != NULL) { 2299 if (zfs_mount(pool, NULL, 0) == 0) { 2300 ret = zfs_share(pool, NULL); 2301 zfs_commit_shares(NULL); 2302 } 2303 zfs_close(pool); 2304 } 2305 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) { 2306 (void) fprintf(stderr, gettext("pool name may have " 2307 "been omitted\n")); 2308 } 2309 } 2310 2311 errout: 2312 nvlist_free(nvroot); 2313 nvlist_free(fsprops); 2314 nvlist_free(props); 2315 return (ret); 2316 badusage: 2317 nvlist_free(fsprops); 2318 nvlist_free(props); 2319 usage(B_FALSE); 2320 return (2); 2321 } 2322 2323 /* 2324 * zpool destroy <pool> 2325 * 2326 * -f Forcefully unmount any datasets 2327 * 2328 * Destroy the given pool. Automatically unmounts any datasets in the pool. 2329 */ 2330 int 2331 zpool_do_destroy(int argc, char **argv) 2332 { 2333 boolean_t force = B_FALSE; 2334 int c; 2335 char *pool; 2336 zpool_handle_t *zhp; 2337 int ret; 2338 2339 /* check options */ 2340 while ((c = getopt(argc, argv, "f")) != -1) { 2341 switch (c) { 2342 case 'f': 2343 force = B_TRUE; 2344 break; 2345 case '?': 2346 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 2347 optopt); 2348 usage(B_FALSE); 2349 } 2350 } 2351 2352 argc -= optind; 2353 argv += optind; 2354 2355 /* check arguments */ 2356 if (argc < 1) { 2357 (void) fprintf(stderr, gettext("missing pool argument\n")); 2358 usage(B_FALSE); 2359 } 2360 if (argc > 1) { 2361 (void) fprintf(stderr, gettext("too many arguments\n")); 2362 usage(B_FALSE); 2363 } 2364 2365 pool = argv[0]; 2366 2367 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) { 2368 /* 2369 * As a special case, check for use of '/' in the name, and 2370 * direct the user to use 'zfs destroy' instead. 
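 * For example, 'zpool destroy tank/home' fails to open a pool named
 * "tank/home", and the message below redirects the user to
 * 'zfs destroy tank/home'.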
2371 */ 2372 if (strchr(pool, '/') != NULL) 2373 (void) fprintf(stderr, gettext("use 'zfs destroy' to " 2374 "destroy a dataset\n")); 2375 return (1); 2376 } 2377 2378 if (zpool_disable_datasets(zhp, force) != 0) { 2379 (void) fprintf(stderr, gettext("could not destroy '%s': " 2380 "could not unmount datasets\n"), zpool_get_name(zhp)); 2381 zpool_close(zhp); 2382 return (1); 2383 } 2384 2385 /* The history must be logged as part of the export */ 2386 log_history = B_FALSE; 2387 2388 ret = (zpool_destroy(zhp, history_str) != 0); 2389 2390 zpool_close(zhp); 2391 2392 return (ret); 2393 } 2394 2395 typedef struct export_cbdata { 2396 tpool_t *tpool; 2397 pthread_mutex_t mnttab_lock; 2398 boolean_t force; 2399 boolean_t hardforce; 2400 int retval; 2401 } export_cbdata_t; 2402 2403 2404 typedef struct { 2405 char *aea_poolname; 2406 export_cbdata_t *aea_cbdata; 2407 } async_export_args_t; 2408 2409 /* 2410 * Export one pool 2411 */ 2412 static int 2413 zpool_export_one(zpool_handle_t *zhp, void *data) 2414 { 2415 export_cbdata_t *cb = data; 2416 2417 /* 2418 * zpool_disable_datasets() is not thread-safe for mnttab access. 2419 * So we serialize access here for 'zpool export -a' parallel case. 2420 */ 2421 if (cb->tpool != NULL) 2422 pthread_mutex_lock(&cb->mnttab_lock); 2423 2424 int retval = zpool_disable_datasets(zhp, cb->force); 2425 2426 if (cb->tpool != NULL) 2427 pthread_mutex_unlock(&cb->mnttab_lock); 2428 2429 if (retval) 2430 return (1); 2431 2432 if (cb->hardforce) { 2433 if (zpool_export_force(zhp, history_str) != 0) 2434 return (1); 2435 } else if (zpool_export(zhp, cb->force, history_str) != 0) { 2436 return (1); 2437 } 2438 2439 return (0); 2440 } 2441 2442 /* 2443 * Asynchronous export request 2444 */ 2445 static void 2446 zpool_export_task(void *arg) 2447 { 2448 async_export_args_t *aea = arg; 2449 2450 zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname); 2451 if (zhp != NULL) { 2452 int ret = zpool_export_one(zhp, aea->aea_cbdata); 2453 if (ret != 0) 2454 aea->aea_cbdata->retval = ret; 2455 zpool_close(zhp); 2456 } else { 2457 aea->aea_cbdata->retval = 1; 2458 } 2459 2460 free(aea->aea_poolname); 2461 free(aea); 2462 } 2463 2464 /* 2465 * Process an export request in parallel 2466 */ 2467 static int 2468 zpool_export_one_async(zpool_handle_t *zhp, void *data) 2469 { 2470 tpool_t *tpool = ((export_cbdata_t *)data)->tpool; 2471 async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t)); 2472 2473 /* save pool name since zhp will go out of scope */ 2474 aea->aea_poolname = strdup(zpool_get_name(zhp)); 2475 aea->aea_cbdata = data; 2476 2477 /* ship off actual export to another thread */ 2478 if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0) 2479 return (errno); /* unlikely */ 2480 else 2481 return (0); 2482 } 2483 2484 /* 2485 * zpool export [-f] <pool> ... 2486 * 2487 * -a Export all pools 2488 * -f Forcefully unmount datasets 2489 * 2490 * Export the given pools. By default, the command will attempt to cleanly 2491 * unmount any active datasets within the pool. If the '-f' flag is specified, 2492 * then the datasets will be forcefully unmounted. 
*/ 2494 int 2495 zpool_do_export(int argc, char **argv) 2496 { 2497 export_cbdata_t cb; 2498 boolean_t do_all = B_FALSE; 2499 boolean_t force = B_FALSE; 2500 boolean_t hardforce = B_FALSE; 2501 int c, ret; 2502 2503 /* check options */ 2504 while ((c = getopt(argc, argv, "afF")) != -1) { 2505 switch (c) { 2506 case 'a': 2507 do_all = B_TRUE; 2508 break; 2509 case 'f': 2510 force = B_TRUE; 2511 break; 2512 case 'F': 2513 hardforce = B_TRUE; 2514 break; 2515 case '?': 2516 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 2517 optopt); 2518 usage(B_FALSE); 2519 } 2520 } 2521 2522 cb.force = force; 2523 cb.hardforce = hardforce; 2524 cb.tpool = NULL; 2525 cb.retval = 0; 2526 argc -= optind; 2527 argv += optind; 2528 2529 /* The history will be logged as part of the export itself */ 2530 log_history = B_FALSE; 2531 2532 if (do_all) { 2533 if (argc != 0) { 2534 (void) fprintf(stderr, gettext("too many arguments\n")); 2535 usage(B_FALSE); 2536 } 2537 2538 cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 2539 0, NULL); 2540 pthread_mutex_init(&cb.mnttab_lock, NULL); 2541 2542 /* Asynchronously call zpool_export_one using thread pool */ 2543 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 2544 B_FALSE, zpool_export_one_async, &cb); 2545 2546 tpool_wait(cb.tpool); 2547 tpool_destroy(cb.tpool); 2548 (void) pthread_mutex_destroy(&cb.mnttab_lock); 2549 2550 return (ret | cb.retval); 2551 } 2552 2553 /* check arguments */ 2554 if (argc < 1) { 2555 (void) fprintf(stderr, gettext("missing pool argument\n")); 2556 usage(B_FALSE); 2557 } 2558 2559 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 2560 B_FALSE, zpool_export_one, &cb); 2561 2562 return (ret); 2563 } 2564 2565 /* 2566 * Given a vdev configuration, determine the maximum width needed for the device 2567 * name column. 2568 */ 2569 static int 2570 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max, 2571 int name_flags) 2572 { 2573 static const char *const subtypes[] = 2574 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN}; 2575 2576 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags); 2577 max = MAX(strlen(name) + depth, max); 2578 free(name); 2579 2580 nvlist_t **child; 2581 uint_t children; 2582 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i) 2583 if (nvlist_lookup_nvlist_array(nv, subtypes[i], 2584 &child, &children) == 0) 2585 for (uint_t c = 0; c < children; ++c) 2586 max = MAX(max_width(zhp, child[c], depth + 2, 2587 max, name_flags), max); 2588 2589 return (max); 2590 } 2591 2592 typedef struct status_cbdata { 2593 int cb_count; 2594 int cb_name_flags; 2595 int cb_namewidth; 2596 boolean_t cb_allpools; 2597 boolean_t cb_verbose; 2598 boolean_t cb_literal; 2599 boolean_t cb_explain; 2600 boolean_t cb_first; 2601 boolean_t cb_dedup_stats; 2602 boolean_t cb_print_unhealthy; 2603 boolean_t cb_print_status; 2604 boolean_t cb_print_slow_ios; 2605 boolean_t cb_print_vdev_init; 2606 boolean_t cb_print_vdev_trim; 2607 vdev_cmd_data_list_t *vcdl; 2608 boolean_t cb_print_power; 2609 boolean_t cb_json; 2610 boolean_t cb_flat_vdevs; 2611 nvlist_t *cb_jsobj; 2612 boolean_t cb_json_as_int; 2613 boolean_t cb_json_pool_key_guid; 2614 } status_cbdata_t; 2615 2616 /* Return B_TRUE if the string is NULL, empty, or whitespace; B_FALSE otherwise.
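 * e.g. is_blank_str(NULL), is_blank_str("") and is_blank_str(" \t") all
 * return B_TRUE, while is_blank_str(" x ") returns B_FALSE.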
*/ 2617 static boolean_t 2618 is_blank_str(const char *str) 2619 { 2620 for (; str != NULL && *str != '\0'; ++str) 2621 if (!isblank(*str)) 2622 return (B_FALSE); 2623 return (B_TRUE); 2624 } 2625 2626 static void 2627 zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path, 2628 nvlist_t *item) 2629 { 2630 vdev_cmd_data_t *data; 2631 int i, j, k = 1; 2632 char tmp[256]; 2633 const char *val; 2634 2635 for (i = 0; i < vcdl->count; i++) { 2636 if ((strcmp(vcdl->data[i].path, path) != 0) || 2637 (strcmp(vcdl->data[i].pool, pool) != 0)) 2638 continue; 2639 2640 data = &vcdl->data[i]; 2641 for (j = 0; j < vcdl->uniq_cols_cnt; j++) { 2642 val = NULL; 2643 for (int k = 0; k < data->cols_cnt; k++) { 2644 if (strcmp(data->cols[k], 2645 vcdl->uniq_cols[j]) == 0) { 2646 val = data->lines[k]; 2647 break; 2648 } 2649 } 2650 if (val == NULL || is_blank_str(val)) 2651 val = "-"; 2652 fnvlist_add_string(item, vcdl->uniq_cols[j], val); 2653 } 2654 2655 for (j = data->cols_cnt; j < data->lines_cnt; j++) { 2656 if (data->lines[j]) { 2657 snprintf(tmp, 256, "extra_%d", k++); 2658 fnvlist_add_string(item, tmp, 2659 data->lines[j]); 2660 } 2661 } 2662 break; 2663 } 2664 } 2665 2666 /* Print command output lines for specific vdev in a specific pool */ 2667 static void 2668 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path) 2669 { 2670 vdev_cmd_data_t *data; 2671 int i, j; 2672 const char *val; 2673 2674 for (i = 0; i < vcdl->count; i++) { 2675 if ((strcmp(vcdl->data[i].path, path) != 0) || 2676 (strcmp(vcdl->data[i].pool, pool) != 0)) { 2677 /* Not the vdev we're looking for */ 2678 continue; 2679 } 2680 2681 data = &vcdl->data[i]; 2682 /* Print out all the output values for this vdev */ 2683 for (j = 0; j < vcdl->uniq_cols_cnt; j++) { 2684 val = NULL; 2685 /* Does this vdev have values for this column? */ 2686 for (int k = 0; k < data->cols_cnt; k++) { 2687 if (strcmp(data->cols[k], 2688 vcdl->uniq_cols[j]) == 0) { 2689 /* yes it does, record the value */ 2690 val = data->lines[k]; 2691 break; 2692 } 2693 } 2694 /* 2695 * Mark empty values with dashes to make output 2696 * awk-able. 2697 */ 2698 if (val == NULL || is_blank_str(val)) 2699 val = "-"; 2700 2701 printf("%*s", vcdl->uniq_cols_width[j], val); 2702 if (j < vcdl->uniq_cols_cnt - 1) 2703 fputs(" ", stdout); 2704 } 2705 2706 /* Print out any values that aren't in a column at the end */ 2707 for (j = data->cols_cnt; j < data->lines_cnt; j++) { 2708 /* Did we have any columns? If so print a spacer. 
*/ 2709 if (vcdl->uniq_cols_cnt > 0) 2710 fputs(" ", stdout); 2711 2712 val = data->lines[j]; 2713 fputs(val ?: "", stdout); 2714 } 2715 break; 2716 } 2717 } 2718 2719 /* 2720 * Print vdev initialization status for leaves 2721 */ 2722 static void 2723 print_status_initialize(vdev_stat_t *vs, boolean_t verbose) 2724 { 2725 if (verbose) { 2726 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE || 2727 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED || 2728 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) && 2729 !vs->vs_scan_removing) { 2730 char zbuf[1024]; 2731 char tbuf[256]; 2732 2733 time_t t = vs->vs_initialize_action_time; 2734 int initialize_pct = 100; 2735 if (vs->vs_initialize_state != 2736 VDEV_INITIALIZE_COMPLETE) { 2737 initialize_pct = (vs->vs_initialize_bytes_done * 2738 100 / (vs->vs_initialize_bytes_est + 1)); 2739 } 2740 2741 (void) ctime_r(&t, tbuf); 2742 tbuf[24] = 0; 2743 2744 switch (vs->vs_initialize_state) { 2745 case VDEV_INITIALIZE_SUSPENDED: 2746 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2747 gettext("suspended, started at"), tbuf); 2748 break; 2749 case VDEV_INITIALIZE_ACTIVE: 2750 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2751 gettext("started at"), tbuf); 2752 break; 2753 case VDEV_INITIALIZE_COMPLETE: 2754 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2755 gettext("completed at"), tbuf); 2756 break; 2757 } 2758 2759 (void) printf(gettext(" (%d%% initialized%s)"), 2760 initialize_pct, zbuf); 2761 } else { 2762 (void) printf(gettext(" (uninitialized)")); 2763 } 2764 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) { 2765 (void) printf(gettext(" (initializing)")); 2766 } 2767 } 2768 2769 /* 2770 * Print vdev TRIM status for leaves 2771 */ 2772 static void 2773 print_status_trim(vdev_stat_t *vs, boolean_t verbose) 2774 { 2775 if (verbose) { 2776 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE || 2777 vs->vs_trim_state == VDEV_TRIM_SUSPENDED || 2778 vs->vs_trim_state == VDEV_TRIM_COMPLETE) && 2779 !vs->vs_scan_removing) { 2780 char zbuf[1024]; 2781 char tbuf[256]; 2782 2783 time_t t = vs->vs_trim_action_time; 2784 int trim_pct = 100; 2785 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) { 2786 trim_pct = (vs->vs_trim_bytes_done * 2787 100 / (vs->vs_trim_bytes_est + 1)); 2788 } 2789 2790 (void) ctime_r(&t, tbuf); 2791 tbuf[24] = 0; 2792 2793 switch (vs->vs_trim_state) { 2794 case VDEV_TRIM_SUSPENDED: 2795 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2796 gettext("suspended, started at"), tbuf); 2797 break; 2798 case VDEV_TRIM_ACTIVE: 2799 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2800 gettext("started at"), tbuf); 2801 break; 2802 case VDEV_TRIM_COMPLETE: 2803 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2804 gettext("completed at"), tbuf); 2805 break; 2806 } 2807 2808 (void) printf(gettext(" (%d%% trimmed%s)"), 2809 trim_pct, zbuf); 2810 } else if (vs->vs_trim_notsup) { 2811 (void) printf(gettext(" (trim unsupported)")); 2812 } else { 2813 (void) printf(gettext(" (untrimmed)")); 2814 } 2815 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) { 2816 (void) printf(gettext(" (trimming)")); 2817 } 2818 } 2819 2820 /* 2821 * Return the color associated with a health string. This includes returning 2822 * NULL for no color change. 
2823 */ 2824 static const char * 2825 health_str_to_color(const char *health) 2826 { 2827 if (strcmp(health, gettext("FAULTED")) == 0 || 2828 strcmp(health, gettext("SUSPENDED")) == 0 || 2829 strcmp(health, gettext("UNAVAIL")) == 0) { 2830 return (ANSI_RED); 2831 } 2832 2833 if (strcmp(health, gettext("OFFLINE")) == 0 || 2834 strcmp(health, gettext("DEGRADED")) == 0 || 2835 strcmp(health, gettext("REMOVED")) == 0) { 2836 return (ANSI_YELLOW); 2837 } 2838 2839 return (NULL); 2840 } 2841 2842 /* 2843 * Called for each leaf vdev. Returns 0 if the vdev is healthy. 2844 * A vdev is unhealthy if any of the following are true: 2845 * 1) there are read, write, or checksum errors, 2846 * 2) its state is not ONLINE, or 2847 * 3) slow IO reporting was requested (-s) and there are slow IOs. 2848 */ 2849 static int 2850 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data) 2851 { 2852 status_cbdata_t *cb = data; 2853 vdev_stat_t *vs; 2854 uint_t vsc; 2855 (void) hdl_data; 2856 2857 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2858 (uint64_t **)&vs, &vsc) != 0) 2859 return (1); 2860 2861 if (vs->vs_checksum_errors || vs->vs_read_errors || 2862 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY) 2863 return (1); 2864 2865 if (cb->cb_print_slow_ios && vs->vs_slow_ios) 2866 return (1); 2867 2868 return (0); 2869 } 2870 2871 /* 2872 * Print out configuration state as requested by status_callback. 2873 */ 2874 static void 2875 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name, 2876 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs) 2877 { 2878 nvlist_t **child, *root; 2879 uint_t c, i, vsc, children; 2880 pool_scan_stat_t *ps = NULL; 2881 vdev_stat_t *vs; 2882 char rbuf[6], wbuf[6], cbuf[6]; 2883 char *vname; 2884 uint64_t notpresent; 2885 spare_cbdata_t spare_cb; 2886 const char *state; 2887 const char *type; 2888 const char *path = NULL; 2889 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL, 2890 *scolor = NULL; 2891 2892 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2893 &child, &children) != 0) 2894 children = 0; 2895 2896 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2897 (uint64_t **)&vs, &vsc) == 0); 2898 2899 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 2900 2901 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) 2902 return; 2903 2904 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 2905 2906 if (isspare) { 2907 /* 2908 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for 2909 * online drives. 2910 */ 2911 if (vs->vs_aux == VDEV_AUX_SPARED) 2912 state = gettext("INUSE"); 2913 else if (vs->vs_state == VDEV_STATE_HEALTHY) 2914 state = gettext("AVAIL"); 2915 } 2916 2917 /* 2918 * If '-e' is specified then top-level vdevs and their children 2919 * can be pruned if all of their leaves are healthy. 
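 * For example, with '-e' a mirror whose leaf vdevs are all ONLINE with no
 * read, write, or checksum errors (and, when '-s' is given, no slow I/Os)
 * is pruned from the output entirely.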
2920 */ 2921 if (cb->cb_print_unhealthy && depth > 0 && 2922 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) { 2923 return; 2924 } 2925 2926 printf_color(health_str_to_color(state), 2927 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth, 2928 name, state); 2929 2930 if (!isspare) { 2931 if (vs->vs_read_errors) 2932 rcolor = ANSI_RED; 2933 2934 if (vs->vs_write_errors) 2935 wcolor = ANSI_RED; 2936 2937 if (vs->vs_checksum_errors) 2938 ccolor = ANSI_RED; 2939 2940 if (vs->vs_slow_ios) 2941 scolor = ANSI_BLUE; 2942 2943 if (cb->cb_literal) { 2944 fputc(' ', stdout); 2945 printf_color(rcolor, "%5llu", 2946 (u_longlong_t)vs->vs_read_errors); 2947 fputc(' ', stdout); 2948 printf_color(wcolor, "%5llu", 2949 (u_longlong_t)vs->vs_write_errors); 2950 fputc(' ', stdout); 2951 printf_color(ccolor, "%5llu", 2952 (u_longlong_t)vs->vs_checksum_errors); 2953 } else { 2954 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf)); 2955 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf)); 2956 zfs_nicenum(vs->vs_checksum_errors, cbuf, 2957 sizeof (cbuf)); 2958 fputc(' ', stdout); 2959 printf_color(rcolor, "%5s", rbuf); 2960 fputc(' ', stdout); 2961 printf_color(wcolor, "%5s", wbuf); 2962 fputc(' ', stdout); 2963 printf_color(ccolor, "%5s", cbuf); 2964 } 2965 if (cb->cb_print_slow_ios) { 2966 if (children == 0) { 2967 /* Only leafs vdevs have slow IOs */ 2968 zfs_nicenum(vs->vs_slow_ios, rbuf, 2969 sizeof (rbuf)); 2970 } else { 2971 snprintf(rbuf, sizeof (rbuf), "-"); 2972 } 2973 2974 if (cb->cb_literal) 2975 printf_color(scolor, " %5llu", 2976 (u_longlong_t)vs->vs_slow_ios); 2977 else 2978 printf_color(scolor, " %5s", rbuf); 2979 } 2980 if (cb->cb_print_power) { 2981 if (children == 0) { 2982 /* Only leaf vdevs have physical slots */ 2983 switch (zpool_power_current_state(zhp, (char *) 2984 fnvlist_lookup_string(nv, 2985 ZPOOL_CONFIG_PATH))) { 2986 case 0: 2987 printf_color(ANSI_RED, " %5s", 2988 gettext("off")); 2989 break; 2990 case 1: 2991 printf(" %5s", gettext("on")); 2992 break; 2993 default: 2994 printf(" %5s", "-"); 2995 } 2996 } else { 2997 printf(" %5s", "-"); 2998 } 2999 } 3000 } 3001 3002 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 3003 ¬present) == 0) { 3004 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); 3005 (void) printf(" %s %s", gettext("was"), path); 3006 } else if (vs->vs_aux != 0) { 3007 (void) printf(" "); 3008 color_start(ANSI_RED); 3009 switch (vs->vs_aux) { 3010 case VDEV_AUX_OPEN_FAILED: 3011 (void) printf(gettext("cannot open")); 3012 break; 3013 3014 case VDEV_AUX_BAD_GUID_SUM: 3015 (void) printf(gettext("missing device")); 3016 break; 3017 3018 case VDEV_AUX_NO_REPLICAS: 3019 (void) printf(gettext("insufficient replicas")); 3020 break; 3021 3022 case VDEV_AUX_VERSION_NEWER: 3023 (void) printf(gettext("newer version")); 3024 break; 3025 3026 case VDEV_AUX_UNSUP_FEAT: 3027 (void) printf(gettext("unsupported feature(s)")); 3028 break; 3029 3030 case VDEV_AUX_ASHIFT_TOO_BIG: 3031 (void) printf(gettext("unsupported minimum blocksize")); 3032 break; 3033 3034 case VDEV_AUX_SPARED: 3035 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3036 &spare_cb.cb_guid) == 0); 3037 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) { 3038 if (strcmp(zpool_get_name(spare_cb.cb_zhp), 3039 zpool_get_name(zhp)) == 0) 3040 (void) printf(gettext("currently in " 3041 "use")); 3042 else 3043 (void) printf(gettext("in use by " 3044 "pool '%s'"), 3045 zpool_get_name(spare_cb.cb_zhp)); 3046 zpool_close(spare_cb.cb_zhp); 3047 } else { 3048 (void) 
printf(gettext("currently in use")); 3049 } 3050 break; 3051 3052 case VDEV_AUX_ERR_EXCEEDED: 3053 if (vs->vs_read_errors + vs->vs_write_errors + 3054 vs->vs_checksum_errors == 0 && children == 0 && 3055 vs->vs_slow_ios > 0) { 3056 (void) printf(gettext("too many slow I/Os")); 3057 } else { 3058 (void) printf(gettext("too many errors")); 3059 } 3060 break; 3061 3062 case VDEV_AUX_IO_FAILURE: 3063 (void) printf(gettext("experienced I/O failures")); 3064 break; 3065 3066 case VDEV_AUX_BAD_LOG: 3067 (void) printf(gettext("bad intent log")); 3068 break; 3069 3070 case VDEV_AUX_EXTERNAL: 3071 (void) printf(gettext("external device fault")); 3072 break; 3073 3074 case VDEV_AUX_SPLIT_POOL: 3075 (void) printf(gettext("split into new pool")); 3076 break; 3077 3078 case VDEV_AUX_ACTIVE: 3079 (void) printf(gettext("currently in use")); 3080 break; 3081 3082 case VDEV_AUX_CHILDREN_OFFLINE: 3083 (void) printf(gettext("all children offline")); 3084 break; 3085 3086 case VDEV_AUX_BAD_LABEL: 3087 (void) printf(gettext("invalid label")); 3088 break; 3089 3090 default: 3091 (void) printf(gettext("corrupted data")); 3092 break; 3093 } 3094 color_end(); 3095 } else if (children == 0 && !isspare && 3096 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL && 3097 VDEV_STAT_VALID(vs_physical_ashift, vsc) && 3098 vs->vs_configured_ashift < vs->vs_physical_ashift) { 3099 (void) printf( 3100 gettext(" block size: %dB configured, %dB native"), 3101 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift); 3102 } 3103 3104 if (vs->vs_scan_removing != 0) { 3105 (void) printf(gettext(" (removing)")); 3106 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) { 3107 (void) printf(gettext(" (non-allocating)")); 3108 } 3109 3110 /* The root vdev has the scrub/resilver stats */ 3111 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 3112 ZPOOL_CONFIG_VDEV_TREE); 3113 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS, 3114 (uint64_t **)&ps, &c); 3115 3116 /* 3117 * If you force fault a drive that's resilvering, its scan stats can 3118 * get frozen in time, giving the false impression that it's 3119 * being resilvered. That's why we check the state to see if the vdev 3120 * is healthy before reporting "resilvering" or "repairing". 3121 */ 3122 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 && 3123 vs->vs_state == VDEV_STATE_HEALTHY) { 3124 if (vs->vs_scan_processed != 0) { 3125 (void) printf(gettext(" (%s)"), 3126 (ps->pss_func == POOL_SCAN_RESILVER) ? 3127 "resilvering" : "repairing"); 3128 } else if (vs->vs_resilver_deferred) { 3129 (void) printf(gettext(" (awaiting resilver)")); 3130 } 3131 } 3132 3133 /* The top-level vdevs have the rebuild stats */ 3134 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE && 3135 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) { 3136 if (vs->vs_rebuild_processed != 0) { 3137 (void) printf(gettext(" (resilvering)")); 3138 } 3139 } 3140 3141 if (cb->vcdl != NULL) { 3142 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3143 printf(" "); 3144 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 3145 } 3146 } 3147 3148 /* Display vdev initialization and trim status for leaves. 
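 * e.g. a leaf vdev mid-initialization prints " (44% initialized, started
 * at <time>)" in verbose mode, or just " (initializing)" otherwise (the
 * percentage here is illustrative).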
*/ 3149 if (children == 0) { 3150 print_status_initialize(vs, cb->cb_print_vdev_init); 3151 print_status_trim(vs, cb->cb_print_vdev_trim); 3152 } 3153 3154 (void) printf("\n"); 3155 3156 for (c = 0; c < children; c++) { 3157 uint64_t islog = B_FALSE, ishole = B_FALSE; 3158 3159 /* Don't print logs or holes here */ 3160 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3161 &islog); 3162 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 3163 &ishole); 3164 if (islog || ishole) 3165 continue; 3166 /* Only print normal classes here */ 3167 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 3168 continue; 3169 3170 /* Provide vdev_rebuild_stats to children if available */ 3171 if (vrs == NULL) { 3172 (void) nvlist_lookup_uint64_array(nv, 3173 ZPOOL_CONFIG_REBUILD_STATS, 3174 (uint64_t **)&vrs, &i); 3175 } 3176 3177 vname = zpool_vdev_name(g_zfs, zhp, child[c], 3178 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 3179 print_status_config(zhp, cb, vname, child[c], depth + 2, 3180 isspare, vrs); 3181 free(vname); 3182 } 3183 } 3184 3185 /* 3186 * Print the configuration of an exported pool. Iterate over all vdevs in the 3187 * pool, printing out the name and status for each one. 3188 */ 3189 static void 3190 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv, 3191 int depth) 3192 { 3193 nvlist_t **child; 3194 uint_t c, children; 3195 vdev_stat_t *vs; 3196 const char *type; 3197 char *vname; 3198 3199 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 3200 if (strcmp(type, VDEV_TYPE_MISSING) == 0 || 3201 strcmp(type, VDEV_TYPE_HOLE) == 0) 3202 return; 3203 3204 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3205 (uint64_t **)&vs, &c) == 0); 3206 3207 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name); 3208 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux)); 3209 3210 if (vs->vs_aux != 0) { 3211 (void) printf(" "); 3212 3213 switch (vs->vs_aux) { 3214 case VDEV_AUX_OPEN_FAILED: 3215 (void) printf(gettext("cannot open")); 3216 break; 3217 3218 case VDEV_AUX_BAD_GUID_SUM: 3219 (void) printf(gettext("missing device")); 3220 break; 3221 3222 case VDEV_AUX_NO_REPLICAS: 3223 (void) printf(gettext("insufficient replicas")); 3224 break; 3225 3226 case VDEV_AUX_VERSION_NEWER: 3227 (void) printf(gettext("newer version")); 3228 break; 3229 3230 case VDEV_AUX_UNSUP_FEAT: 3231 (void) printf(gettext("unsupported feature(s)")); 3232 break; 3233 3234 case VDEV_AUX_ERR_EXCEEDED: 3235 (void) printf(gettext("too many errors")); 3236 break; 3237 3238 case VDEV_AUX_ACTIVE: 3239 (void) printf(gettext("currently in use")); 3240 break; 3241 3242 case VDEV_AUX_CHILDREN_OFFLINE: 3243 (void) printf(gettext("all children offline")); 3244 break; 3245 3246 case VDEV_AUX_BAD_LABEL: 3247 (void) printf(gettext("invalid label")); 3248 break; 3249 3250 default: 3251 (void) printf(gettext("corrupted data")); 3252 break; 3253 } 3254 } 3255 (void) printf("\n"); 3256 3257 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 3258 &child, &children) != 0) 3259 return; 3260 3261 for (c = 0; c < children; c++) { 3262 uint64_t is_log = B_FALSE; 3263 3264 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3265 &is_log); 3266 if (is_log) 3267 continue; 3268 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 3269 continue; 3270 3271 vname = zpool_vdev_name(g_zfs, NULL, child[c], 3272 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 3273 print_import_config(cb, vname, child[c], depth + 2); 3274 free(vname); 3275 } 3276 3277 if 
(nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 3278 &child, &children) == 0) { 3279 (void) printf(gettext("\tcache\n")); 3280 for (c = 0; c < children; c++) { 3281 vname = zpool_vdev_name(g_zfs, NULL, child[c], 3282 cb->cb_name_flags); 3283 (void) printf("\t %s\n", vname); 3284 free(vname); 3285 } 3286 } 3287 3288 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 3289 &child, &children) == 0) { 3290 (void) printf(gettext("\tspares\n")); 3291 for (c = 0; c < children; c++) { 3292 vname = zpool_vdev_name(g_zfs, NULL, child[c], 3293 cb->cb_name_flags); 3294 (void) printf("\t %s\n", vname); 3295 free(vname); 3296 } 3297 } 3298 } 3299 3300 /* 3301 * Print specialized class vdevs. 3302 * 3303 * These are recorded as top level vdevs in the main pool child array 3304 * but with "is_log" set to 1 or an "alloc_bias" string. We use either 3305 * print_status_config() or print_import_config() to print the top level 3306 * class vdevs then any of their children (eg mirrored slogs) are printed 3307 * recursively - which works because only the top level vdev is marked. 3308 */ 3309 static void 3310 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 3311 const char *class) 3312 { 3313 uint_t c, children; 3314 nvlist_t **child; 3315 boolean_t printed = B_FALSE; 3316 3317 assert(zhp != NULL || !cb->cb_verbose); 3318 3319 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, 3320 &children) != 0) 3321 return; 3322 3323 for (c = 0; c < children; c++) { 3324 uint64_t is_log = B_FALSE; 3325 const char *bias = NULL; 3326 const char *type = NULL; 3327 3328 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3329 &is_log); 3330 3331 if (is_log) { 3332 bias = (char *)VDEV_ALLOC_CLASS_LOGS; 3333 } else { 3334 (void) nvlist_lookup_string(child[c], 3335 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 3336 (void) nvlist_lookup_string(child[c], 3337 ZPOOL_CONFIG_TYPE, &type); 3338 } 3339 3340 if (bias == NULL || strcmp(bias, class) != 0) 3341 continue; 3342 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 3343 continue; 3344 3345 if (!printed) { 3346 (void) printf("\t%s\t\n", gettext(class)); 3347 printed = B_TRUE; 3348 } 3349 3350 char *name = zpool_vdev_name(g_zfs, zhp, child[c], 3351 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 3352 if (cb->cb_print_status) 3353 print_status_config(zhp, cb, name, child[c], 2, 3354 B_FALSE, NULL); 3355 else 3356 print_import_config(cb, name, child[c], 2); 3357 free(name); 3358 } 3359 } 3360 3361 /* 3362 * Display the status for the given pool. 
3363 */ 3364 static int 3365 show_import(nvlist_t *config, boolean_t report_error) 3366 { 3367 uint64_t pool_state; 3368 vdev_stat_t *vs; 3369 const char *name; 3370 uint64_t guid; 3371 uint64_t hostid = 0; 3372 const char *msgid; 3373 const char *hostname = "unknown"; 3374 nvlist_t *nvroot, *nvinfo; 3375 zpool_status_t reason; 3376 zpool_errata_t errata; 3377 const char *health; 3378 uint_t vsc; 3379 const char *comment; 3380 const char *indent; 3381 char buf[2048]; 3382 status_cbdata_t cb = { 0 }; 3383 3384 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3385 &name) == 0); 3386 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3387 &guid) == 0); 3388 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 3389 &pool_state) == 0); 3390 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3391 &nvroot) == 0); 3392 3393 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 3394 (uint64_t **)&vs, &vsc) == 0); 3395 health = zpool_state_to_name(vs->vs_state, vs->vs_aux); 3396 3397 reason = zpool_import_status(config, &msgid, &errata); 3398 3399 /* 3400 * If we're importing using a cachefile, then we won't report any 3401 * errors unless we are in the scan phase of the import. 3402 */ 3403 if (reason != ZPOOL_STATUS_OK && !report_error) 3404 return (reason); 3405 3406 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) { 3407 indent = " "; 3408 } else { 3409 comment = NULL; 3410 indent = ""; 3411 } 3412 3413 (void) printf(gettext("%s pool: %s\n"), indent, name); 3414 (void) printf(gettext("%s id: %llu\n"), indent, (u_longlong_t)guid); 3415 (void) printf(gettext("%s state: %s"), indent, health); 3416 if (pool_state == POOL_STATE_DESTROYED) 3417 (void) printf(gettext(" (DESTROYED)")); 3418 (void) printf("\n"); 3419 3420 if (reason != ZPOOL_STATUS_OK) { 3421 (void) printf("%s", indent); 3422 printf_color(ANSI_BOLD, gettext("status: ")); 3423 } 3424 switch (reason) { 3425 case ZPOOL_STATUS_MISSING_DEV_R: 3426 case ZPOOL_STATUS_MISSING_DEV_NR: 3427 case ZPOOL_STATUS_BAD_GUID_SUM: 3428 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3429 "missing from the system.\n")); 3430 break; 3431 3432 case ZPOOL_STATUS_CORRUPT_LABEL_R: 3433 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 3434 printf_color(ANSI_YELLOW, gettext("One or more devices " 3435 "contain corrupted data.\n")); 3436 break; 3437 3438 case ZPOOL_STATUS_CORRUPT_DATA: 3439 printf_color(ANSI_YELLOW, gettext("The pool data is " 3440 "corrupted.\n")); 3441 break; 3442 3443 case ZPOOL_STATUS_OFFLINE_DEV: 3444 printf_color(ANSI_YELLOW, gettext("One or more devices " 3445 "are offlined.\n")); 3446 break; 3447 3448 case ZPOOL_STATUS_CORRUPT_POOL: 3449 printf_color(ANSI_YELLOW, gettext("The pool metadata is " 3450 "corrupted.\n")); 3451 break; 3452 3453 case ZPOOL_STATUS_VERSION_OLDER: 3454 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 3455 "a legacy on-disk version.\n")); 3456 break; 3457 3458 case ZPOOL_STATUS_VERSION_NEWER: 3459 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 3460 "an incompatible version.\n")); 3461 break; 3462 3463 case ZPOOL_STATUS_FEAT_DISABLED: 3464 printf_color(ANSI_YELLOW, gettext("Some supported " 3465 "features are not enabled on the pool.\n" 3466 "\t%s(Note that they may be intentionally disabled if the\n" 3467 "\t%s'compatibility' property is set.)\n"), indent, indent); 3468 break; 3469 3470 case ZPOOL_STATUS_COMPATIBILITY_ERR: 3471 printf_color(ANSI_YELLOW, gettext("Error reading or parsing " 3472 "the file(s)
indicated by the 'compatibility'\n" 3473 "\t%sproperty.\n"), indent); 3474 break; 3475 3476 case ZPOOL_STATUS_INCOMPATIBLE_FEAT: 3477 printf_color(ANSI_YELLOW, gettext("One or more features " 3478 "are enabled on the pool despite not being\n" 3479 "\t%srequested by the 'compatibility' property.\n"), 3480 indent); 3481 break; 3482 3483 case ZPOOL_STATUS_UNSUP_FEAT_READ: 3484 printf_color(ANSI_YELLOW, gettext("The pool uses the following " 3485 "feature(s) not supported on this system:\n")); 3486 color_start(ANSI_YELLOW); 3487 zpool_collect_unsup_feat(config, buf, 2048); 3488 (void) printf("%s", buf); 3489 color_end(); 3490 break; 3491 3492 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 3493 printf_color(ANSI_YELLOW, gettext("The pool can only be " 3494 "accessed in read-only mode on this system. It\n" 3495 "\t%scannot be accessed in read-write mode because it uses " 3496 "the following\n" 3497 "\t%sfeature(s) not supported on this system:\n"), 3498 indent, indent); 3499 color_start(ANSI_YELLOW); 3500 zpool_collect_unsup_feat(config, buf, 2048); 3501 (void) printf("%s", buf); 3502 color_end(); 3503 break; 3504 3505 case ZPOOL_STATUS_HOSTID_ACTIVE: 3506 printf_color(ANSI_YELLOW, gettext("The pool is currently " 3507 "imported by another system.\n")); 3508 break; 3509 3510 case ZPOOL_STATUS_HOSTID_REQUIRED: 3511 printf_color(ANSI_YELLOW, gettext("The pool has the " 3512 "multihost property on. It cannot\n" 3513 "\t%sbe safely imported when the system hostid is not " 3514 "set.\n"), indent); 3515 break; 3516 3517 case ZPOOL_STATUS_HOSTID_MISMATCH: 3518 printf_color(ANSI_YELLOW, gettext("The pool was last accessed " 3519 "by another system.\n")); 3520 break; 3521 3522 case ZPOOL_STATUS_FAULTED_DEV_R: 3523 case ZPOOL_STATUS_FAULTED_DEV_NR: 3524 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3525 "faulted.\n")); 3526 break; 3527 3528 case ZPOOL_STATUS_BAD_LOG: 3529 printf_color(ANSI_YELLOW, gettext("An intent log record cannot " 3530 "be read.\n")); 3531 break; 3532 3533 case ZPOOL_STATUS_RESILVERING: 3534 case ZPOOL_STATUS_REBUILDING: 3535 printf_color(ANSI_YELLOW, gettext("One or more devices were " 3536 "being resilvered.\n")); 3537 break; 3538 3539 case ZPOOL_STATUS_ERRATA: 3540 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"), 3541 errata); 3542 break; 3543 3544 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 3545 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3546 "configured to use a non-native block size.\n" 3547 "\t%sExpect reduced performance.\n"), indent); 3548 break; 3549 3550 default: 3551 /* 3552 * No other status can be seen when importing pools. 3553 */ 3554 assert(reason == ZPOOL_STATUS_OK); 3555 } 3556 3557 /* 3558 * Print out an action according to the overall state of the pool. 
3559 */ 3560 if (vs->vs_state != VDEV_STATE_HEALTHY || 3561 reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) { 3562 (void) printf("%s", indent); 3563 (void) printf(gettext("action: ")); 3564 } 3565 if (vs->vs_state == VDEV_STATE_HEALTHY) { 3566 if (reason == ZPOOL_STATUS_VERSION_OLDER || 3567 reason == ZPOOL_STATUS_FEAT_DISABLED) { 3568 (void) printf(gettext("The pool can be imported using " 3569 "its name or numeric identifier, though\n" 3570 "\t%ssome features will not be available without " 3571 "an explicit 'zpool upgrade'.\n"), indent); 3572 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) { 3573 (void) printf(gettext("The pool can be imported using " 3574 "its name or numeric\n" 3575 "\t%sidentifier, though the file(s) indicated by " 3576 "its 'compatibility'\n" 3577 "\t%sproperty cannot be parsed at this time.\n"), 3578 indent, indent); 3579 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) { 3580 (void) printf(gettext("The pool can be imported using " 3581 "its name or numeric identifier and\n" 3582 "\t%sthe '-f' flag.\n"), indent); 3583 } else if (reason == ZPOOL_STATUS_ERRATA) { 3584 switch (errata) { 3585 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 3586 (void) printf(gettext("The pool can be " 3587 "imported using its name or numeric " 3588 "identifier,\n" 3589 "\t%showever there is a compatibility " 3590 "issue which should be corrected\n" 3591 "\t%sby running 'zpool scrub'\n"), 3592 indent, indent); 3593 break; 3594 3595 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY: 3596 (void) printf(gettext("The pool cannot be " 3597 "imported with this version of ZFS due to\n" 3598 "\t%san active asynchronous destroy. " 3599 "Revert to an earlier version\n" 3600 "\t%sand allow the destroy to complete " 3601 "before updating.\n"), indent, indent); 3602 break; 3603 3604 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 3605 (void) printf(gettext("Existing encrypted " 3606 "datasets contain an on-disk " 3607 "incompatibility, which\n" 3608 "\t%sneeds to be corrected. Backup these " 3609 "datasets to new encrypted datasets\n" 3610 "\t%sand destroy the old ones.\n"), 3611 indent, indent); 3612 break; 3613 3614 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 3615 (void) printf(gettext("Existing encrypted " 3616 "snapshots and bookmarks contain an " 3617 "on-disk\n" 3618 "\t%sincompatibility. This may cause " 3619 "on-disk corruption if they are used\n" 3620 "\t%swith 'zfs recv'. To correct the " 3621 "issue, enable the bookmark_v2 feature.\n" 3622 "\t%sNo additional action is needed if " 3623 "there are no encrypted snapshots or\n" 3624 "\t%sbookmarks. If preserving the " 3625 "encrypted snapshots and bookmarks is\n" 3626 "\t%srequired, use a non-raw send to " 3627 "backup and restore them. Alternately,\n" 3628 "\t%sthey may be removed to resolve the " 3629 "incompatibility.\n"), indent, indent, 3630 indent, indent, indent, indent); 3631 break; 3632 default: 3633 /* 3634 * All errata must contain an action message. 3635 */ 3636 assert(errata == ZPOOL_ERRATA_NONE); 3637 } 3638 } else { 3639 (void) printf(gettext("The pool can be imported using " 3640 "its name or numeric identifier.\n")); 3641 } 3642 } else if (vs->vs_state == VDEV_STATE_DEGRADED) { 3643 (void) printf(gettext("The pool can be imported despite " 3644 "missing or damaged devices. The\n" 3645 "\t%sfault tolerance of the pool may be compromised if " 3646 "imported.\n"), indent); 3647 } else { 3648 switch (reason) { 3649 case ZPOOL_STATUS_VERSION_NEWER: 3650 (void) printf(gettext("The pool cannot be imported. 
" 3651 "Access the pool on a system running newer\n" 3652 "\t%ssoftware, or recreate the pool from " 3653 "backup.\n"), indent); 3654 break; 3655 case ZPOOL_STATUS_UNSUP_FEAT_READ: 3656 (void) printf(gettext("The pool cannot be imported. " 3657 "Access the pool on a system that supports\n" 3658 "\t%sthe required feature(s), or recreate the pool " 3659 "from backup.\n"), indent); 3660 break; 3661 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 3662 (void) printf(gettext("The pool cannot be imported in " 3663 "read-write mode. Import the pool with\n" 3664 "\t%s'-o readonly=on', access the pool on a system " 3665 "that supports the\n" 3666 "\t%srequired feature(s), or recreate the pool " 3667 "from backup.\n"), indent, indent); 3668 break; 3669 case ZPOOL_STATUS_MISSING_DEV_R: 3670 case ZPOOL_STATUS_MISSING_DEV_NR: 3671 case ZPOOL_STATUS_BAD_GUID_SUM: 3672 (void) printf(gettext("The pool cannot be imported. " 3673 "Attach the missing\n" 3674 "\t%sdevices and try again.\n"), indent); 3675 break; 3676 case ZPOOL_STATUS_HOSTID_ACTIVE: 3677 VERIFY0(nvlist_lookup_nvlist(config, 3678 ZPOOL_CONFIG_LOAD_INFO, &nvinfo)); 3679 3680 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 3681 hostname = fnvlist_lookup_string(nvinfo, 3682 ZPOOL_CONFIG_MMP_HOSTNAME); 3683 3684 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3685 hostid = fnvlist_lookup_uint64(nvinfo, 3686 ZPOOL_CONFIG_MMP_HOSTID); 3687 3688 (void) printf(gettext("The pool must be exported from " 3689 "%s (hostid=%"PRIx64")\n" 3690 "\t%sbefore it can be safely imported.\n"), 3691 hostname, hostid, indent); 3692 break; 3693 case ZPOOL_STATUS_HOSTID_REQUIRED: 3694 (void) printf(gettext("Set a unique system hostid with " 3695 "the zgenhostid(8) command.\n")); 3696 break; 3697 default: 3698 (void) printf(gettext("The pool cannot be imported due " 3699 "to damaged devices or data.\n")); 3700 } 3701 } 3702 3703 /* Print the comment attached to the pool. 
*/ 3704 if (comment != NULL) 3705 (void) printf(gettext("comment: %s\n"), comment); 3706 3707 /* 3708 * If the state is "closed" or "can't open", and the aux state 3709 * is "corrupt data": 3710 */ 3711 if ((vs->vs_state == VDEV_STATE_CLOSED || 3712 vs->vs_state == VDEV_STATE_CANT_OPEN) && 3713 vs->vs_aux == VDEV_AUX_CORRUPT_DATA) { 3714 if (pool_state == POOL_STATE_DESTROYED) 3715 (void) printf(gettext("\t%sThe pool was destroyed, " 3716 "but can be imported using the '-Df' flags.\n"), 3717 indent); 3718 else if (pool_state != POOL_STATE_EXPORTED) 3719 (void) printf(gettext("\t%sThe pool may be active on " 3720 "another system, but can be imported using\n" 3721 "\t%sthe '-f' flag.\n"), indent, indent); 3722 } 3723 3724 if (msgid != NULL) { 3725 (void) printf(gettext("%s see: " 3726 "https://openzfs.github.io/openzfs-docs/msg/%s\n"), 3727 indent, msgid); 3728 } 3729 3730 (void) printf(gettext("%sconfig:\n\n"), indent); 3731 3732 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name), 3733 VDEV_NAME_TYPE_ID); 3734 if (cb.cb_namewidth < 10) 3735 cb.cb_namewidth = 10; 3736 3737 print_import_config(&cb, name, nvroot, 0); 3738 3739 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP); 3740 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 3741 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS); 3742 3743 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) { 3744 (void) printf(gettext("\n\t%sAdditional devices are known to " 3745 "be part of this pool, though their\n" 3746 "\t%sexact configuration cannot be determined.\n"), 3747 indent, indent); 3748 } 3749 return (0); 3750 } 3751 3752 static boolean_t 3753 zfs_force_import_required(nvlist_t *config) 3754 { 3755 uint64_t state; 3756 uint64_t hostid = 0; 3757 nvlist_t *nvinfo; 3758 3759 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE); 3760 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3761 3762 /* 3763 * The hostid on LOAD_INFO comes from the MOS label via 3764 * spa_tryimport(). If it's not there then we're likely talking to an 3765 * older kernel, so use the top one, which will be from the label 3766 * discovered in zpool_find_import(), or if a cachefile is in use, the 3767 * local hostid. 3768 */ 3769 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0) 3770 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, 3771 &hostid); 3772 3773 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid()) 3774 return (B_TRUE); 3775 3776 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) { 3777 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo, 3778 ZPOOL_CONFIG_MMP_STATE); 3779 3780 if (mmp_state != MMP_STATE_INACTIVE) 3781 return (B_TRUE); 3782 } 3783 3784 return (B_FALSE); 3785 } 3786 3787 /* 3788 * Perform the import for the given configuration. This passes the heavy 3789 * lifting off to zpool_import_props(), and then mounts the datasets contained 3790 * within the pool.
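 *
 * In outline, the happy path below is roughly (error handling and the
 * multihost safety checks elided):
 *
 *	zpool_import_props(g_zfs, config, newname, props, flags);
 *	zhp = zpool_open_canfail(g_zfs, name);
 *	zpool_enable_datasets(zhp, mntopts, 0, mntthreads);
 *	zpool_close(zhp);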
3791 */ 3792 static int 3793 do_import(nvlist_t *config, const char *newname, const char *mntopts, 3794 nvlist_t *props, int flags, uint_t mntthreads) 3795 { 3796 int ret = 0; 3797 int ms_status = 0; 3798 zpool_handle_t *zhp; 3799 const char *name; 3800 uint64_t version; 3801 3802 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME); 3803 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION); 3804 3805 if (!SPA_VERSION_IS_SUPPORTED(version)) { 3806 (void) fprintf(stderr, gettext("cannot import '%s': pool " 3807 "is formatted using an unsupported ZFS version\n"), name); 3808 return (1); 3809 } else if (zfs_force_import_required(config) && 3810 !(flags & ZFS_IMPORT_ANY_HOST)) { 3811 mmp_state_t mmp_state = MMP_STATE_INACTIVE; 3812 nvlist_t *nvinfo; 3813 3814 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3815 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) 3816 mmp_state = fnvlist_lookup_uint64(nvinfo, 3817 ZPOOL_CONFIG_MMP_STATE); 3818 3819 if (mmp_state == MMP_STATE_ACTIVE) { 3820 const char *hostname = "<unknown>"; 3821 uint64_t hostid = 0; 3822 3823 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 3824 hostname = fnvlist_lookup_string(nvinfo, 3825 ZPOOL_CONFIG_MMP_HOSTNAME); 3826 3827 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3828 hostid = fnvlist_lookup_uint64(nvinfo, 3829 ZPOOL_CONFIG_MMP_HOSTID); 3830 3831 (void) fprintf(stderr, gettext("cannot import '%s': " 3832 "pool is imported on %s (hostid: " 3833 "0x%"PRIx64")\nExport the pool on the other " 3834 "system, then run 'zpool import'.\n"), 3835 name, hostname, hostid); 3836 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 3837 (void) fprintf(stderr, gettext("cannot import '%s': " 3838 "pool has the multihost property on and the\n" 3839 "system's hostid is not set. Set a unique hostid " 3840 "with the zgenhostid(8) command.\n"), name); 3841 } else { 3842 const char *hostname = "<unknown>"; 3843 time_t timestamp = 0; 3844 uint64_t hostid = 0; 3845 3846 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME)) 3847 hostname = fnvlist_lookup_string(nvinfo, 3848 ZPOOL_CONFIG_HOSTNAME); 3849 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME)) 3850 hostname = fnvlist_lookup_string(config, 3851 ZPOOL_CONFIG_HOSTNAME); 3852 3853 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP)) 3854 timestamp = fnvlist_lookup_uint64(config, 3855 ZPOOL_CONFIG_TIMESTAMP); 3856 3857 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID)) 3858 hostid = fnvlist_lookup_uint64(nvinfo, 3859 ZPOOL_CONFIG_HOSTID); 3860 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID)) 3861 hostid = fnvlist_lookup_uint64(config, 3862 ZPOOL_CONFIG_HOSTID); 3863 3864 (void) fprintf(stderr, gettext("cannot import '%s': " 3865 "pool was previously in use from another system.\n" 3866 "Last accessed by %s (hostid=%"PRIx64") at %s" 3867 "The pool can be imported; use 'zpool import -f' " 3868 "to import the pool.\n"), name, hostname, 3869 hostid, ctime(&timestamp)); 3870 } 3871 3872 return (1); 3873 } 3874 3875 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0) 3876 return (1); 3877 3878 if (newname != NULL) 3879 name = newname; 3880 3881 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL) 3882 return (1); 3883 3884 /* 3885 * Loading keys is best effort. We don't want to return immediately 3886 * if it fails but we do want to give the error to the caller.
3887 */ 3888 if (flags & ZFS_IMPORT_LOAD_KEYS && 3889 zfs_crypto_attempt_load_keys(g_zfs, name) != 0) 3890 ret = 1; 3891 3892 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL && 3893 !(flags & ZFS_IMPORT_ONLY)) { 3894 ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads); 3895 if (ms_status == EZFS_SHAREFAILED) { 3896 (void) fprintf(stderr, gettext("Import was " 3897 "successful, but unable to share some datasets\n")); 3898 } else if (ms_status == EZFS_MOUNTFAILED) { 3899 (void) fprintf(stderr, gettext("Import was " 3900 "successful, but unable to mount some datasets\n")); 3901 } 3902 } 3903 3904 zpool_close(zhp); 3905 return (ret); 3906 } 3907 3908 typedef struct import_parameters { 3909 nvlist_t *ip_config; 3910 const char *ip_mntopts; 3911 nvlist_t *ip_props; 3912 int ip_flags; 3913 uint_t ip_mntthreads; 3914 int *ip_err; 3915 } import_parameters_t; 3916 3917 static void 3918 do_import_task(void *arg) 3919 { 3920 import_parameters_t *ip = arg; 3921 *ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts, 3922 ip->ip_props, ip->ip_flags, ip->ip_mntthreads); 3923 free(ip); 3924 } 3925 3926 3927 static int 3928 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags, 3929 char *orig_name, char *new_name, importargs_t *import) 3930 { 3931 nvlist_t *config = NULL; 3932 nvlist_t *found_config = NULL; 3933 uint64_t pool_state; 3934 boolean_t pool_specified = (import->poolname != NULL || 3935 import->guid != 0); 3936 uint_t npools = 0; 3937 3938 3939 tpool_t *tp = NULL; 3940 if (import->do_all) { 3941 tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 3942 0, NULL); 3943 } 3944 3945 /* 3946 * At this point we have a list of import candidate configs. Even if 3947 * we were searching by pool name or guid, we still need to 3948 * post-process the list to deal with pool state and possible 3949 * duplicate names. 3950 */ 3951 int err = 0; 3952 nvpair_t *elem = NULL; 3953 boolean_t first = B_TRUE; 3954 if (!pool_specified && import->do_all) { 3955 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) 3956 npools++; 3957 } 3958 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { 3959 3960 verify(nvpair_value_nvlist(elem, &config) == 0); 3961 3962 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 3963 &pool_state) == 0); 3964 if (!import->do_destroyed && 3965 pool_state == POOL_STATE_DESTROYED) 3966 continue; 3967 if (import->do_destroyed && 3968 pool_state != POOL_STATE_DESTROYED) 3969 continue; 3970 3971 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY, 3972 import->policy) == 0); 3973 3974 if (!pool_specified) { 3975 if (first) 3976 first = B_FALSE; 3977 else if (!import->do_all) 3978 (void) fputc('\n', stdout); 3979 3980 if (import->do_all) { 3981 import_parameters_t *ip = safe_malloc( 3982 sizeof (import_parameters_t)); 3983 3984 ip->ip_config = config; 3985 ip->ip_mntopts = mntopts; 3986 ip->ip_props = props; 3987 ip->ip_flags = flags; 3988 ip->ip_mntthreads = mount_tp_nthr / npools; 3989 ip->ip_err = &err; 3990 3991 (void) tpool_dispatch(tp, do_import_task, 3992 (void *)ip); 3993 } else { 3994 /* 3995 * If we're importing from cachefile, then 3996 * we don't want to report errors until we 3997 * are in the scan phase of the import. If 3998 * we get an error, then we return that error 3999 * to invoke the scan phase. 
4000 */ 4001 if (import->cachefile && !import->scan) 4002 err = show_import(config, B_FALSE); 4003 else 4004 (void) show_import(config, B_TRUE); 4005 } 4006 } else if (import->poolname != NULL) { 4007 const char *name; 4008 4009 /* 4010 * We are searching for a pool based on name. 4011 */ 4012 verify(nvlist_lookup_string(config, 4013 ZPOOL_CONFIG_POOL_NAME, &name) == 0); 4014 4015 if (strcmp(name, import->poolname) == 0) { 4016 if (found_config != NULL) { 4017 (void) fprintf(stderr, gettext( 4018 "cannot import '%s': more than " 4019 "one matching pool\n"), 4020 import->poolname); 4021 (void) fprintf(stderr, gettext( 4022 "import by numeric ID instead\n")); 4023 err = B_TRUE; 4024 } 4025 found_config = config; 4026 } 4027 } else { 4028 uint64_t guid; 4029 4030 /* 4031 * Search for a pool by guid. 4032 */ 4033 verify(nvlist_lookup_uint64(config, 4034 ZPOOL_CONFIG_POOL_GUID, &guid) == 0); 4035 4036 if (guid == import->guid) 4037 found_config = config; 4038 } 4039 } 4040 if (import->do_all) { 4041 tpool_wait(tp); 4042 tpool_destroy(tp); 4043 } 4044 4045 /* 4046 * If we were searching for a specific pool, verify that we found a 4047 * pool, and then do the import. 4048 */ 4049 if (pool_specified && err == 0) { 4050 if (found_config == NULL) { 4051 (void) fprintf(stderr, gettext("cannot import '%s': " 4052 "no such pool available\n"), orig_name); 4053 err = B_TRUE; 4054 } else { 4055 err |= do_import(found_config, new_name, 4056 mntopts, props, flags, mount_tp_nthr); 4057 } 4058 } 4059 4060 /* 4061 * If we were just looking for pools, report an error if none were 4062 * found. 4063 */ 4064 if (!pool_specified && first) 4065 (void) fprintf(stderr, 4066 gettext("no pools available to import\n")); 4067 return (err); 4068 } 4069 4070 typedef struct target_exists_args { 4071 const char *poolname; 4072 uint64_t poolguid; 4073 } target_exists_args_t; 4074 4075 static int 4076 name_or_guid_exists(zpool_handle_t *zhp, void *data) 4077 { 4078 target_exists_args_t *args = data; 4079 nvlist_t *config = zpool_get_config(zhp, NULL); 4080 int found = 0; 4081 4082 if (config == NULL) 4083 return (0); 4084 4085 if (args->poolname != NULL) { 4086 const char *pool_name; 4087 4088 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 4089 &pool_name) == 0); 4090 if (strcmp(pool_name, args->poolname) == 0) 4091 found = 1; 4092 } else { 4093 uint64_t pool_guid; 4094 4095 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 4096 &pool_guid) == 0); 4097 if (pool_guid == args->poolguid) 4098 found = 1; 4099 } 4100 zpool_close(zhp); 4101 4102 return (found); 4103 } 4104 /* 4105 * zpool checkpoint <pool> 4106 * checkpoint --discard <pool> 4107 * 4108 * -d Discard the checkpoint from a checkpointed 4109 * --discard pool. 4110 * 4111 * -w Wait for discarding a checkpoint to complete. 4112 * --wait 4113 * 4114 * Checkpoints the specified pool, by taking a "snapshot" of its 4115 * current state. A pool can only have one checkpoint at a time. 
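 *
 * Example (hypothetical pool name 'tank'):
 *
 *	# zpool checkpoint tank		take a checkpoint of 'tank'
 *	# zpool checkpoint -d -w tank	discard it and wait for completion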
4116 */
4117 int
4118 zpool_do_checkpoint(int argc, char **argv)
4119 {
4120 boolean_t discard, wait;
4121 char *pool;
4122 zpool_handle_t *zhp;
4123 int c, err;
4124
4125 struct option long_options[] = {
4126 {"discard", no_argument, NULL, 'd'},
4127 {"wait", no_argument, NULL, 'w'},
4128 {0, 0, 0, 0}
4129 };
4130
4131 discard = B_FALSE;
4132 wait = B_FALSE;
4133 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
4134 switch (c) {
4135 case 'd':
4136 discard = B_TRUE;
4137 break;
4138 case 'w':
4139 wait = B_TRUE;
4140 break;
4141 case '?':
4142 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4143 optopt);
4144 usage(B_FALSE);
4145 }
4146 }
4147
4148 if (wait && !discard) {
4149 (void) fprintf(stderr, gettext("--wait only valid when "
4150 "--discard also specified\n"));
4151 usage(B_FALSE);
4152 }
4153
4154 argc -= optind;
4155 argv += optind;
4156
4157 if (argc < 1) {
4158 (void) fprintf(stderr, gettext("missing pool argument\n"));
4159 usage(B_FALSE);
4160 }
4161
4162 if (argc > 1) {
4163 (void) fprintf(stderr, gettext("too many arguments\n"));
4164 usage(B_FALSE);
4165 }
4166
4167 pool = argv[0];
4168
4169 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
4170 /* As a special case, check for use of '/' in the name */
4171 if (strchr(pool, '/') != NULL)
4172 (void) fprintf(stderr, gettext("'zpool checkpoint' "
4173 "doesn't work on datasets. To save the state "
4174 "of a dataset from a specific point in time "
4175 "please use 'zfs snapshot'\n"));
4176 return (1);
4177 }
4178
4179 if (discard) {
4180 err = (zpool_discard_checkpoint(zhp) != 0);
4181 if (err == 0 && wait)
4182 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
4183 } else {
4184 err = (zpool_checkpoint(zhp) != 0);
4185 }
4186
4187 zpool_close(zhp);
4188
4189 return (err);
4190 }
4191
4192 #define CHECKPOINT_OPT 1024
4193
4194 /*
4195 * zpool prefetch <type> [<type opts>] <pool>
4196 *
4197 * Prefetches a particular type of data in the specified pool.
4198 */
4199 int
4200 zpool_do_prefetch(int argc, char **argv)
4201 {
4202 int c;
4203 char *poolname;
4204 char *typestr = NULL;
4205 zpool_prefetch_type_t type;
4206 zpool_handle_t *zhp;
4207 int err = 0;
4208
4209 while ((c = getopt(argc, argv, "t:")) != -1) {
4210 switch (c) {
4211 case 't':
4212 typestr = optarg;
4213 break;
4214 case ':':
4215 (void) fprintf(stderr, gettext("missing argument for "
4216 "'%c' option\n"), optopt);
4217 usage(B_FALSE);
4218 break;
4219 case '?':
4220 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4221 optopt);
4222 usage(B_FALSE);
4223 }
4224 }
4225 argc -= optind;
4226 argv += optind;
4227
4228 if (argc < 1) {
4229 (void) fprintf(stderr, gettext("missing pool name argument\n"));
4230 usage(B_FALSE);
4231 }
4232
4233 if (argc > 1) {
4234 (void) fprintf(stderr, gettext("too many arguments\n"));
4235 usage(B_FALSE);
4236 }
4237
4238 poolname = argv[0];
4239
4240 argc--;
4241 argv++;
4242
/* The type is mandatory; reject a missing -t before dereferencing it. */
if (typestr == NULL) {
(void) fprintf(stderr, gettext("missing prefetch type\n"));
usage(B_FALSE);
}
4243 if (strcmp(typestr, "ddt") == 0) {
4244 type = ZPOOL_PREFETCH_DDT;
4245 } else {
4246 (void) fprintf(stderr, gettext("unsupported prefetch type\n"));
4247 usage(B_FALSE);
4248 }
4249
4250 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
4251 return (1);
4252
4253 err = zpool_prefetch(zhp, type);
4254
4255 zpool_close(zhp);
4256
4257 return (err);
4258 }
4259
4260 /*
4261 * zpool import [-d dir] [-D]
4262 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
4263 * [-d dir | -c cachefile | -s] [-f] -a
4264 * import [-o mntopts] [-o prop=value] ...
[-R root] [-D] [-l]
4265 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
4266 * [newpool]
4267 *
4268 * -c Read pool information from a cachefile instead of searching
4269 * devices. If importing from a cachefile config fails, then
4270 * fall back to searching for devices only in the directories that
4271 * exist in the cachefile.
4272 *
4273 * -d Scan in a specific directory, other than /dev/. More than
4274 * one directory can be specified using multiple '-d' options.
4275 *
4276 * -D Scan for previously destroyed pools, or import destroyed pools
4277 * only (all of them with -a, or just the one specified).
4278 *
4279 * -R Temporarily import the pool, with all mountpoints relative to
4280 * the given root. The pool will remain exported when the machine
4281 * is rebooted.
4282 *
4283 * -V Import even in the presence of faulted vdevs. This is an
4284 * intentionally undocumented option for testing purposes, and
4285 * treats the pool configuration as complete, leaving any bad
4286 * vdevs in the FAULTED state. In other words, it does a verbatim
4287 * import.
4288 *
4289 * -f Force import, even if it appears that the pool is active.
4290 *
4291 * -F Attempt rewind if necessary.
4292 *
4293 * -n See if rewind would work, but don't actually rewind.
4294 *
4295 * -N Import the pool but don't mount datasets.
4296 *
4297 * -T Specify a starting txg to use for import. This option is
4298 * intentionally undocumented for testing purposes.
4299 *
4300 * -a Import all pools found.
4301 *
4302 * -l Load encryption keys while importing.
4303 *
4304 * -o Set property=value and/or temporary mount options (without '=').
4305 *
4306 * -s Scan using the default search path; the libblkid cache will
4307 * not be consulted.
4308 *
4309 * --rewind-to-checkpoint
4310 * Import the pool and revert to the checkpoint.
4311 *
4312 * The import command scans for pools to import, and imports pools based on
4313 * pool name or GUID. The pool can also be renamed as part of the import.
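 *
 * Examples (hypothetical pool name 'tank'):
 *
 *	# zpool import			list pools available for import
 *	# zpool import -a		import every pool found
 *	# zpool import tank newtank	import 'tank' under the name 'newtank'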
4314 */ 4315 int 4316 zpool_do_import(int argc, char **argv) 4317 { 4318 char **searchdirs = NULL; 4319 char *env, *envdup = NULL; 4320 int nsearch = 0; 4321 int c; 4322 int err = 0; 4323 nvlist_t *pools = NULL; 4324 boolean_t do_all = B_FALSE; 4325 boolean_t do_destroyed = B_FALSE; 4326 char *mntopts = NULL; 4327 uint64_t searchguid = 0; 4328 char *searchname = NULL; 4329 char *propval; 4330 nvlist_t *policy = NULL; 4331 nvlist_t *props = NULL; 4332 int flags = ZFS_IMPORT_NORMAL; 4333 uint32_t rewind_policy = ZPOOL_NO_REWIND; 4334 boolean_t dryrun = B_FALSE; 4335 boolean_t do_rewind = B_FALSE; 4336 boolean_t xtreme_rewind = B_FALSE; 4337 boolean_t do_scan = B_FALSE; 4338 boolean_t pool_exists = B_FALSE; 4339 uint64_t txg = -1ULL; 4340 char *cachefile = NULL; 4341 importargs_t idata = { 0 }; 4342 char *endptr; 4343 4344 struct option long_options[] = { 4345 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT}, 4346 {0, 0, 0, 0} 4347 }; 4348 4349 /* check options */ 4350 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX", 4351 long_options, NULL)) != -1) { 4352 switch (c) { 4353 case 'a': 4354 do_all = B_TRUE; 4355 break; 4356 case 'c': 4357 cachefile = optarg; 4358 break; 4359 case 'd': 4360 searchdirs = safe_realloc(searchdirs, 4361 (nsearch + 1) * sizeof (char *)); 4362 searchdirs[nsearch++] = optarg; 4363 break; 4364 case 'D': 4365 do_destroyed = B_TRUE; 4366 break; 4367 case 'f': 4368 flags |= ZFS_IMPORT_ANY_HOST; 4369 break; 4370 case 'F': 4371 do_rewind = B_TRUE; 4372 break; 4373 case 'l': 4374 flags |= ZFS_IMPORT_LOAD_KEYS; 4375 break; 4376 case 'm': 4377 flags |= ZFS_IMPORT_MISSING_LOG; 4378 break; 4379 case 'n': 4380 dryrun = B_TRUE; 4381 break; 4382 case 'N': 4383 flags |= ZFS_IMPORT_ONLY; 4384 break; 4385 case 'o': 4386 if ((propval = strchr(optarg, '=')) != NULL) { 4387 *propval = '\0'; 4388 propval++; 4389 if (add_prop_list(optarg, propval, 4390 &props, B_TRUE)) 4391 goto error; 4392 } else { 4393 mntopts = optarg; 4394 } 4395 break; 4396 case 'R': 4397 if (add_prop_list(zpool_prop_to_name( 4398 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE)) 4399 goto error; 4400 if (add_prop_list_default(zpool_prop_to_name( 4401 ZPOOL_PROP_CACHEFILE), "none", &props)) 4402 goto error; 4403 break; 4404 case 's': 4405 do_scan = B_TRUE; 4406 break; 4407 case 't': 4408 flags |= ZFS_IMPORT_TEMP_NAME; 4409 if (add_prop_list_default(zpool_prop_to_name( 4410 ZPOOL_PROP_CACHEFILE), "none", &props)) 4411 goto error; 4412 break; 4413 4414 case 'T': 4415 errno = 0; 4416 txg = strtoull(optarg, &endptr, 0); 4417 if (errno != 0 || *endptr != '\0') { 4418 (void) fprintf(stderr, 4419 gettext("invalid txg value\n")); 4420 usage(B_FALSE); 4421 } 4422 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND; 4423 break; 4424 case 'V': 4425 flags |= ZFS_IMPORT_VERBATIM; 4426 break; 4427 case 'X': 4428 xtreme_rewind = B_TRUE; 4429 break; 4430 case CHECKPOINT_OPT: 4431 flags |= ZFS_IMPORT_CHECKPOINT; 4432 break; 4433 case ':': 4434 (void) fprintf(stderr, gettext("missing argument for " 4435 "'%c' option\n"), optopt); 4436 usage(B_FALSE); 4437 break; 4438 case '?': 4439 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 4440 optopt); 4441 usage(B_FALSE); 4442 } 4443 } 4444 4445 argc -= optind; 4446 argv += optind; 4447 4448 if (cachefile && nsearch != 0) { 4449 (void) fprintf(stderr, gettext("-c is incompatible with -d\n")); 4450 usage(B_FALSE); 4451 } 4452 4453 if (cachefile && do_scan) { 4454 (void) fprintf(stderr, gettext("-c is incompatible with -s\n")); 4455 usage(B_FALSE); 4456 } 4457 
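	/*
	 * -l requests keys for datasets as they are mounted, while -N
	 * mounts nothing, so the combination below is rejected.
	 */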
4458 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
4459 (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
4460 usage(B_FALSE);
4461 }
4462
4463 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
4464 (void) fprintf(stderr, gettext("-l is only meaningful during "
4465 "an import\n"));
4466 usage(B_FALSE);
4467 }
4468
4469 if ((dryrun || xtreme_rewind) && !do_rewind) {
4470 (void) fprintf(stderr,
4471 gettext("-n or -X only meaningful with -F\n"));
4472 usage(B_FALSE);
4473 }
4474 if (dryrun)
4475 rewind_policy = ZPOOL_TRY_REWIND;
4476 else if (do_rewind)
4477 rewind_policy = ZPOOL_DO_REWIND;
4478 if (xtreme_rewind)
4479 rewind_policy |= ZPOOL_EXTREME_REWIND;
4480
4481 /* In the future, we can capture further policy and include it here */
4482 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
4483 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
4484 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
4485 rewind_policy) != 0)
4486 goto error;
4487
4488 /* check argument count */
4489 if (do_all) {
4490 if (argc != 0) {
4491 (void) fprintf(stderr, gettext("too many arguments\n"));
4492 usage(B_FALSE);
4493 }
4494 } else {
4495 if (argc > 2) {
4496 (void) fprintf(stderr, gettext("too many arguments\n"));
4497 usage(B_FALSE);
4498 }
4499 }
4500
4501 /*
4502 * Check for the effective uid. We do this explicitly here because
4503 * otherwise any attempt to discover pools will silently fail.
4504 */
4505 if (argc == 0 && geteuid() != 0) {
4506 (void) fprintf(stderr, gettext("cannot "
4507 "discover pools: permission denied\n"));
4508
4509 free(searchdirs);
4510 nvlist_free(props);
4511 nvlist_free(policy);
4512 return (1);
4513 }
4514
4515 /*
4516 * Depending on the arguments given, we do one of the following:
4517 *
4518 * <none> Iterate through all pools and display information about
4519 * each one.
4520 *
4521 * -a Iterate through all pools and try to import each one.
4522 *
4523 * <id> Find the pool that corresponds to the given GUID/pool
4524 * name and import that one.
4525 *
4526 * -D The above options apply only to destroyed pools.
4527 */
4528 if (argc != 0) {
4529 char *endptr;
4530
4531 errno = 0;
4532 searchguid = strtoull(argv[0], &endptr, 10);
4533 if (errno != 0 || *endptr != '\0') {
4534 searchname = argv[0];
4535 searchguid = 0;
4536 }
4537
4538 /*
4539 * User specified a name or guid. Ensure it's unique.
4540 */
4541 target_exists_args_t search = {searchname, searchguid};
4542 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
4543 }
4544
4545 /*
4546 * Check the environment for the preferred search path.
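 * ZPOOL_IMPORT_PATH is a colon-separated list of directories, e.g.:
 *
 *	ZPOOL_IMPORT_PATH=/dev/disk/by-id:/dev/disk/by-path zpool import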
4547 */ 4548 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) { 4549 char *dir, *tmp = NULL; 4550 4551 envdup = strdup(env); 4552 4553 for (dir = strtok_r(envdup, ":", &tmp); 4554 dir != NULL; 4555 dir = strtok_r(NULL, ":", &tmp)) { 4556 searchdirs = safe_realloc(searchdirs, 4557 (nsearch + 1) * sizeof (char *)); 4558 searchdirs[nsearch++] = dir; 4559 } 4560 } 4561 4562 idata.path = searchdirs; 4563 idata.paths = nsearch; 4564 idata.poolname = searchname; 4565 idata.guid = searchguid; 4566 idata.cachefile = cachefile; 4567 idata.scan = do_scan; 4568 idata.policy = policy; 4569 idata.do_destroyed = do_destroyed; 4570 idata.do_all = do_all; 4571 4572 libpc_handle_t lpch = { 4573 .lpc_lib_handle = g_zfs, 4574 .lpc_ops = &libzfs_config_ops, 4575 .lpc_printerr = B_TRUE 4576 }; 4577 pools = zpool_search_import(&lpch, &idata); 4578 4579 if (pools != NULL && pool_exists && 4580 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) { 4581 (void) fprintf(stderr, gettext("cannot import '%s': " 4582 "a pool with that name already exists\n"), 4583 argv[0]); 4584 (void) fprintf(stderr, gettext("use the form '%s " 4585 "<pool | id> <newpool>' to give it a new name\n"), 4586 "zpool import"); 4587 err = 1; 4588 } else if (pools == NULL && pool_exists) { 4589 (void) fprintf(stderr, gettext("cannot import '%s': " 4590 "a pool with that name is already created/imported,\n"), 4591 argv[0]); 4592 (void) fprintf(stderr, gettext("and no additional pools " 4593 "with that name were found\n")); 4594 err = 1; 4595 } else if (pools == NULL) { 4596 if (argc != 0) { 4597 (void) fprintf(stderr, gettext("cannot import '%s': " 4598 "no such pool available\n"), argv[0]); 4599 } 4600 err = 1; 4601 } 4602 4603 if (err == 1) { 4604 free(searchdirs); 4605 free(envdup); 4606 nvlist_free(policy); 4607 nvlist_free(pools); 4608 nvlist_free(props); 4609 return (1); 4610 } 4611 4612 err = import_pools(pools, props, mntopts, flags, 4613 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata); 4614 4615 /* 4616 * If we're using the cachefile and we failed to import, then 4617 * fallback to scanning the directory for pools that match 4618 * those in the cachefile. 4619 */ 4620 if (err != 0 && cachefile != NULL) { 4621 (void) printf(gettext("cachefile import failed, retrying\n")); 4622 4623 /* 4624 * We use the scan flag to gather the directories that exist 4625 * in the cachefile. If we need to fallback to searching for 4626 * the pool config, we will only search devices in these 4627 * directories. 4628 */ 4629 idata.scan = B_TRUE; 4630 nvlist_free(pools); 4631 pools = zpool_search_import(&lpch, &idata); 4632 4633 err = import_pools(pools, props, mntopts, flags, 4634 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, 4635 &idata); 4636 } 4637 4638 error: 4639 nvlist_free(props); 4640 nvlist_free(pools); 4641 nvlist_free(policy); 4642 free(searchdirs); 4643 free(envdup); 4644 4645 return (err ? 1 : 0); 4646 } 4647 4648 /* 4649 * zpool sync [-f] [pool] ... 4650 * 4651 * -f (undocumented) force uberblock (and config including zpool cache file) 4652 * update. 4653 * 4654 * Sync the specified pool(s). 4655 * Without arguments "zpool sync" will sync all pools. 4656 * This command initiates TXG sync(s) and will return after the TXG(s) commit. 
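 *
 * Example (hypothetical pool name 'tank'):
 *
 *	# zpool sync		sync every imported pool
 *	# zpool sync tank	sync only 'tank'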
4657 * 4658 */ 4659 static int 4660 zpool_do_sync(int argc, char **argv) 4661 { 4662 int ret; 4663 boolean_t force = B_FALSE; 4664 4665 /* check options */ 4666 while ((ret = getopt(argc, argv, "f")) != -1) { 4667 switch (ret) { 4668 case 'f': 4669 force = B_TRUE; 4670 break; 4671 case '?': 4672 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 4673 optopt); 4674 usage(B_FALSE); 4675 } 4676 } 4677 4678 argc -= optind; 4679 argv += optind; 4680 4681 /* if argc == 0 we will execute zpool_sync_one on all pools */ 4682 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 4683 B_FALSE, zpool_sync_one, &force); 4684 4685 return (ret); 4686 } 4687 4688 typedef struct iostat_cbdata { 4689 uint64_t cb_flags; 4690 int cb_namewidth; 4691 int cb_iteration; 4692 boolean_t cb_verbose; 4693 boolean_t cb_literal; 4694 boolean_t cb_scripted; 4695 zpool_list_t *cb_list; 4696 vdev_cmd_data_list_t *vcdl; 4697 vdev_cbdata_t cb_vdevs; 4698 } iostat_cbdata_t; 4699 4700 /* iostat labels */ 4701 typedef struct name_and_columns { 4702 const char *name; /* Column name */ 4703 unsigned int columns; /* Center name to this number of columns */ 4704 } name_and_columns_t; 4705 4706 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */ 4707 4708 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] = 4709 { 4710 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2}, 4711 {NULL}}, 4712 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 4713 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1}, 4714 {NULL}}, 4715 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2}, 4716 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2}, 4717 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}}, 4718 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 4719 {"asyncq_wait", 2}, {NULL}}, 4720 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2}, 4721 {"async_read", 2}, {"async_write", 2}, {"scrub", 2}, 4722 {"trim", 2}, {"rebuild", 2}, {NULL}}, 4723 }; 4724 4725 /* Shorthand - if "columns" field not set, default to 1 column */ 4726 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] = 4727 { 4728 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"}, 4729 {"write"}, {NULL}}, 4730 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 4731 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"}, 4732 {NULL}}, 4733 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"}, 4734 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"}, 4735 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}}, 4736 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 4737 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"}, 4738 {NULL}}, 4739 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 4740 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 4741 {"ind"}, {"agg"}, {NULL}}, 4742 }; 4743 4744 static const char *histo_to_title[] = { 4745 [IOS_L_HISTO] = "latency", 4746 [IOS_RQ_HISTO] = "req_size", 4747 }; 4748 4749 /* 4750 * Return the number of labels in a null-terminated name_and_columns_t 4751 * array. 4752 * 4753 */ 4754 static unsigned int 4755 label_array_len(const name_and_columns_t *labels) 4756 { 4757 int i = 0; 4758 4759 while (labels[i].name) 4760 i++; 4761 4762 return (i); 4763 } 4764 4765 /* 4766 * Return the number of strings in a null-terminated string array. 
4767 * For example:
4768 *
4769 * const char *foo[] = {"bar", "baz", NULL};
4770 *
4771 * returns 2
4772 */
4773 static uint64_t
4774 str_array_len(const char *array[])
4775 {
4776 uint64_t i = 0;
4777 while (array[i])
4778 i++;
4779
4780 return (i);
4781 }
4782
4783
4784 /*
4785 * Return a default column width for default/latency/queue columns. This does
4786 * not include histograms, which have their columns autosized.
4787 */
4788 static unsigned int
4789 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
4790 {
4791 unsigned long column_width = 5; /* Normal niceprint */
4792 static unsigned long widths[] = {
4793 /*
4794 * Choose some sane default column sizes for printing the
4795 * raw numbers.
4796 */
4797 [IOS_DEFAULT] = 15, /* 1PB capacity */
4798 [IOS_LATENCY] = 10, /* 1B ns = 10sec */
4799 [IOS_QUEUES] = 6, /* 1M queue entries */
4800 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4801 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4802 };
4803
4804 if (cb->cb_literal)
4805 column_width = widths[type];
4806
4807 return (column_width);
4808 }
4809
4810 /*
4811 * Print the column labels, i.e.:
4812 *
4813 * capacity operations bandwidth
4814 * alloc free read write read write ...
4815 *
4816 * If force_column_width is set, use it for the column width. If not set, use
4817 * the default column width.
4818 */
4819 static void
4820 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4821 const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4822 {
4823 int i, idx, s;
4824 int text_start, rw_column_width, spaces_to_end;
4825 uint64_t flags = cb->cb_flags;
4826 uint64_t f;
4827 unsigned int column_width = force_column_width;
4828
4829 /* For each bit set in flags */
4830 for (f = flags; f; f &= ~(1ULL << idx)) {
4831 idx = lowbit64(f) - 1;
4832 if (!force_column_width)
4833 column_width = default_column_width(cb, idx);
4834 /* Print our top labels centered over "read write" label. */
4835 for (i = 0; i < label_array_len(labels[idx]); i++) {
4836 const char *name = labels[idx][i].name;
4837 /*
4838 * We treat labels[][].columns == 0 as shorthand
4839 * for one column. It makes writing out the label
4840 * tables more concise.
4841 */
4842 unsigned int columns = MAX(1, labels[idx][i].columns);
4843 unsigned int slen = strlen(name);
4844
4845 rw_column_width = (column_width * columns) +
4846 (2 * (columns - 1));
4847
4848 text_start = (int)((rw_column_width) / columns -
4849 slen / columns);
4850 if (text_start < 0)
4851 text_start = 0;
4852
4853 printf(" "); /* Two spaces between columns */
4854
4855 /* Space from beginning of column to label */
4856 for (s = 0; s < text_start; s++)
4857 printf(" ");
4858
4859 printf("%s", name);
4860
4861 /* Print space after label to end of column */
4862 spaces_to_end = rw_column_width - text_start - slen;
4863 if (spaces_to_end < 0)
4864 spaces_to_end = 0;
4865
4866 for (s = 0; s < spaces_to_end; s++)
4867 printf(" ");
4868 }
4869 }
4870 }
4871
4872
4873 /*
4874 * print_cmd_columns - Print custom column titles from -c
4875 *
4876 * If the user specified "zpool status|iostat -c", then print their custom
4877 * column titles in the header. For example, print_cmd_columns() would print
4878 * the " col1 col2" part of this:
4879 *
4880 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4881 * ...
4882 * capacity operations bandwidth 4883 * pool alloc free read write read write col1 col2 4884 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 4885 * mypool 269K 1008M 0 0 107 946 4886 * mirror 269K 1008M 0 0 107 946 4887 * sdb - - 0 0 102 473 val1 val2 4888 * sdc - - 0 0 5 473 val1 val2 4889 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 4890 */ 4891 static void 4892 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes) 4893 { 4894 int i, j; 4895 vdev_cmd_data_t *data = &vcdl->data[0]; 4896 4897 if (vcdl->count == 0 || data == NULL) 4898 return; 4899 4900 /* 4901 * Each vdev cmd should have the same column names unless the user did 4902 * something weird with their cmd. Just take the column names from the 4903 * first vdev and assume it works for all of them. 4904 */ 4905 for (i = 0; i < vcdl->uniq_cols_cnt; i++) { 4906 printf(" "); 4907 if (use_dashes) { 4908 for (j = 0; j < vcdl->uniq_cols_width[i]; j++) 4909 printf("-"); 4910 } else { 4911 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i], 4912 vcdl->uniq_cols[i]); 4913 } 4914 } 4915 } 4916 4917 4918 /* 4919 * Utility function to print out a line of dashes like: 4920 * 4921 * -------------------------------- ----- ----- ----- ----- ----- 4922 * 4923 * ...or a dashed named-row line like: 4924 * 4925 * logs - - - - - 4926 * 4927 * @cb: iostat data 4928 * 4929 * @force_column_width If non-zero, use the value as the column width. 4930 * Otherwise use the default column widths. 4931 * 4932 * @name: Print a dashed named-row line starting 4933 * with @name. Otherwise, print a regular 4934 * dashed line. 4935 */ 4936 static void 4937 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width, 4938 const char *name) 4939 { 4940 int i; 4941 unsigned int namewidth; 4942 uint64_t flags = cb->cb_flags; 4943 uint64_t f; 4944 int idx; 4945 const name_and_columns_t *labels; 4946 const char *title; 4947 4948 4949 if (cb->cb_flags & IOS_ANYHISTO_M) { 4950 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 4951 } else if (cb->cb_vdevs.cb_names_count) { 4952 title = "vdev"; 4953 } else { 4954 title = "pool"; 4955 } 4956 4957 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 4958 name ? 
strlen(name) : 0);
4959
4960
4961 if (name) {
4962 printf("%-*s", namewidth, name);
4963 } else {
4964 for (i = 0; i < namewidth; i++)
4965 (void) printf("-");
4966 }
4967
4968 /* For each bit in flags */
4969 for (f = flags; f; f &= ~(1ULL << idx)) {
4970 unsigned int column_width;
4971 idx = lowbit64(f) - 1;
4972 if (force_column_width)
4973 column_width = force_column_width;
4974 else
4975 column_width = default_column_width(cb, idx);
4976
4977 labels = iostat_bottom_labels[idx];
4978 for (i = 0; i < label_array_len(labels); i++) {
4979 if (name)
4980 printf(" %*s-", column_width - 1, " ");
4981 else
4982 printf(" %.*s", column_width,
4983 "--------------------");
4984 }
4985 }
4986 }
4987
4988
4989 static void
4990 print_iostat_separator_impl(iostat_cbdata_t *cb,
4991 unsigned int force_column_width)
4992 {
4993 print_iostat_dashes(cb, force_column_width, NULL);
4994 }
4995
4996 static void
4997 print_iostat_separator(iostat_cbdata_t *cb)
4998 {
4999 print_iostat_separator_impl(cb, 0);
5000 }
5001
5002 static void
5003 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
5004 const char *histo_vdev_name)
5005 {
5006 unsigned int namewidth;
5007 const char *title;
5008
5009 color_start(ANSI_BOLD);
5010
5011 if (cb->cb_flags & IOS_ANYHISTO_M) {
5012 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
5013 } else if (cb->cb_vdevs.cb_names_count) {
5014 title = "vdev";
5015 } else {
5016 title = "pool";
5017 }
5018
5019 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
5020 histo_vdev_name ? strlen(histo_vdev_name) : 0);
5021
5022 if (histo_vdev_name)
5023 printf("%-*s", namewidth, histo_vdev_name);
5024 else
5025 printf("%*s", namewidth, "");
5026
5027
5028 print_iostat_labels(cb, force_column_width, iostat_top_labels);
5029 printf("\n");
5030
5031 printf("%-*s", namewidth, title);
5032
5033 print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
5034 if (cb->vcdl != NULL)
5035 print_cmd_columns(cb->vcdl, 0);
5036
5037 printf("\n");
5038
5039 print_iostat_separator_impl(cb, force_column_width);
5040
5041 if (cb->vcdl != NULL)
5042 print_cmd_columns(cb->vcdl, 1);
5043
5044 color_end();
5045
5046 printf("\n");
5047 }
5048
5049 static void
5050 print_iostat_header(iostat_cbdata_t *cb)
5051 {
5052 print_iostat_header_impl(cb, 0, NULL);
5053 }
5054
5055 /*
5056 * Prints a size string (e.g. 120M) with the suffix ("M") colored
5057 * by order of magnitude. Uses column_size to add padding.
5058 */
5059 static void
5060 print_stat_color(const char *statbuf, unsigned int column_size)
5061 {
5062 fputs(" ", stdout);
5063 size_t len = strlen(statbuf);
5064 while (len < column_size) {
5065 fputc(' ', stdout);
5066 column_size--;
5067 }
5068 if (*statbuf == '0') {
5069 color_start(ANSI_GRAY);
5070 fputc('0', stdout);
5071 } else {
5072 for (; *statbuf; statbuf++) {
5073 if (*statbuf == 'K') color_start(ANSI_GREEN);
5074 else if (*statbuf == 'M') color_start(ANSI_YELLOW);
5075 else if (*statbuf == 'G') color_start(ANSI_RED);
5076 else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
5077 else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
5078 else if (*statbuf == 'E') color_start(ANSI_CYAN);
5079 fputc(*statbuf, stdout);
5080 if (--column_size <= 0)
5081 break;
5082 }
5083 }
5084 color_end();
5085 }
5086
5087 /*
5088 * Display a single statistic.
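 * The value is rendered with zfs_nicenum_format() (e.g. "1.2K" or "3.5G")
 * and padded to column_size; in scripted mode it is emitted tab-separated
 * instead.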
5089 */ 5090 static void 5091 print_one_stat(uint64_t value, enum zfs_nicenum_format format, 5092 unsigned int column_size, boolean_t scripted) 5093 { 5094 char buf[64]; 5095 5096 zfs_nicenum_format(value, buf, sizeof (buf), format); 5097 5098 if (scripted) 5099 printf("\t%s", buf); 5100 else 5101 print_stat_color(buf, column_size); 5102 } 5103 5104 /* 5105 * Calculate the default vdev stats 5106 * 5107 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting 5108 * stats into calcvs. 5109 */ 5110 static void 5111 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs, 5112 vdev_stat_t *calcvs) 5113 { 5114 int i; 5115 5116 memcpy(calcvs, newvs, sizeof (*calcvs)); 5117 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++) 5118 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]); 5119 5120 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++) 5121 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]); 5122 } 5123 5124 /* 5125 * Internal representation of the extended iostats data. 5126 * 5127 * The extended iostat stats are exported in nvlists as either uint64_t arrays 5128 * or single uint64_t's. We make both look like arrays to make them easier 5129 * to process. In order to make single uint64_t's look like arrays, we set 5130 * __data to the stat data, and then set *data = &__data with count = 1. Then, 5131 * we can just use *data and count. 5132 */ 5133 struct stat_array { 5134 uint64_t *data; 5135 uint_t count; /* Number of entries in data[] */ 5136 uint64_t __data; /* Only used when data is a single uint64_t */ 5137 }; 5138 5139 static uint64_t 5140 stat_histo_max(struct stat_array *nva, unsigned int len) 5141 { 5142 uint64_t max = 0; 5143 int i; 5144 for (i = 0; i < len; i++) 5145 max = MAX(max, array64_max(nva[i].data, nva[i].count)); 5146 5147 return (max); 5148 } 5149 5150 /* 5151 * Helper function to lookup a uint64_t array or uint64_t value and store its 5152 * data as a stat_array. If the nvpair is a single uint64_t value, then we make 5153 * it look like a one element array to make it easier to process. 5154 */ 5155 static int 5156 nvpair64_to_stat_array(nvlist_t *nvl, const char *name, 5157 struct stat_array *nva) 5158 { 5159 nvpair_t *tmp; 5160 int ret; 5161 5162 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0); 5163 switch (nvpair_type(tmp)) { 5164 case DATA_TYPE_UINT64_ARRAY: 5165 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count); 5166 break; 5167 case DATA_TYPE_UINT64: 5168 ret = nvpair_value_uint64(tmp, &nva->__data); 5169 nva->data = &nva->__data; 5170 nva->count = 1; 5171 break; 5172 default: 5173 /* Not a uint64_t */ 5174 ret = EINVAL; 5175 break; 5176 } 5177 5178 return (ret); 5179 } 5180 5181 /* 5182 * Given a list of nvlist names, look up the extended stats in newnv and oldnv, 5183 * subtract them, and return the results in a newly allocated stat_array. 5184 * You must free the returned array after you are done with it with 5185 * free_calc_stats(). 5186 * 5187 * Additionally, you can set "oldnv" to NULL if you simply want the newnv 5188 * values. 
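(latency or request size)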
5189 */ 5190 static struct stat_array * 5191 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv, 5192 nvlist_t *newnv) 5193 { 5194 nvlist_t *oldnvx = NULL, *newnvx; 5195 struct stat_array *oldnva, *newnva, *calcnva; 5196 int i, j; 5197 unsigned int alloc_size = (sizeof (struct stat_array)) * len; 5198 5199 /* Extract our extended stats nvlist from the main list */ 5200 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX, 5201 &newnvx) == 0); 5202 if (oldnv) { 5203 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX, 5204 &oldnvx) == 0); 5205 } 5206 5207 newnva = safe_malloc(alloc_size); 5208 oldnva = safe_malloc(alloc_size); 5209 calcnva = safe_malloc(alloc_size); 5210 5211 for (j = 0; j < len; j++) { 5212 verify(nvpair64_to_stat_array(newnvx, names[j], 5213 &newnva[j]) == 0); 5214 calcnva[j].count = newnva[j].count; 5215 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]); 5216 calcnva[j].data = safe_malloc(alloc_size); 5217 memcpy(calcnva[j].data, newnva[j].data, alloc_size); 5218 5219 if (oldnvx) { 5220 verify(nvpair64_to_stat_array(oldnvx, names[j], 5221 &oldnva[j]) == 0); 5222 for (i = 0; i < oldnva[j].count; i++) 5223 calcnva[j].data[i] -= oldnva[j].data[i]; 5224 } 5225 } 5226 free(newnva); 5227 free(oldnva); 5228 return (calcnva); 5229 } 5230 5231 static void 5232 free_calc_stats(struct stat_array *nva, unsigned int len) 5233 { 5234 int i; 5235 for (i = 0; i < len; i++) 5236 free(nva[i].data); 5237 5238 free(nva); 5239 } 5240 5241 static void 5242 print_iostat_histo(struct stat_array *nva, unsigned int len, 5243 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth, 5244 double scale) 5245 { 5246 int i, j; 5247 char buf[6]; 5248 uint64_t val; 5249 enum zfs_nicenum_format format; 5250 unsigned int buckets; 5251 unsigned int start_bucket; 5252 5253 if (cb->cb_literal) 5254 format = ZFS_NICENUM_RAW; 5255 else 5256 format = ZFS_NICENUM_1024; 5257 5258 /* All these histos are the same size, so just use nva[0].count */ 5259 buckets = nva[0].count; 5260 5261 if (cb->cb_flags & IOS_RQ_HISTO_M) { 5262 /* Start at 512 - req size should never be lower than this */ 5263 start_bucket = 9; 5264 } else { 5265 start_bucket = 0; 5266 } 5267 5268 for (j = start_bucket; j < buckets; j++) { 5269 /* Print histogram bucket label */ 5270 if (cb->cb_flags & IOS_L_HISTO_M) { 5271 /* Ending range of this bucket */ 5272 val = (1UL << (j + 1)) - 1; 5273 zfs_nicetime(val, buf, sizeof (buf)); 5274 } else { 5275 /* Request size (starting range of bucket) */ 5276 val = (1UL << j); 5277 zfs_nicenum(val, buf, sizeof (buf)); 5278 } 5279 5280 if (cb->cb_scripted) 5281 printf("%llu", (u_longlong_t)val); 5282 else 5283 printf("%-*s", namewidth, buf); 5284 5285 /* Print the values on the line */ 5286 for (i = 0; i < len; i++) { 5287 print_one_stat(nva[i].data[j] * scale, format, 5288 column_width, cb->cb_scripted); 5289 } 5290 printf("\n"); 5291 } 5292 } 5293 5294 static void 5295 print_solid_separator(unsigned int length) 5296 { 5297 while (length--) 5298 printf("-"); 5299 printf("\n"); 5300 } 5301 5302 static void 5303 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv, 5304 nvlist_t *newnv, double scale, const char *name) 5305 { 5306 unsigned int column_width; 5307 unsigned int namewidth; 5308 unsigned int entire_width; 5309 enum iostat_type type; 5310 struct stat_array *nva; 5311 const char **names; 5312 unsigned int names_len; 5313 5314 /* What type of histo are we? 
*/ 5315 type = IOS_HISTO_IDX(cb->cb_flags); 5316 5317 /* Get NULL-terminated array of nvlist names for our histo */ 5318 names = vsx_type_to_nvlist[type]; 5319 names_len = str_array_len(names); /* num of names */ 5320 5321 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv); 5322 5323 if (cb->cb_literal) { 5324 column_width = MAX(5, 5325 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1); 5326 } else { 5327 column_width = 5; 5328 } 5329 5330 namewidth = MAX(cb->cb_namewidth, 5331 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)])); 5332 5333 /* 5334 * Calculate the entire line width of what we're printing. The 5335 * +2 is for the two spaces between columns: 5336 */ 5337 /* read write */ 5338 /* ----- ----- */ 5339 /* |___| <---------- column_width */ 5340 /* */ 5341 /* |__________| <--- entire_width */ 5342 /* */ 5343 entire_width = namewidth + (column_width + 2) * 5344 label_array_len(iostat_bottom_labels[type]); 5345 5346 if (cb->cb_scripted) 5347 printf("%s\n", name); 5348 else 5349 print_iostat_header_impl(cb, column_width, name); 5350 5351 print_iostat_histo(nva, names_len, cb, column_width, 5352 namewidth, scale); 5353 5354 free_calc_stats(nva, names_len); 5355 if (!cb->cb_scripted) 5356 print_solid_separator(entire_width); 5357 } 5358 5359 /* 5360 * Calculate the average latency of a power-of-two latency histogram 5361 */ 5362 static uint64_t 5363 single_histo_average(uint64_t *histo, unsigned int buckets) 5364 { 5365 int i; 5366 uint64_t count = 0, total = 0; 5367 5368 for (i = 0; i < buckets; i++) { 5369 /* 5370 * Our buckets are power-of-two latency ranges. Use the 5371 * midpoint latency of each bucket to calculate the average. 5372 * For example: 5373 * 5374 * Bucket Midpoint 5375 * 8ns-15ns: 12ns 5376 * 16ns-31ns: 24ns 5377 * ... 5378 */ 5379 if (histo[i] != 0) { 5380 total += histo[i] * (((1UL << i) + ((1UL << i)/2))); 5381 count += histo[i]; 5382 } 5383 } 5384 5385 /* Prevent divide by zero */ 5386 return (count == 0 ? 
0 : total / count); 5387 } 5388 5389 static void 5390 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv) 5391 { 5392 const char *names[] = { 5393 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, 5394 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, 5395 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, 5396 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, 5397 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, 5398 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, 5399 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, 5400 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, 5401 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, 5402 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, 5403 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE, 5404 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE, 5405 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE, 5406 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE, 5407 }; 5408 5409 struct stat_array *nva; 5410 5411 unsigned int column_width = default_column_width(cb, IOS_QUEUES); 5412 enum zfs_nicenum_format format; 5413 5414 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv); 5415 5416 if (cb->cb_literal) 5417 format = ZFS_NICENUM_RAW; 5418 else 5419 format = ZFS_NICENUM_1024; 5420 5421 for (int i = 0; i < ARRAY_SIZE(names); i++) { 5422 uint64_t val = nva[i].data[0]; 5423 print_one_stat(val, format, column_width, cb->cb_scripted); 5424 } 5425 5426 free_calc_stats(nva, ARRAY_SIZE(names)); 5427 } 5428 5429 static void 5430 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv, 5431 nvlist_t *newnv) 5432 { 5433 int i; 5434 uint64_t val; 5435 const char *names[] = { 5436 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, 5437 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, 5438 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, 5439 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, 5440 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO, 5441 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO, 5442 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO, 5443 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO, 5444 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO, 5445 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, 5446 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO, 5447 }; 5448 struct stat_array *nva; 5449 5450 unsigned int column_width = default_column_width(cb, IOS_LATENCY); 5451 enum zfs_nicenum_format format; 5452 5453 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv); 5454 5455 if (cb->cb_literal) 5456 format = ZFS_NICENUM_RAWTIME; 5457 else 5458 format = ZFS_NICENUM_TIME; 5459 5460 /* Print our avg latencies on the line */ 5461 for (i = 0; i < ARRAY_SIZE(names); i++) { 5462 /* Compute average latency for a latency histo */ 5463 val = single_histo_average(nva[i].data, nva[i].count); 5464 print_one_stat(val, format, column_width, cb->cb_scripted); 5465 } 5466 free_calc_stats(nva, ARRAY_SIZE(names)); 5467 } 5468 5469 /* 5470 * Print default statistics (capacity/operations/bandwidth) 5471 */ 5472 static void 5473 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale) 5474 { 5475 unsigned int column_width = default_column_width(cb, IOS_DEFAULT); 5476 enum zfs_nicenum_format format; 5477 char na; /* char to print for "not applicable" values */ 5478 5479 if (cb->cb_literal) { 5480 format = ZFS_NICENUM_RAW; 5481 na = '0'; 5482 } else { 5483 format = ZFS_NICENUM_1024; 5484 na = '-'; 5485 } 5486 5487 /* only toplevel vdevs have capacity stats */ 5488 if (vs->vs_space == 0) { 5489 if (cb->cb_scripted) 5490 printf("\t%c\t%c", na, na); 5491 else 5492 printf(" %*c %*c", column_width, na, column_width, 5493 na); 5494 } else { 5495 print_one_stat(vs->vs_alloc, format, column_width, 5496 cb->cb_scripted); 5497 print_one_stat(vs->vs_space - vs->vs_alloc, format, 5498 column_width, cb->cb_scripted); 5499 } 5500 5501 
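/* The ops/bytes values are interval deltas; multiplying by scale (NANOSEC / tdelta) turns them into per-second rates. */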
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
5502 format, column_width, cb->cb_scripted);
5503 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
5504 format, column_width, cb->cb_scripted);
5505 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
5506 format, column_width, cb->cb_scripted);
5507 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
5508 format, column_width, cb->cb_scripted);
5509 }
5510
5511 static const char *const class_name[] = {
5512 VDEV_ALLOC_BIAS_DEDUP,
5513 VDEV_ALLOC_BIAS_SPECIAL,
5514 VDEV_ALLOC_CLASS_LOGS
5515 };
5516
5517 /*
5518 * Print out all the statistics for the given vdev. This can either be the
5519 * toplevel configuration, or called recursively. If 'name' is NULL, then this
5520 * is a verbose output, and we don't want to display the toplevel pool stats.
5521 *
5522 * Returns the number of stat lines printed.
5523 */
5524 static unsigned int
5525 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
5526 nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
5527 {
5528 nvlist_t **oldchild, **newchild;
5529 uint_t c, children, oldchildren;
5530 vdev_stat_t *oldvs, *newvs, *calcvs;
5531 vdev_stat_t zerovs = { 0 };
5532 char *vname;
5533 int i;
5534 int ret = 0;
5535 uint64_t tdelta;
5536 double scale;
5537
5538 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
5539 return (ret);
5540
5541 calcvs = safe_malloc(sizeof (*calcvs));
5542
5543 if (oldnv != NULL) {
5544 verify(nvlist_lookup_uint64_array(oldnv,
5545 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
5546 } else {
5547 oldvs = &zerovs;
5548 }
5549
5550 /* Do we only want to see a specific vdev? */
5551 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
5552 /* Yes we do. Is this the vdev? */
5553 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
5554 /*
5555 * This is our vdev. Since it is the only vdev we
5556 * will be displaying, make depth = 0 so that it
5557 * doesn't get indented.
5558 */
5559 depth = 0;
5560 break;
5561 }
5562 }
5563
5564 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
5565 /* Couldn't match the name */
5566 goto children;
5567 }
5568
5569
5570 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
5571 (uint64_t **)&newvs, &c) == 0);
5572
5573 /*
5574 * Print the vdev name unless it is a histogram. Histograms
5575 * display the vdev name in the header itself.
5576 */
5577 if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
5578 if (cb->cb_scripted) {
5579 printf("%s", name);
5580 } else {
5581 if (strlen(name) + depth > cb->cb_namewidth)
5582 (void) printf("%*s%s", depth, "", name);
5583 else
5584 (void) printf("%*s%s%*s", depth, "", name,
5585 (int)(cb->cb_namewidth - strlen(name) -
5586 depth), "");
5587 }
5588 }
5589
5590 /* Calculate our scaling factor */
5591 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
5592 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
5593 /*
5594 * If we specify printing histograms with no time interval, then
5595 * print the histogram numbers over the entire lifetime of the
5596 * vdev.
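 * (scale stays 1, so the totals are printed as-is rather than as
 * per-second rates.)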
5597 */ 5598 scale = 1; 5599 } else { 5600 if (tdelta == 0) 5601 scale = 1.0; 5602 else 5603 scale = (double)NANOSEC / tdelta; 5604 } 5605 5606 if (cb->cb_flags & IOS_DEFAULT_M) { 5607 calc_default_iostats(oldvs, newvs, calcvs); 5608 print_iostat_default(calcvs, cb, scale); 5609 } 5610 if (cb->cb_flags & IOS_LATENCY_M) 5611 print_iostat_latency(cb, oldnv, newnv); 5612 if (cb->cb_flags & IOS_QUEUES_M) 5613 print_iostat_queues(cb, newnv); 5614 if (cb->cb_flags & IOS_ANYHISTO_M) { 5615 printf("\n"); 5616 print_iostat_histos(cb, oldnv, newnv, scale, name); 5617 } 5618 5619 if (cb->vcdl != NULL) { 5620 const char *path; 5621 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH, 5622 &path) == 0) { 5623 printf(" "); 5624 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 5625 } 5626 } 5627 5628 if (!(cb->cb_flags & IOS_ANYHISTO_M)) 5629 printf("\n"); 5630 5631 ret++; 5632 5633 children: 5634 5635 free(calcvs); 5636 5637 if (!cb->cb_verbose) 5638 return (ret); 5639 5640 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN, 5641 &newchild, &children) != 0) 5642 return (ret); 5643 5644 if (oldnv) { 5645 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN, 5646 &oldchild, &oldchildren) != 0) 5647 return (ret); 5648 5649 children = MIN(oldchildren, children); 5650 } 5651 5652 /* 5653 * print normal top-level devices 5654 */ 5655 for (c = 0; c < children; c++) { 5656 uint64_t ishole = B_FALSE, islog = B_FALSE; 5657 5658 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE, 5659 &ishole); 5660 5661 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG, 5662 &islog); 5663 5664 if (ishole || islog) 5665 continue; 5666 5667 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 5668 continue; 5669 5670 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5671 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID); 5672 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, 5673 newchild[c], cb, depth + 2); 5674 free(vname); 5675 } 5676 5677 /* 5678 * print all other top-level devices 5679 */ 5680 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { 5681 boolean_t printed = B_FALSE; 5682 5683 for (c = 0; c < children; c++) { 5684 uint64_t islog = B_FALSE; 5685 const char *bias = NULL; 5686 const char *type = NULL; 5687 5688 (void) nvlist_lookup_uint64(newchild[c], 5689 ZPOOL_CONFIG_IS_LOG, &islog); 5690 if (islog) { 5691 bias = VDEV_ALLOC_CLASS_LOGS; 5692 } else { 5693 (void) nvlist_lookup_string(newchild[c], 5694 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 5695 (void) nvlist_lookup_string(newchild[c], 5696 ZPOOL_CONFIG_TYPE, &type); 5697 } 5698 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 5699 continue; 5700 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 5701 continue; 5702 5703 if (!printed) { 5704 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && 5705 !cb->cb_scripted && 5706 !cb->cb_vdevs.cb_names) { 5707 print_iostat_dashes(cb, 0, 5708 class_name[n]); 5709 } 5710 printf("\n"); 5711 printed = B_TRUE; 5712 } 5713 5714 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5715 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID); 5716 ret += print_vdev_stats(zhp, vname, oldnv ? 
5717 oldchild[c] : NULL, newchild[c], cb, depth + 2); 5718 free(vname); 5719 } 5720 } 5721 5722 /* 5723 * Include level 2 ARC devices in iostat output 5724 */ 5725 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE, 5726 &newchild, &children) != 0) 5727 return (ret); 5728 5729 if (oldnv) { 5730 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE, 5731 &oldchild, &oldchildren) != 0) 5732 return (ret); 5733 5734 children = MIN(oldchildren, children); 5735 } 5736 5737 if (children > 0) { 5738 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted && 5739 !cb->cb_vdevs.cb_names) { 5740 print_iostat_dashes(cb, 0, "cache"); 5741 } 5742 printf("\n"); 5743 5744 for (c = 0; c < children; c++) { 5745 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5746 cb->cb_vdevs.cb_name_flags); 5747 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] 5748 : NULL, newchild[c], cb, depth + 2); 5749 free(vname); 5750 } 5751 } 5752 5753 return (ret); 5754 } 5755 5756 static int 5757 refresh_iostat(zpool_handle_t *zhp, void *data) 5758 { 5759 iostat_cbdata_t *cb = data; 5760 boolean_t missing; 5761 5762 /* 5763 * If the pool has disappeared, remove it from the list and continue. 5764 */ 5765 if (zpool_refresh_stats(zhp, &missing) != 0) 5766 return (-1); 5767 5768 if (missing) 5769 pool_list_remove(cb->cb_list, zhp); 5770 5771 return (0); 5772 } 5773 5774 /* 5775 * Callback to print out the iostats for the given pool. 5776 */ 5777 static int 5778 print_iostat(zpool_handle_t *zhp, void *data) 5779 { 5780 iostat_cbdata_t *cb = data; 5781 nvlist_t *oldconfig, *newconfig; 5782 nvlist_t *oldnvroot, *newnvroot; 5783 int ret; 5784 5785 newconfig = zpool_get_config(zhp, &oldconfig); 5786 5787 if (cb->cb_iteration == 1) 5788 oldconfig = NULL; 5789 5790 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE, 5791 &newnvroot) == 0); 5792 5793 if (oldconfig == NULL) 5794 oldnvroot = NULL; 5795 else 5796 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE, 5797 &oldnvroot) == 0); 5798 5799 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, 5800 cb, 0); 5801 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) && 5802 !cb->cb_scripted && cb->cb_verbose && 5803 !cb->cb_vdevs.cb_names_count) { 5804 print_iostat_separator(cb); 5805 if (cb->vcdl != NULL) { 5806 print_cmd_columns(cb->vcdl, 1); 5807 } 5808 printf("\n"); 5809 } 5810 5811 return (ret); 5812 } 5813 5814 static int 5815 get_columns(void) 5816 { 5817 struct winsize ws; 5818 int columns = 80; 5819 int error; 5820 5821 if (isatty(STDOUT_FILENO)) { 5822 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws); 5823 if (error == 0) 5824 columns = ws.ws_col; 5825 } else { 5826 columns = 999; 5827 } 5828 5829 return (columns); 5830 } 5831 5832 /* 5833 * Return the required length of the pool/vdev name column. The minimum 5834 * allowed width and output formatting flags must be provided. 
5835 */
5836 static int
5837 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5838 {
5839 nvlist_t *config, *nvroot;
5840 int width = min_width;
5841
5842 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5843 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5844 &nvroot) == 0);
5845 size_t poolname_len = strlen(zpool_get_name(zhp));
5846 if (verbose == B_FALSE) {
5847 width = MAX(poolname_len, min_width);
5848 } else {
5849 width = MAX(poolname_len,
5850 max_width(zhp, nvroot, 0, min_width, flags));
5851 }
5852 }
5853
5854 return (width);
5855 }
5856
5857 /*
5858 * Parse the input string and get the 'interval' and 'count' values, if present.
5859 */
5860 static void
5861 get_interval_count(int *argcp, char **argv, float *iv,
5862 unsigned long *cnt)
5863 {
5864 float interval = 0;
5865 unsigned long count = 0;
5866 int argc = *argcp;
5867
5868 /*
5869 * Determine if the last argument is an integer or a pool name
5870 */
5871 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5872 char *end;
5873
5874 errno = 0;
5875 interval = strtof(argv[argc - 1], &end);
5876
5877 if (*end == '\0' && errno == 0) {
5878 if (interval == 0) {
5879 (void) fprintf(stderr, gettext(
5880 "interval cannot be zero\n"));
5881 usage(B_FALSE);
5882 }
5883 /*
5884 * Ignore the last parameter
5885 */
5886 argc--;
5887 } else {
5888 /*
5889 * If this is not a valid number, just plow on. The
5890 * user will get a more informative error message later
5891 * on.
5892 */
5893 interval = 0;
5894 }
5895 }
5896
5897 /*
5898 * If the last argument is also an integer, then we have both a count
5899 * and an interval.
5900 */
5901 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5902 char *end;
5903
5904 errno = 0;
5905 count = interval;
5906 interval = strtof(argv[argc - 1], &end);
5907
5908 if (*end == '\0' && errno == 0) {
5909 if (interval == 0) {
5910 (void) fprintf(stderr, gettext(
5911 "interval cannot be zero\n"));
5912 usage(B_FALSE);
5913 }
5914
5915 /*
5916 * Ignore the last parameter
5917 */
5918 argc--;
5919 } else {
5920 interval = 0;
5921 }
5922 }
5923
5924 *iv = interval;
5925 *cnt = count;
5926 *argcp = argc;
5927 }
5928
5929 static void
5930 get_timestamp_arg(char c)
5931 {
5932 if (c == 'u')
5933 timestamp_fmt = UDATE;
5934 else if (c == 'd')
5935 timestamp_fmt = DDATE;
5936 else
5937 usage(B_FALSE);
5938 }
5939
5940 /*
5941 * Return stat flags that are supported on all pools by both the module and
5942 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5943 * It will get ANDed down until only the flags that are supported on all pools
5944 * remain.
5945 */
5946 static int
5947 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5948 {
5949 uint64_t *mask = data;
5950 nvlist_t *config, *nvroot, *nvx;
5951 uint64_t flags = 0;
5952 int i, j;
5953
5954 config = zpool_get_config(zhp, NULL);
5955 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5956 &nvroot) == 0);
5957
5958 /* Default stats are always supported, but for completeness... */
5959 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5960 flags |= IOS_DEFAULT_M;
5961
5962 /* Get our extended stats nvlist from the main list */
5963 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5964 &nvx) != 0) {
5965 /*
5966 * No extended stats; they're probably running an older
5967 * module. No big deal, we support that too.
5968 */
5969 goto end;
5970 }
5971
5972 /* For each extended stat, make sure all its nvpairs are supported */
5973 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5974 if (!vsx_type_to_nvlist[j][0])
5975 continue;
5976
5977 /* Start off by assuming the flag is supported, then check */
5978 flags |= (1ULL << j);
5979 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5980 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5981 /* flag isn't supported */
5982 flags = flags & ~(1ULL << j);
5983 break;
5984 }
5985 }
5986 }
5987 end:
5988 *mask = *mask & flags;
5989 return (0);
5990 }
5991
5992 /*
5993 * Return a bitmask of stats that are supported on all pools by both the module
5994 * and zpool iostat.
5995 */
5996 static uint64_t
5997 get_stat_flags(zpool_list_t *list)
5998 {
5999 uint64_t mask = -1;
6000
6001 /*
6002 * get_stat_flags_cb() will lop off bits from "mask" until only the
6003 * flags that are supported on all pools remain.
6004 */
6005 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
6006 return (mask);
6007 }
6008
6009 /*
6010 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
6011 */
6012 static int
6013 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
6014 {
6015 uint64_t guid;
6016 vdev_cbdata_t *cb = cb_data;
6017 zpool_handle_t *zhp = zhp_data;
6018
6019 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
6020 return (0);
6021
6022 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
6023 }
6024
6025 /*
6026 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
6027 */
6028 static int
6029 is_vdev(zpool_handle_t *zhp, void *cb_data)
6030 {
6031 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
6032 }
6033
6034 /*
6035 * Check if vdevs are in a pool
6036 *
6037 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
6038 * return 0. If pool_name is NULL, then search all pools.
6039 */
6040 static int
6041 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
6042 vdev_cbdata_t *cb)
6043 {
6044 char **tmp_name;
6045 int ret = 0;
6046 int i;
6047 int pool_count = 0;
6048
6049 if ((argc == 0) || !*argv)
6050 return (0);
6051
6052 if (pool_name)
6053 pool_count = 1;
6054
6055 /* Temporarily hijack cb_names for a second... */
6056 tmp_name = cb->cb_names;
6057
6058 /* Go through our list of prospective vdev names */
6059 for (i = 0; i < argc; i++) {
6060 cb->cb_names = argv + i;
6061
6062 /* Is this name a vdev in our pools? */
6063 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
6064 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
6065 if (!ret) {
6066 /* No match */
6067 break;
6068 }
6069 }
6070
6071 cb->cb_names = tmp_name;
6072
6073 return (ret);
6074 }
6075
6076 static int
6077 is_pool_cb(zpool_handle_t *zhp, void *data)
6078 {
6079 char *name = data;
6080 if (strcmp(name, zpool_get_name(zhp)) == 0)
6081 return (1);
6082
6083 return (0);
6084 }
6085
6086 /*
6087 * Do we have a pool named *name? If so, return 1, otherwise 0.
6088 */
6089 static int
6090 is_pool(char *name)
6091 {
6092 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
6093 is_pool_cb, name));
6094 }
6095
6096 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
6097 static int
6098 are_all_pools(int argc, char **argv)
6099 {
6100 if ((argc == 0) || !*argv)
6101 return (0);
6102
6103 while (--argc >= 0)
6104 if (!is_pool(argv[argc]))
6105 return (0);
6106
6107 return (1);
6108 }
6109
6110 /*
6111 * Helper function to print out vdev/pool names we can't resolve.
Used for an 6112 * error message. 6113 */ 6114 static void 6115 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name, 6116 vdev_cbdata_t *cb) 6117 { 6118 int i; 6119 char *name; 6120 char *str; 6121 for (i = 0; i < argc; i++) { 6122 name = argv[i]; 6123 6124 if (is_pool(name)) 6125 str = gettext("pool"); 6126 else if (are_vdevs_in_pool(1, &name, pool_name, cb)) 6127 str = gettext("vdev in this pool"); 6128 else if (are_vdevs_in_pool(1, &name, NULL, cb)) 6129 str = gettext("vdev in another pool"); 6130 else 6131 str = gettext("unknown"); 6132 6133 fprintf(stderr, "\t%s (%s)\n", name, str); 6134 } 6135 } 6136 6137 /* 6138 * Same as get_interval_count(), but with additional checks to not misinterpret 6139 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in 6140 * cb.cb_vdevs.cb_name_flags. 6141 */ 6142 static void 6143 get_interval_count_filter_guids(int *argc, char **argv, float *interval, 6144 unsigned long *count, iostat_cbdata_t *cb) 6145 { 6146 char **tmpargv = argv; 6147 int argc_for_interval = 0; 6148 6149 /* Is the last arg an interval value? Or a guid? */ 6150 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL, 6151 &cb->cb_vdevs)) { 6152 /* 6153 * The last arg is not a guid, so it's probably an 6154 * interval value. 6155 */ 6156 argc_for_interval++; 6157 6158 if (*argc >= 2 && 6159 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL, 6160 &cb->cb_vdevs)) { 6161 /* 6162 * The 2nd to last arg is not a guid, so it's probably 6163 * an interval value. 6164 */ 6165 argc_for_interval++; 6166 } 6167 } 6168 6169 /* Point to our list of possible intervals */ 6170 tmpargv = &argv[*argc - argc_for_interval]; 6171 6172 *argc = *argc - argc_for_interval; 6173 get_interval_count(&argc_for_interval, tmpargv, 6174 interval, count); 6175 } 6176 6177 /* 6178 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or 6179 * if we were unable to determine its size. 6180 */ 6181 static int 6182 terminal_height(void) 6183 { 6184 struct winsize win; 6185 6186 if (isatty(STDOUT_FILENO) == 0) 6187 return (-1); 6188 6189 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0) 6190 return (win.ws_row); 6191 6192 return (-1); 6193 } 6194 6195 /* 6196 * Run one of the zpool status/iostat -c scripts with the help (-h) option and 6197 * print the result. 6198 * 6199 * name: Short name of the script ('iostat'). 6200 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat'); 6201 */ 6202 static void 6203 print_zpool_script_help(char *name, char *path) 6204 { 6205 char *argv[] = {path, (char *)"-h", NULL}; 6206 char **lines = NULL; 6207 int lines_cnt = 0; 6208 int rc; 6209 6210 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines, 6211 &lines_cnt); 6212 if (rc != 0 || lines == NULL || lines_cnt <= 0) { 6213 if (lines != NULL) 6214 libzfs_free_str_array(lines, lines_cnt); 6215 return; 6216 } 6217 6218 for (int i = 0; i < lines_cnt; i++) 6219 if (!is_blank_str(lines[i])) 6220 printf(" %-14s %s\n", name, lines[i]); 6221 6222 libzfs_free_str_array(lines, lines_cnt); 6223 } 6224 6225 /* 6226 * Go though the zpool status/iostat -c scripts in the user's path, run their 6227 * help option (-h), and print out the results. 
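 *
 * For illustration only (a hypothetical script, not shipped with this
 * code): a -c script is simply an executable regular file on the
 * search path that prints a one-line summary when invoked with -h,
 * roughly:
 *
 *	#!/bin/sh
 *	if [ "$1" = "-h" ]; then
 *		echo "Print the underlying device path"
 *		exit 0
 *	fi
 *	echo "upath=$VDEV_UPATH"
 *
 * The -h output is what print_zpool_script_help() above captures and
 * lists; VDEV_UPATH is one of the environment variables the -c
 * machinery exports to scripts.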
6228 */ 6229 static void 6230 print_zpool_dir_scripts(char *dirpath) 6231 { 6232 DIR *dir; 6233 struct dirent *ent; 6234 char fullpath[MAXPATHLEN]; 6235 struct stat dir_stat; 6236 6237 if ((dir = opendir(dirpath)) != NULL) { 6238 /* print all the files and directories within directory */ 6239 while ((ent = readdir(dir)) != NULL) { 6240 if (snprintf(fullpath, sizeof (fullpath), "%s/%s", 6241 dirpath, ent->d_name) >= sizeof (fullpath)) { 6242 (void) fprintf(stderr, 6243 gettext("internal error: " 6244 "ZPOOL_SCRIPTS_PATH too large.\n")); 6245 exit(1); 6246 } 6247 6248 /* Print the scripts */ 6249 if (stat(fullpath, &dir_stat) == 0) 6250 if (dir_stat.st_mode & S_IXUSR && 6251 S_ISREG(dir_stat.st_mode)) 6252 print_zpool_script_help(ent->d_name, 6253 fullpath); 6254 } 6255 closedir(dir); 6256 } 6257 } 6258 6259 /* 6260 * Print out help text for all zpool status/iostat -c scripts. 6261 */ 6262 static void 6263 print_zpool_script_list(const char *subcommand) 6264 { 6265 char *dir, *sp, *tmp; 6266 6267 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand); 6268 6269 sp = zpool_get_cmd_search_path(); 6270 if (sp == NULL) 6271 return; 6272 6273 for (dir = strtok_r(sp, ":", &tmp); 6274 dir != NULL; 6275 dir = strtok_r(NULL, ":", &tmp)) 6276 print_zpool_dir_scripts(dir); 6277 6278 free(sp); 6279 } 6280 6281 /* 6282 * Set the minimum pool/vdev name column width. The width must be at least 10, 6283 * but may be as large as the column width - 42 so it still fits on one line. 6284 * NOTE: 42 is the width of the default capacity/operations/bandwidth output 6285 */ 6286 static int 6287 get_namewidth_iostat(zpool_handle_t *zhp, void *data) 6288 { 6289 iostat_cbdata_t *cb = data; 6290 int width, available_width; 6291 6292 /* 6293 * get_namewidth() returns the maximum width of any name in that column 6294 * for any pool/vdev/device line that will be output. 6295 */ 6296 width = get_namewidth(zhp, cb->cb_namewidth, 6297 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose); 6298 6299 /* 6300 * The width we are calculating is the width of the header and also the 6301 * padding width for names that are less than maximum width. The stats 6302 * take up 42 characters, so the width available for names is: 6303 */ 6304 available_width = get_columns() - 42; 6305 6306 /* 6307 * If the maximum width fits on a screen, then great! Make everything 6308 * line up by justifying all lines to the same width. If that max 6309 * width is larger than what's available, the name plus stats won't fit 6310 * on one line, and justifying to that width would cause every line to 6311 * wrap on the screen. We only want lines with long names to wrap. 6312 * Limit the padding to what won't wrap. 6313 */ 6314 if (width > available_width) 6315 width = available_width; 6316 6317 /* 6318 * And regardless of whatever the screen width is (get_columns can 6319 * return 0 if the width is not known or less than 42 for a narrow 6320 * terminal) have the width be a minimum of 10. 6321 */ 6322 if (width < 10) 6323 width = 10; 6324 6325 /* Save the calculated width */ 6326 cb->cb_namewidth = width; 6327 6328 return (0); 6329 } 6330 6331 /* 6332 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name] 6333 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]] 6334 * [interval [count]] 6335 * 6336 * -c CMD For each vdev, run command CMD 6337 * -g Display guid for individual vdev name. 6338 * -L Follow links when resolving vdev path name. 6339 * -P Display full path for vdev name. 
6340 * -v Display statistics for individual vdevs 6341 * -h Display help 6342 * -p Display values in parsable (exact) format. 6343 * -H Scripted mode. Don't display headers, and separate properties 6344 * by a single tab. 6345 * -l Display average latency 6346 * -q Display queue depths 6347 * -w Display latency histograms 6348 * -r Display request size histogram 6349 * -T Display a timestamp in date(1) or Unix format 6350 * -n Only print headers once 6351 * 6352 * This command can be tricky because we want to be able to deal with pool 6353 * creation/destruction as well as vdev configuration changes. The bulk of this 6354 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely 6355 * on pool_list_update() to detect the addition of new pools. Configuration 6356 * changes are all handled within libzfs. 6357 */ 6358 int 6359 zpool_do_iostat(int argc, char **argv) 6360 { 6361 int c; 6362 int ret; 6363 int npools; 6364 float interval = 0; 6365 unsigned long count = 0; 6366 int winheight = 24; 6367 zpool_list_t *list; 6368 boolean_t verbose = B_FALSE; 6369 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE; 6370 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE; 6371 boolean_t omit_since_boot = B_FALSE; 6372 boolean_t guid = B_FALSE; 6373 boolean_t follow_links = B_FALSE; 6374 boolean_t full_name = B_FALSE; 6375 boolean_t headers_once = B_FALSE; 6376 iostat_cbdata_t cb = { 0 }; 6377 char *cmd = NULL; 6378 6379 /* Used for printing error message */ 6380 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q', 6381 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'}; 6382 6383 uint64_t unsupported_flags; 6384 6385 /* check options */ 6386 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) { 6387 switch (c) { 6388 case 'c': 6389 if (cmd != NULL) { 6390 fprintf(stderr, 6391 gettext("Can't set -c flag twice\n")); 6392 exit(1); 6393 } 6394 6395 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 6396 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 6397 fprintf(stderr, gettext( 6398 "Can't run -c, disabled by " 6399 "ZPOOL_SCRIPTS_ENABLED.\n")); 6400 exit(1); 6401 } 6402 6403 if ((getuid() <= 0 || geteuid() <= 0) && 6404 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 6405 fprintf(stderr, gettext( 6406 "Can't run -c with root privileges " 6407 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 6408 exit(1); 6409 } 6410 cmd = optarg; 6411 verbose = B_TRUE; 6412 break; 6413 case 'g': 6414 guid = B_TRUE; 6415 break; 6416 case 'L': 6417 follow_links = B_TRUE; 6418 break; 6419 case 'P': 6420 full_name = B_TRUE; 6421 break; 6422 case 'T': 6423 get_timestamp_arg(*optarg); 6424 break; 6425 case 'v': 6426 verbose = B_TRUE; 6427 break; 6428 case 'p': 6429 parsable = B_TRUE; 6430 break; 6431 case 'l': 6432 latency = B_TRUE; 6433 break; 6434 case 'q': 6435 queues = B_TRUE; 6436 break; 6437 case 'H': 6438 scripted = B_TRUE; 6439 break; 6440 case 'w': 6441 l_histo = B_TRUE; 6442 break; 6443 case 'r': 6444 rq_histo = B_TRUE; 6445 break; 6446 case 'y': 6447 omit_since_boot = B_TRUE; 6448 break; 6449 case 'n': 6450 headers_once = B_TRUE; 6451 break; 6452 case 'h': 6453 usage(B_FALSE); 6454 break; 6455 case '?': 6456 if (optopt == 'c') { 6457 print_zpool_script_list("iostat"); 6458 exit(0); 6459 } else { 6460 fprintf(stderr, 6461 gettext("invalid option '%c'\n"), optopt); 6462 } 6463 usage(B_FALSE); 6464 } 6465 } 6466 6467 argc -= optind; 6468 argv += optind; 6469 6470 cb.cb_literal = parsable; 6471 cb.cb_scripted = scripted; 6472 6473 if (guid) 6474 
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID; 6475 if (follow_links) 6476 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 6477 if (full_name) 6478 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH; 6479 cb.cb_iteration = 0; 6480 cb.cb_namewidth = 0; 6481 cb.cb_verbose = verbose; 6482 6483 /* Get our interval and count values (if any) */ 6484 if (guid) { 6485 get_interval_count_filter_guids(&argc, argv, &interval, 6486 &count, &cb); 6487 } else { 6488 get_interval_count(&argc, argv, &interval, &count); 6489 } 6490 6491 if (argc == 0) { 6492 /* No args, so just print the defaults. */ 6493 } else if (are_all_pools(argc, argv)) { 6494 /* All the args are pool names */ 6495 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) { 6496 /* All the args are vdevs */ 6497 cb.cb_vdevs.cb_names = argv; 6498 cb.cb_vdevs.cb_names_count = argc; 6499 argc = 0; /* No pools to process */ 6500 } else if (are_all_pools(1, argv)) { 6501 /* The first arg is a pool name */ 6502 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], 6503 &cb.cb_vdevs)) { 6504 /* ...and the rest are vdev names */ 6505 cb.cb_vdevs.cb_names = argv + 1; 6506 cb.cb_vdevs.cb_names_count = argc - 1; 6507 argc = 1; /* One pool to process */ 6508 } else { 6509 fprintf(stderr, gettext("Expected either a list of ")); 6510 fprintf(stderr, gettext("pools, or list of vdevs in")); 6511 fprintf(stderr, " \"%s\", ", argv[0]); 6512 fprintf(stderr, gettext("but got:\n")); 6513 error_list_unresolved_vdevs(argc - 1, argv + 1, 6514 argv[0], &cb.cb_vdevs); 6515 fprintf(stderr, "\n"); 6516 usage(B_FALSE); 6517 return (1); 6518 } 6519 } else { 6520 /* 6521 * The args don't make sense. The first arg isn't a pool name, 6522 * nor are all the args vdevs. 6523 */ 6524 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n")); 6525 fprintf(stderr, "\n"); 6526 return (1); 6527 } 6528 6529 if (cb.cb_vdevs.cb_names_count != 0) { 6530 /* 6531 * If user specified vdevs, it implies verbose. 6532 */ 6533 cb.cb_verbose = B_TRUE; 6534 } 6535 6536 /* 6537 * Construct the list of all interesting pools. 6538 */ 6539 ret = 0; 6540 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable, 6541 &ret)) == NULL) 6542 return (1); 6543 6544 if (pool_list_count(list) == 0 && argc != 0) { 6545 pool_list_free(list); 6546 return (1); 6547 } 6548 6549 if (pool_list_count(list) == 0 && interval == 0) { 6550 pool_list_free(list); 6551 (void) fprintf(stderr, gettext("no pools available\n")); 6552 return (1); 6553 } 6554 6555 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) { 6556 pool_list_free(list); 6557 (void) fprintf(stderr, 6558 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n")); 6559 usage(B_FALSE); 6560 return (1); 6561 } 6562 6563 if (l_histo && rq_histo) { 6564 pool_list_free(list); 6565 (void) fprintf(stderr, 6566 gettext("Only one of [-r|-w] can be passed at a time\n")); 6567 usage(B_FALSE); 6568 return (1); 6569 } 6570 6571 /* 6572 * Enter the main iostat loop. 6573 */ 6574 cb.cb_list = list; 6575 6576 if (l_histo) { 6577 /* 6578 * Histograms tables look out of place when you try to display 6579 * them with the other stats, so make a rule that you can only 6580 * print histograms by themselves. 6581 */ 6582 cb.cb_flags = IOS_L_HISTO_M; 6583 } else if (rq_histo) { 6584 cb.cb_flags = IOS_RQ_HISTO_M; 6585 } else { 6586 cb.cb_flags = IOS_DEFAULT_M; 6587 if (latency) 6588 cb.cb_flags |= IOS_LATENCY_M; 6589 if (queues) 6590 cb.cb_flags |= IOS_QUEUES_M; 6591 } 6592 6593 /* 6594 * See if the module supports all the stats we want to display. 
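 *
 * A worked example with hypothetical values: if the user asked for -l
 * and -q, so cb_flags is IOS_DEFAULT_M | IOS_LATENCY_M | IOS_QUEUES_M,
 * but every pool only advertises IOS_DEFAULT_M | IOS_LATENCY_M, then
 * the AND/NOT below leaves exactly IOS_QUEUES_M set, and the loop that
 * follows maps that bit back to its option letter through
 * flag_to_arg[] to print " -q".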
6595 */ 6596 unsupported_flags = cb.cb_flags & ~get_stat_flags(list); 6597 if (unsupported_flags) { 6598 uint64_t f; 6599 int idx; 6600 fprintf(stderr, 6601 gettext("The loaded zfs module doesn't support:")); 6602 6603 /* for each bit set in unsupported_flags */ 6604 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) { 6605 idx = lowbit64(f) - 1; 6606 fprintf(stderr, " -%c", flag_to_arg[idx]); 6607 } 6608 6609 fprintf(stderr, ". Try running a newer module.\n"); 6610 pool_list_free(list); 6611 6612 return (1); 6613 } 6614 6615 for (;;) { 6616 if ((npools = pool_list_count(list)) == 0) 6617 (void) fprintf(stderr, gettext("no pools available\n")); 6618 else { 6619 /* 6620 * If this is the first iteration and -y was supplied 6621 * we skip any printing. 6622 */ 6623 boolean_t skip = (omit_since_boot && 6624 cb.cb_iteration == 0); 6625 6626 /* 6627 * Refresh all statistics. This is done as an 6628 * explicit step before calculating the maximum name 6629 * width, so that any * configuration changes are 6630 * properly accounted for. 6631 */ 6632 (void) pool_list_iter(list, B_FALSE, refresh_iostat, 6633 &cb); 6634 6635 /* 6636 * Iterate over all pools to determine the maximum width 6637 * for the pool / device name column across all pools. 6638 */ 6639 cb.cb_namewidth = 0; 6640 (void) pool_list_iter(list, B_FALSE, 6641 get_namewidth_iostat, &cb); 6642 6643 if (timestamp_fmt != NODATE) 6644 print_timestamp(timestamp_fmt); 6645 6646 if (cmd != NULL && cb.cb_verbose && 6647 !(cb.cb_flags & IOS_ANYHISTO_M)) { 6648 cb.vcdl = all_pools_for_each_vdev_run(argc, 6649 argv, cmd, g_zfs, cb.cb_vdevs.cb_names, 6650 cb.cb_vdevs.cb_names_count, 6651 cb.cb_vdevs.cb_name_flags); 6652 } else { 6653 cb.vcdl = NULL; 6654 } 6655 6656 6657 /* 6658 * Check terminal size so we can print headers 6659 * even when terminal window has its height 6660 * changed. 6661 */ 6662 winheight = terminal_height(); 6663 /* 6664 * Are we connected to TTY? If not, headers_once 6665 * should be true, to avoid breaking scripts. 6666 */ 6667 if (winheight < 0) 6668 headers_once = B_TRUE; 6669 6670 /* 6671 * If it's the first time and we're not skipping it, 6672 * or either skip or verbose mode, print the header. 6673 * 6674 * The histogram code explicitly prints its header on 6675 * every vdev, so skip this for histograms. 6676 */ 6677 if (((++cb.cb_iteration == 1 && !skip) || 6678 (skip != verbose) || 6679 (!headers_once && 6680 (cb.cb_iteration % winheight) == 0)) && 6681 (!(cb.cb_flags & IOS_ANYHISTO_M)) && 6682 !cb.cb_scripted) 6683 print_iostat_header(&cb); 6684 6685 if (skip) { 6686 (void) fflush(stdout); 6687 (void) fsleep(interval); 6688 continue; 6689 } 6690 6691 pool_list_iter(list, B_FALSE, print_iostat, &cb); 6692 6693 /* 6694 * If there's more than one pool, and we're not in 6695 * verbose mode (which prints a separator for us), 6696 * then print a separator. 6697 * 6698 * In addition, if we're printing specific vdevs then 6699 * we also want an ending separator. 
6700 */ 6701 if (((npools > 1 && !verbose && 6702 !(cb.cb_flags & IOS_ANYHISTO_M)) || 6703 (!(cb.cb_flags & IOS_ANYHISTO_M) && 6704 cb.cb_vdevs.cb_names_count)) && 6705 !cb.cb_scripted) { 6706 print_iostat_separator(&cb); 6707 if (cb.vcdl != NULL) 6708 print_cmd_columns(cb.vcdl, 1); 6709 printf("\n"); 6710 } 6711 6712 if (cb.vcdl != NULL) 6713 free_vdev_cmd_data_list(cb.vcdl); 6714 6715 } 6716 6717 if (interval == 0) 6718 break; 6719 6720 if (count != 0 && --count == 0) 6721 break; 6722 6723 (void) fflush(stdout); 6724 (void) fsleep(interval); 6725 } 6726 6727 pool_list_free(list); 6728 6729 return (ret); 6730 } 6731 6732 typedef struct list_cbdata { 6733 boolean_t cb_verbose; 6734 int cb_name_flags; 6735 int cb_namewidth; 6736 boolean_t cb_json; 6737 boolean_t cb_scripted; 6738 zprop_list_t *cb_proplist; 6739 boolean_t cb_literal; 6740 nvlist_t *cb_jsobj; 6741 boolean_t cb_json_as_int; 6742 boolean_t cb_json_pool_key_guid; 6743 } list_cbdata_t; 6744 6745 6746 /* 6747 * Given a list of columns to display, output appropriate headers for each one. 6748 */ 6749 static void 6750 print_header(list_cbdata_t *cb) 6751 { 6752 zprop_list_t *pl = cb->cb_proplist; 6753 char headerbuf[ZPOOL_MAXPROPLEN]; 6754 const char *header; 6755 boolean_t first = B_TRUE; 6756 boolean_t right_justify; 6757 size_t width = 0; 6758 6759 for (; pl != NULL; pl = pl->pl_next) { 6760 width = pl->pl_width; 6761 if (first && cb->cb_verbose) { 6762 /* 6763 * Reset the width to accommodate the verbose listing 6764 * of devices. 6765 */ 6766 width = cb->cb_namewidth; 6767 } 6768 6769 if (!first) 6770 (void) fputs(" ", stdout); 6771 else 6772 first = B_FALSE; 6773 6774 right_justify = B_FALSE; 6775 if (pl->pl_prop != ZPROP_USERPROP) { 6776 header = zpool_prop_column_name(pl->pl_prop); 6777 right_justify = zpool_prop_align_right(pl->pl_prop); 6778 } else { 6779 int i; 6780 6781 for (i = 0; pl->pl_user_prop[i] != '\0'; i++) 6782 headerbuf[i] = toupper(pl->pl_user_prop[i]); 6783 headerbuf[i] = '\0'; 6784 header = headerbuf; 6785 } 6786 6787 if (pl->pl_next == NULL && !right_justify) 6788 (void) fputs(header, stdout); 6789 else if (right_justify) 6790 (void) printf("%*s", (int)width, header); 6791 else 6792 (void) printf("%-*s", (int)width, header); 6793 } 6794 6795 (void) fputc('\n', stdout); 6796 } 6797 6798 /* 6799 * Given a pool and a list of properties, print out all the properties according 6800 * to the described layout. Used by zpool_do_list(). 6801 */ 6802 static void 6803 collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb) 6804 { 6805 zprop_list_t *pl = cb->cb_proplist; 6806 boolean_t first = B_TRUE; 6807 char property[ZPOOL_MAXPROPLEN]; 6808 const char *propstr; 6809 boolean_t right_justify; 6810 size_t width; 6811 zprop_source_t sourcetype = ZPROP_SRC_NONE; 6812 nvlist_t *item, *d, *props; 6813 item = d = props = NULL; 6814 6815 if (cb->cb_json) { 6816 item = fnvlist_alloc(); 6817 props = fnvlist_alloc(); 6818 d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools"); 6819 if (d == NULL) { 6820 fprintf(stderr, "pools obj not found.\n"); 6821 exit(1); 6822 } 6823 fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int); 6824 } 6825 6826 for (; pl != NULL; pl = pl->pl_next) { 6827 6828 width = pl->pl_width; 6829 if (first && cb->cb_verbose) { 6830 /* 6831 * Reset the width to accommodate the verbose listing 6832 * of devices. 
6833 */ 6834 width = cb->cb_namewidth; 6835 } 6836 6837 if (!cb->cb_json && !first) { 6838 if (cb->cb_scripted) 6839 (void) fputc('\t', stdout); 6840 else 6841 (void) fputs(" ", stdout); 6842 } else { 6843 first = B_FALSE; 6844 } 6845 6846 right_justify = B_FALSE; 6847 if (pl->pl_prop != ZPROP_USERPROP) { 6848 if (zpool_get_prop(zhp, pl->pl_prop, property, 6849 sizeof (property), &sourcetype, 6850 cb->cb_literal) != 0) 6851 propstr = "-"; 6852 else 6853 propstr = property; 6854 6855 right_justify = zpool_prop_align_right(pl->pl_prop); 6856 } else if ((zpool_prop_feature(pl->pl_user_prop) || 6857 zpool_prop_unsupported(pl->pl_user_prop)) && 6858 zpool_prop_get_feature(zhp, pl->pl_user_prop, property, 6859 sizeof (property)) == 0) { 6860 propstr = property; 6861 sourcetype = ZPROP_SRC_LOCAL; 6862 } else if (zfs_prop_user(pl->pl_user_prop) && 6863 zpool_get_userprop(zhp, pl->pl_user_prop, property, 6864 sizeof (property), &sourcetype) == 0) { 6865 propstr = property; 6866 } else { 6867 propstr = "-"; 6868 } 6869 6870 if (cb->cb_json) { 6871 if (pl->pl_prop == ZPOOL_PROP_NAME) 6872 continue; 6873 (void) zprop_nvlist_one_property( 6874 zpool_prop_to_name(pl->pl_prop), propstr, 6875 sourcetype, NULL, NULL, props, cb->cb_json_as_int); 6876 } else { 6877 /* 6878 * If this is being called in scripted mode, or if this 6879 * is the last column and it is left-justified, don't 6880 * include a width format specifier. 6881 */ 6882 if (cb->cb_scripted || (pl->pl_next == NULL && 6883 !right_justify)) 6884 (void) fputs(propstr, stdout); 6885 else if (right_justify) 6886 (void) printf("%*s", (int)width, propstr); 6887 else 6888 (void) printf("%-*s", (int)width, propstr); 6889 } 6890 } 6891 6892 if (cb->cb_json) { 6893 fnvlist_add_nvlist(item, "properties", props); 6894 if (cb->cb_json_pool_key_guid) { 6895 char pool_guid[256]; 6896 uint64_t guid = fnvlist_lookup_uint64( 6897 zpool_get_config(zhp, NULL), 6898 ZPOOL_CONFIG_POOL_GUID); 6899 snprintf(pool_guid, 256, "%llu", 6900 (u_longlong_t)guid); 6901 fnvlist_add_nvlist(d, pool_guid, item); 6902 } else { 6903 fnvlist_add_nvlist(d, zpool_get_name(zhp), 6904 item); 6905 } 6906 fnvlist_free(props); 6907 fnvlist_free(item); 6908 } else 6909 (void) fputc('\n', stdout); 6910 } 6911 6912 static void 6913 collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str, 6914 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format, 6915 boolean_t json, nvlist_t *nvl, boolean_t as_int) 6916 { 6917 char propval[64]; 6918 boolean_t fixed; 6919 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL); 6920 6921 switch (prop) { 6922 case ZPOOL_PROP_SIZE: 6923 case ZPOOL_PROP_EXPANDSZ: 6924 case ZPOOL_PROP_CHECKPOINT: 6925 case ZPOOL_PROP_DEDUPRATIO: 6926 case ZPOOL_PROP_DEDUPCACHED: 6927 if (value == 0) 6928 (void) strlcpy(propval, "-", sizeof (propval)); 6929 else 6930 zfs_nicenum_format(value, propval, sizeof (propval), 6931 format); 6932 break; 6933 case ZPOOL_PROP_FRAGMENTATION: 6934 if (value == ZFS_FRAG_INVALID) { 6935 (void) strlcpy(propval, "-", sizeof (propval)); 6936 } else if (format == ZFS_NICENUM_RAW) { 6937 (void) snprintf(propval, sizeof (propval), "%llu", 6938 (unsigned long long)value); 6939 } else { 6940 (void) snprintf(propval, sizeof (propval), "%llu%%", 6941 (unsigned long long)value); 6942 } 6943 break; 6944 case ZPOOL_PROP_CAPACITY: 6945 /* capacity value is in parts-per-10,000 (aka permyriad) */ 6946 if (format == ZFS_NICENUM_RAW) 6947 (void) snprintf(propval, sizeof (propval), "%llu", 6948 (unsigned long long)value / 100); 6949 
else 6950 (void) snprintf(propval, sizeof (propval), 6951 value < 1000 ? "%1.2f%%" : value < 10000 ? 6952 "%2.1f%%" : "%3.0f%%", value / 100.0); 6953 break; 6954 case ZPOOL_PROP_HEALTH: 6955 width = 8; 6956 (void) strlcpy(propval, str, sizeof (propval)); 6957 break; 6958 default: 6959 zfs_nicenum_format(value, propval, sizeof (propval), format); 6960 } 6961 6962 if (!valid) 6963 (void) strlcpy(propval, "-", sizeof (propval)); 6964 6965 if (json) { 6966 zprop_nvlist_one_property(zpool_prop_to_name(prop), propval, 6967 ZPROP_SRC_NONE, NULL, NULL, nvl, as_int); 6968 } else { 6969 if (scripted) 6970 (void) printf("\t%s", propval); 6971 else 6972 (void) printf(" %*s", (int)width, propval); 6973 } 6974 } 6975 6976 /* 6977 * print static default line per vdev 6978 * not compatible with '-o' <proplist> option 6979 */ 6980 static void 6981 collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, 6982 list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item) 6983 { 6984 nvlist_t **child; 6985 vdev_stat_t *vs; 6986 uint_t c, children = 0; 6987 char *vname; 6988 boolean_t scripted = cb->cb_scripted; 6989 uint64_t islog = B_FALSE; 6990 nvlist_t *props, *ent, *ch, *obj, *l2c, *sp; 6991 props = ent = ch = obj = sp = l2c = NULL; 6992 const char *dashes = "%-*s - - - - " 6993 "- - - - -\n"; 6994 6995 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 6996 (uint64_t **)&vs, &c) == 0); 6997 6998 if (name != NULL) { 6999 boolean_t toplevel = (vs->vs_space != 0); 7000 uint64_t cap; 7001 enum zfs_nicenum_format format; 7002 const char *state; 7003 7004 if (cb->cb_literal) 7005 format = ZFS_NICENUM_RAW; 7006 else 7007 format = ZFS_NICENUM_1024; 7008 7009 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) 7010 return; 7011 7012 if (cb->cb_json) { 7013 props = fnvlist_alloc(); 7014 ent = fnvlist_alloc(); 7015 fill_vdev_info(ent, zhp, (char *)name, B_FALSE, 7016 cb->cb_json_as_int); 7017 } else { 7018 if (scripted) 7019 (void) printf("\t%s", name); 7020 else if (strlen(name) + depth > cb->cb_namewidth) 7021 (void) printf("%*s%s", depth, "", name); 7022 else 7023 (void) printf("%*s%s%*s", depth, "", name, 7024 (int)(cb->cb_namewidth - strlen(name) - 7025 depth), ""); 7026 } 7027 7028 /* 7029 * Print the properties for the individual vdevs. Some 7030 * properties are only applicable to toplevel vdevs. The 7031 * 'toplevel' boolean value is passed to the print_one_column() 7032 * to indicate that the value is valid. 
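 * (In this file that helper is collect_vdev_prop() above: 'toplevel'
 * is passed as its 'valid' argument, and values flagged invalid are
 * rendered as "-".)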
7033 */ 7034 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) { 7035 collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL, 7036 scripted, B_TRUE, format, cb->cb_json, props, 7037 cb->cb_json_as_int); 7038 } else { 7039 collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_space, NULL, 7040 scripted, toplevel, format, cb->cb_json, props, 7041 cb->cb_json_as_int); 7042 } 7043 collect_vdev_prop(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL, 7044 scripted, toplevel, format, cb->cb_json, props, 7045 cb->cb_json_as_int); 7046 collect_vdev_prop(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc, 7047 NULL, scripted, toplevel, format, cb->cb_json, props, 7048 cb->cb_json_as_int); 7049 collect_vdev_prop(ZPOOL_PROP_CHECKPOINT, 7050 vs->vs_checkpoint_space, NULL, scripted, toplevel, format, 7051 cb->cb_json, props, cb->cb_json_as_int); 7052 collect_vdev_prop(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL, 7053 scripted, B_TRUE, format, cb->cb_json, props, 7054 cb->cb_json_as_int); 7055 collect_vdev_prop(ZPOOL_PROP_FRAGMENTATION, 7056 vs->vs_fragmentation, NULL, scripted, 7057 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel), 7058 format, cb->cb_json, props, cb->cb_json_as_int); 7059 cap = (vs->vs_space == 0) ? 0 : 7060 (vs->vs_alloc * 10000 / vs->vs_space); 7061 collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap, NULL, 7062 scripted, toplevel, format, cb->cb_json, props, 7063 cb->cb_json_as_int); 7064 collect_vdev_prop(ZPOOL_PROP_DEDUPRATIO, 0, NULL, 7065 scripted, toplevel, format, cb->cb_json, props, 7066 cb->cb_json_as_int); 7067 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 7068 if (isspare) { 7069 if (vs->vs_aux == VDEV_AUX_SPARED) 7070 state = "INUSE"; 7071 else if (vs->vs_state == VDEV_STATE_HEALTHY) 7072 state = "AVAIL"; 7073 } 7074 collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state, scripted, 7075 B_TRUE, format, cb->cb_json, props, cb->cb_json_as_int); 7076 7077 if (cb->cb_json) { 7078 fnvlist_add_nvlist(ent, "properties", props); 7079 fnvlist_free(props); 7080 } else 7081 (void) fputc('\n', stdout); 7082 } 7083 7084 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 7085 &child, &children) != 0) { 7086 if (cb->cb_json) { 7087 fnvlist_add_nvlist(item, name, ent); 7088 fnvlist_free(ent); 7089 } 7090 return; 7091 } 7092 7093 if (cb->cb_json) { 7094 ch = fnvlist_alloc(); 7095 } 7096 7097 /* list the normal vdevs first */ 7098 for (c = 0; c < children; c++) { 7099 uint64_t ishole = B_FALSE; 7100 7101 if (nvlist_lookup_uint64(child[c], 7102 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole) 7103 continue; 7104 7105 if (nvlist_lookup_uint64(child[c], 7106 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog) 7107 continue; 7108 7109 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 7110 continue; 7111 7112 vname = zpool_vdev_name(g_zfs, zhp, child[c], 7113 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 7114 7115 if (name == NULL || cb->cb_json != B_TRUE) 7116 collect_list_stats(zhp, vname, child[c], cb, depth + 2, 7117 B_FALSE, item); 7118 else if (cb->cb_json) { 7119 collect_list_stats(zhp, vname, child[c], cb, depth + 2, 7120 B_FALSE, ch); 7121 } 7122 free(vname); 7123 } 7124 7125 if (cb->cb_json) { 7126 if (!nvlist_empty(ch)) 7127 fnvlist_add_nvlist(ent, "vdevs", ch); 7128 fnvlist_free(ch); 7129 } 7130 7131 /* list the classes: 'logs', 'dedup', and 'special' */ 7132 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { 7133 boolean_t printed = B_FALSE; 7134 if (cb->cb_json) 7135 obj = fnvlist_alloc(); 7136 for (c = 0; c < children; c++) { 7137 const char *bias = NULL; 7138 const char *type = NULL; 7139 7140 if 
(nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 7141 &islog) == 0 && islog) { 7142 bias = VDEV_ALLOC_CLASS_LOGS; 7143 } else { 7144 (void) nvlist_lookup_string(child[c], 7145 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 7146 (void) nvlist_lookup_string(child[c], 7147 ZPOOL_CONFIG_TYPE, &type); 7148 } 7149 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 7150 continue; 7151 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 7152 continue; 7153 7154 if (!printed && !cb->cb_json) { 7155 /* LINTED E_SEC_PRINTF_VAR_FMT */ 7156 (void) printf(dashes, cb->cb_namewidth, 7157 class_name[n]); 7158 printed = B_TRUE; 7159 } 7160 vname = zpool_vdev_name(g_zfs, zhp, child[c], 7161 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 7162 collect_list_stats(zhp, vname, child[c], cb, depth + 2, 7163 B_FALSE, obj); 7164 free(vname); 7165 } 7166 if (cb->cb_json) { 7167 if (!nvlist_empty(obj)) 7168 fnvlist_add_nvlist(item, class_name[n], obj); 7169 fnvlist_free(obj); 7170 } 7171 } 7172 7173 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 7174 &child, &children) == 0 && children > 0) { 7175 if (cb->cb_json) { 7176 l2c = fnvlist_alloc(); 7177 } else { 7178 /* LINTED E_SEC_PRINTF_VAR_FMT */ 7179 (void) printf(dashes, cb->cb_namewidth, "cache"); 7180 } 7181 for (c = 0; c < children; c++) { 7182 vname = zpool_vdev_name(g_zfs, zhp, child[c], 7183 cb->cb_name_flags); 7184 collect_list_stats(zhp, vname, child[c], cb, depth + 2, 7185 B_FALSE, l2c); 7186 free(vname); 7187 } 7188 if (cb->cb_json) { 7189 if (!nvlist_empty(l2c)) 7190 fnvlist_add_nvlist(item, "l2cache", l2c); 7191 fnvlist_free(l2c); 7192 } 7193 } 7194 7195 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child, 7196 &children) == 0 && children > 0) { 7197 if (cb->cb_json) { 7198 sp = fnvlist_alloc(); 7199 } else { 7200 /* LINTED E_SEC_PRINTF_VAR_FMT */ 7201 (void) printf(dashes, cb->cb_namewidth, "spare"); 7202 } 7203 for (c = 0; c < children; c++) { 7204 vname = zpool_vdev_name(g_zfs, zhp, child[c], 7205 cb->cb_name_flags); 7206 collect_list_stats(zhp, vname, child[c], cb, depth + 2, 7207 B_TRUE, sp); 7208 free(vname); 7209 } 7210 if (cb->cb_json) { 7211 if (!nvlist_empty(sp)) 7212 fnvlist_add_nvlist(item, "spares", sp); 7213 fnvlist_free(sp); 7214 } 7215 } 7216 7217 if (name != NULL && cb->cb_json) { 7218 fnvlist_add_nvlist(item, name, ent); 7219 fnvlist_free(ent); 7220 } 7221 } 7222 7223 /* 7224 * Generic callback function to list a pool. 
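 *
 * When -j is in effect the collectors above build nested nvlists
 * instead of printing. Serialized, the result is shaped roughly like
 * this sketch (hypothetical single-mirror pool, heavily trimmed):
 *
 *	"pools": { "tank": {
 *	    "properties": { "size": "...", "health": "ONLINE", ... },
 *	    "vdevs": { "mirror-0": {
 *	        "properties": { ... },
 *	        "vdevs": { "sda": { ... }, "sdb": { ... } }
 *	    } }
 *	} }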
7225 */ 7226 static int 7227 list_callback(zpool_handle_t *zhp, void *data) 7228 { 7229 nvlist_t *p, *d, *nvdevs; 7230 uint64_t guid; 7231 char pool_guid[256]; 7232 const char *pool_name = zpool_get_name(zhp); 7233 list_cbdata_t *cbp = data; 7234 p = d = nvdevs = NULL; 7235 7236 collect_pool(zhp, cbp); 7237 7238 if (cbp->cb_verbose) { 7239 nvlist_t *config, *nvroot; 7240 config = zpool_get_config(zhp, NULL); 7241 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 7242 &nvroot) == 0); 7243 if (cbp->cb_json) { 7244 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, 7245 "pools"); 7246 if (cbp->cb_json_pool_key_guid) { 7247 guid = fnvlist_lookup_uint64(config, 7248 ZPOOL_CONFIG_POOL_GUID); 7249 snprintf(pool_guid, 256, "%llu", 7250 (u_longlong_t)guid); 7251 p = fnvlist_lookup_nvlist(d, pool_guid); 7252 } else { 7253 p = fnvlist_lookup_nvlist(d, pool_name); 7254 } 7255 nvdevs = fnvlist_alloc(); 7256 } 7257 collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs); 7258 if (cbp->cb_json) { 7259 fnvlist_add_nvlist(p, "vdevs", nvdevs); 7260 if (cbp->cb_json_pool_key_guid) 7261 fnvlist_add_nvlist(d, pool_guid, p); 7262 else 7263 fnvlist_add_nvlist(d, pool_name, p); 7264 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d); 7265 fnvlist_free(nvdevs); 7266 } 7267 } 7268 7269 return (0); 7270 } 7271 7272 /* 7273 * Set the minimum pool/vdev name column width. The width must be at least 9, 7274 * but may be as large as needed. 7275 */ 7276 static int 7277 get_namewidth_list(zpool_handle_t *zhp, void *data) 7278 { 7279 list_cbdata_t *cb = data; 7280 int width; 7281 7282 width = get_namewidth(zhp, cb->cb_namewidth, 7283 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose); 7284 7285 if (width < 9) 7286 width = 9; 7287 7288 cb->cb_namewidth = width; 7289 7290 return (0); 7291 } 7292 7293 /* 7294 * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]] 7295 * 7296 * -g Display guid for individual vdev name. 7297 * -H Scripted mode. Don't display headers, and separate properties 7298 * by a single tab. 7299 * -L Follow links when resolving vdev path name. 7300 * -o List of properties to display. Defaults to 7301 * "name,size,allocated,free,expandsize,fragmentation,capacity," 7302 * "dedupratio,health,altroot" 7303 * -p Display values in parsable (exact) format. 7304 * -P Display full path for vdev name. 7305 * -T Display a timestamp in date(1) or Unix format 7306 * -j Display the output in JSON format 7307 * --json-int Display the numbers as integer instead of strings. 7308 * --json-pool-key-guid Set pool GUID as key for pool objects. 7309 * 7310 * List all pools in the system, whether or not they're healthy. Output space 7311 * statistics for each one, as well as health status summary. 
7312 */ 7313 int 7314 zpool_do_list(int argc, char **argv) 7315 { 7316 int c; 7317 int ret = 0; 7318 list_cbdata_t cb = { 0 }; 7319 static char default_props[] = 7320 "name,size,allocated,free,checkpoint,expandsize,fragmentation," 7321 "capacity,dedupratio,health,altroot"; 7322 char *props = default_props; 7323 float interval = 0; 7324 unsigned long count = 0; 7325 zpool_list_t *list; 7326 boolean_t first = B_TRUE; 7327 nvlist_t *data = NULL; 7328 current_prop_type = ZFS_TYPE_POOL; 7329 7330 struct option long_options[] = { 7331 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT}, 7332 {"json-pool-key-guid", no_argument, NULL, 7333 ZPOOL_OPTION_POOL_KEY_GUID}, 7334 {0, 0, 0, 0} 7335 }; 7336 7337 /* check options */ 7338 while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options, 7339 NULL)) != -1) { 7340 switch (c) { 7341 case 'g': 7342 cb.cb_name_flags |= VDEV_NAME_GUID; 7343 break; 7344 case 'H': 7345 cb.cb_scripted = B_TRUE; 7346 break; 7347 case 'L': 7348 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 7349 break; 7350 case 'o': 7351 props = optarg; 7352 break; 7353 case 'P': 7354 cb.cb_name_flags |= VDEV_NAME_PATH; 7355 break; 7356 case 'p': 7357 cb.cb_literal = B_TRUE; 7358 break; 7359 case 'j': 7360 cb.cb_json = B_TRUE; 7361 break; 7362 case ZPOOL_OPTION_JSON_NUMS_AS_INT: 7363 cb.cb_json_as_int = B_TRUE; 7364 cb.cb_literal = B_TRUE; 7365 break; 7366 case ZPOOL_OPTION_POOL_KEY_GUID: 7367 cb.cb_json_pool_key_guid = B_TRUE; 7368 break; 7369 case 'T': 7370 get_timestamp_arg(*optarg); 7371 break; 7372 case 'v': 7373 cb.cb_verbose = B_TRUE; 7374 cb.cb_namewidth = 8; /* 8 until precalc is avail */ 7375 break; 7376 case ':': 7377 (void) fprintf(stderr, gettext("missing argument for " 7378 "'%c' option\n"), optopt); 7379 usage(B_FALSE); 7380 break; 7381 case '?': 7382 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7383 optopt); 7384 usage(B_FALSE); 7385 } 7386 } 7387 7388 argc -= optind; 7389 argv += optind; 7390 7391 if (!cb.cb_json && cb.cb_json_as_int) { 7392 (void) fprintf(stderr, gettext("'--json-int' only works with" 7393 " '-j' option\n")); 7394 usage(B_FALSE); 7395 } 7396 7397 if (!cb.cb_json && cb.cb_json_pool_key_guid) { 7398 (void) fprintf(stderr, gettext("'json-pool-key-guid' only" 7399 " works with '-j' option\n")); 7400 usage(B_FALSE); 7401 } 7402 7403 get_interval_count(&argc, argv, &interval, &count); 7404 7405 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0) 7406 usage(B_FALSE); 7407 7408 for (;;) { 7409 if ((list = pool_list_get(argc, argv, &cb.cb_proplist, 7410 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL) 7411 return (1); 7412 7413 if (pool_list_count(list) == 0) 7414 break; 7415 7416 if (cb.cb_json) { 7417 cb.cb_jsobj = zpool_json_schema(0, 1); 7418 data = fnvlist_alloc(); 7419 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data); 7420 fnvlist_free(data); 7421 } 7422 7423 cb.cb_namewidth = 0; 7424 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb); 7425 7426 if (timestamp_fmt != NODATE) { 7427 if (cb.cb_json) { 7428 if (cb.cb_json_as_int) { 7429 fnvlist_add_uint64(cb.cb_jsobj, "time", 7430 time(NULL)); 7431 } else { 7432 char ts[128]; 7433 get_timestamp(timestamp_fmt, ts, 128); 7434 fnvlist_add_string(cb.cb_jsobj, "time", 7435 ts); 7436 } 7437 } else 7438 print_timestamp(timestamp_fmt); 7439 } 7440 7441 if (!cb.cb_scripted && (first || cb.cb_verbose) && 7442 !cb.cb_json) { 7443 print_header(&cb); 7444 first = B_FALSE; 7445 } 7446 ret = pool_list_iter(list, B_TRUE, list_callback, &cb); 7447 7448 if (ret == 0 && cb.cb_json) 
7449 zcmd_print_json(cb.cb_jsobj); 7450 else if (ret != 0 && cb.cb_json) 7451 nvlist_free(cb.cb_jsobj); 7452 7453 if (interval == 0) 7454 break; 7455 7456 if (count != 0 && --count == 0) 7457 break; 7458 7459 pool_list_free(list); 7460 7461 (void) fflush(stdout); 7462 (void) fsleep(interval); 7463 } 7464 7465 if (argc == 0 && !cb.cb_scripted && !cb.cb_json && 7466 pool_list_count(list) == 0) { 7467 (void) printf(gettext("no pools available\n")); 7468 ret = 0; 7469 } 7470 7471 pool_list_free(list); 7472 zprop_free_list(cb.cb_proplist); 7473 return (ret); 7474 } 7475 7476 static int 7477 zpool_do_attach_or_replace(int argc, char **argv, int replacing) 7478 { 7479 boolean_t force = B_FALSE; 7480 boolean_t rebuild = B_FALSE; 7481 boolean_t wait = B_FALSE; 7482 int c; 7483 nvlist_t *nvroot; 7484 char *poolname, *old_disk, *new_disk; 7485 zpool_handle_t *zhp; 7486 nvlist_t *props = NULL; 7487 char *propval; 7488 int ret; 7489 7490 /* check options */ 7491 while ((c = getopt(argc, argv, "fo:sw")) != -1) { 7492 switch (c) { 7493 case 'f': 7494 force = B_TRUE; 7495 break; 7496 case 'o': 7497 if ((propval = strchr(optarg, '=')) == NULL) { 7498 (void) fprintf(stderr, gettext("missing " 7499 "'=' for -o option\n")); 7500 usage(B_FALSE); 7501 } 7502 *propval = '\0'; 7503 propval++; 7504 7505 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) || 7506 (add_prop_list(optarg, propval, &props, B_TRUE))) 7507 usage(B_FALSE); 7508 break; 7509 case 's': 7510 rebuild = B_TRUE; 7511 break; 7512 case 'w': 7513 wait = B_TRUE; 7514 break; 7515 case '?': 7516 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7517 optopt); 7518 usage(B_FALSE); 7519 } 7520 } 7521 7522 argc -= optind; 7523 argv += optind; 7524 7525 /* get pool name and check number of arguments */ 7526 if (argc < 1) { 7527 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7528 usage(B_FALSE); 7529 } 7530 7531 poolname = argv[0]; 7532 7533 if (argc < 2) { 7534 (void) fprintf(stderr, 7535 gettext("missing <device> specification\n")); 7536 usage(B_FALSE); 7537 } 7538 7539 old_disk = argv[1]; 7540 7541 if (argc < 3) { 7542 if (!replacing) { 7543 (void) fprintf(stderr, 7544 gettext("missing <new_device> specification\n")); 7545 usage(B_FALSE); 7546 } 7547 new_disk = old_disk; 7548 argc -= 1; 7549 argv += 1; 7550 } else { 7551 new_disk = argv[2]; 7552 argc -= 2; 7553 argv += 2; 7554 } 7555 7556 if (argc > 1) { 7557 (void) fprintf(stderr, gettext("too many arguments\n")); 7558 usage(B_FALSE); 7559 } 7560 7561 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) { 7562 nvlist_free(props); 7563 return (1); 7564 } 7565 7566 if (zpool_get_config(zhp, NULL) == NULL) { 7567 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"), 7568 poolname); 7569 zpool_close(zhp); 7570 nvlist_free(props); 7571 return (1); 7572 } 7573 7574 /* unless manually specified use "ashift" pool property (if set) */ 7575 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) { 7576 int intval; 7577 zprop_source_t src; 7578 char strval[ZPOOL_MAXPROPLEN]; 7579 7580 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src); 7581 if (src != ZPROP_SRC_DEFAULT) { 7582 (void) sprintf(strval, "%" PRId32, intval); 7583 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval, 7584 &props, B_TRUE) == 0); 7585 } 7586 } 7587 7588 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE, 7589 argc, argv); 7590 if (nvroot == NULL) { 7591 zpool_close(zhp); 7592 nvlist_free(props); 7593 return (1); 7594 } 7595 7596 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing, 7597 
rebuild); 7598 7599 if (ret == 0 && wait) { 7600 zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER; 7601 char raidz_prefix[] = "raidz"; 7602 if (replacing) { 7603 activity = ZPOOL_WAIT_REPLACE; 7604 } else if (strncmp(old_disk, 7605 raidz_prefix, strlen(raidz_prefix)) == 0) { 7606 activity = ZPOOL_WAIT_RAIDZ_EXPAND; 7607 } 7608 ret = zpool_wait(zhp, activity); 7609 } 7610 7611 nvlist_free(props); 7612 nvlist_free(nvroot); 7613 zpool_close(zhp); 7614 7615 return (ret); 7616 } 7617 7618 /* 7619 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device> 7620 * 7621 * -f Force attach, even if <new_device> appears to be in use. 7622 * -s Use sequential instead of healing reconstruction for resilver. 7623 * -o Set property=value. 7624 * -w Wait for replacing to complete before returning. 7625 * 7626 * Replace <device> with <new_device>. 7627 */ 7628 int 7629 zpool_do_replace(int argc, char **argv) 7630 { 7631 return (zpool_do_attach_or_replace(argc, argv, B_TRUE)); 7632 } 7633 7634 /* 7635 * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device> 7636 * 7637 * -f Force attach, even if <new_device> appears to be in use. 7638 * -s Use sequential instead of healing reconstruction for resilver. 7639 * -o Set property=value. 7640 * -w Wait for resilvering (mirror) or expansion (raidz) to complete 7641 * before returning. 7642 * 7643 * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type 7644 * mirror or raidz. If <device> is not part of a mirror, then <device> will 7645 * be transformed into a mirror of <device> and <new_device>. When a mirror 7646 * is involved, <new_device> will begin life with a DTL of [0, now], and will 7647 * immediately begin to resilver itself. For the raidz case, an expansion will 7648 * commence and reflow the raidz data across all the disks including the 7649 * <new_device>. 7650 */ 7651 int 7652 zpool_do_attach(int argc, char **argv) 7653 { 7654 return (zpool_do_attach_or_replace(argc, argv, B_FALSE)); 7655 } 7656 7657 /* 7658 * zpool detach [-f] <pool> <device> 7659 * 7660 * -f Force detach of <device>, even if DTLs argue against it 7661 * (not supported yet) 7662 * 7663 * Detach a device from a mirror. The operation will be refused if <device> 7664 * is the last device in the mirror, or if the DTLs indicate that this device 7665 * has the only valid copy of some data. 7666 */ 7667 int 7668 zpool_do_detach(int argc, char **argv) 7669 { 7670 int c; 7671 char *poolname, *path; 7672 zpool_handle_t *zhp; 7673 int ret; 7674 7675 /* check options */ 7676 while ((c = getopt(argc, argv, "")) != -1) { 7677 switch (c) { 7678 case '?': 7679 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7680 optopt); 7681 usage(B_FALSE); 7682 } 7683 } 7684 7685 argc -= optind; 7686 argv += optind; 7687 7688 /* get pool name and check number of arguments */ 7689 if (argc < 1) { 7690 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7691 usage(B_FALSE); 7692 } 7693 7694 if (argc < 2) { 7695 (void) fprintf(stderr, 7696 gettext("missing <device> specification\n")); 7697 usage(B_FALSE); 7698 } 7699 7700 poolname = argv[0]; 7701 path = argv[1]; 7702 7703 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 7704 return (1); 7705 7706 ret = zpool_vdev_detach(zhp, path); 7707 7708 zpool_close(zhp); 7709 7710 return (ret); 7711 } 7712 7713 /* 7714 * zpool split [-gLnP] [-o prop=val] ... 7715 * [-o mntopt] ... 7716 * [-R altroot] <pool> <newpool> [<device> ...] 7717 * 7718 * -g Display guid for individual vdev name.
7719 * -L Follow links when resolving vdev path name. 7720 * -n Do not split the pool, but display the resulting layout if 7721 * it were to be split. 7722 * -o Set property=value, or set mount options. 7723 * -P Display full path for vdev name. 7724 * -R Mount the split-off pool under an alternate root. 7725 * -l Load encryption keys while importing. 7726 * 7727 * Splits the named pool and gives it the new pool name. Devices to be split 7728 * off may be listed, provided that no more than one device is specified 7729 * per top-level vdev mirror. The newly split pool is left in an exported 7730 * state unless -R is specified. 7731 * 7732 * Restrictions: the top-level of the pool pool must only be made up of 7733 * mirrors; all devices in the pool must be healthy; no device may be 7734 * undergoing a resilvering operation. 7735 */ 7736 int 7737 zpool_do_split(int argc, char **argv) 7738 { 7739 char *srcpool, *newpool, *propval; 7740 char *mntopts = NULL; 7741 splitflags_t flags; 7742 int c, ret = 0; 7743 int ms_status = 0; 7744 boolean_t loadkeys = B_FALSE; 7745 zpool_handle_t *zhp; 7746 nvlist_t *config, *props = NULL; 7747 7748 flags.dryrun = B_FALSE; 7749 flags.import = B_FALSE; 7750 flags.name_flags = 0; 7751 7752 /* check options */ 7753 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) { 7754 switch (c) { 7755 case 'g': 7756 flags.name_flags |= VDEV_NAME_GUID; 7757 break; 7758 case 'L': 7759 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS; 7760 break; 7761 case 'R': 7762 flags.import = B_TRUE; 7763 if (add_prop_list( 7764 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg, 7765 &props, B_TRUE) != 0) { 7766 nvlist_free(props); 7767 usage(B_FALSE); 7768 } 7769 break; 7770 case 'l': 7771 loadkeys = B_TRUE; 7772 break; 7773 case 'n': 7774 flags.dryrun = B_TRUE; 7775 break; 7776 case 'o': 7777 if ((propval = strchr(optarg, '=')) != NULL) { 7778 *propval = '\0'; 7779 propval++; 7780 if (add_prop_list(optarg, propval, 7781 &props, B_TRUE) != 0) { 7782 nvlist_free(props); 7783 usage(B_FALSE); 7784 } 7785 } else { 7786 mntopts = optarg; 7787 } 7788 break; 7789 case 'P': 7790 flags.name_flags |= VDEV_NAME_PATH; 7791 break; 7792 case ':': 7793 (void) fprintf(stderr, gettext("missing argument for " 7794 "'%c' option\n"), optopt); 7795 usage(B_FALSE); 7796 break; 7797 case '?': 7798 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7799 optopt); 7800 usage(B_FALSE); 7801 break; 7802 } 7803 } 7804 7805 if (!flags.import && mntopts != NULL) { 7806 (void) fprintf(stderr, gettext("setting mntopts is only " 7807 "valid when importing the pool\n")); 7808 usage(B_FALSE); 7809 } 7810 7811 if (!flags.import && loadkeys) { 7812 (void) fprintf(stderr, gettext("loading keys is only " 7813 "valid when importing the pool\n")); 7814 usage(B_FALSE); 7815 } 7816 7817 argc -= optind; 7818 argv += optind; 7819 7820 if (argc < 1) { 7821 (void) fprintf(stderr, gettext("Missing pool name\n")); 7822 usage(B_FALSE); 7823 } 7824 if (argc < 2) { 7825 (void) fprintf(stderr, gettext("Missing new pool name\n")); 7826 usage(B_FALSE); 7827 } 7828 7829 srcpool = argv[0]; 7830 newpool = argv[1]; 7831 7832 argc -= 2; 7833 argv += 2; 7834 7835 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) { 7836 nvlist_free(props); 7837 return (1); 7838 } 7839 7840 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv); 7841 if (config == NULL) { 7842 ret = 1; 7843 } else { 7844 if (flags.dryrun) { 7845 (void) printf(gettext("would create '%s' with the " 7846 "following layout:\n\n"), newpool); 7847 print_vdev_tree(NULL, newpool, 
config, 0, "", 7848 flags.name_flags); 7849 print_vdev_tree(NULL, "dedup", config, 0, 7850 VDEV_ALLOC_BIAS_DEDUP, 0); 7851 print_vdev_tree(NULL, "special", config, 0, 7852 VDEV_ALLOC_BIAS_SPECIAL, 0); 7853 } 7854 } 7855 7856 zpool_close(zhp); 7857 7858 if (ret != 0 || flags.dryrun || !flags.import) { 7859 nvlist_free(config); 7860 nvlist_free(props); 7861 return (ret); 7862 } 7863 7864 /* 7865 * The split was successful. Now we need to open the new 7866 * pool and import it. 7867 */ 7868 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) { 7869 nvlist_free(config); 7870 nvlist_free(props); 7871 return (1); 7872 } 7873 7874 if (loadkeys) { 7875 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool); 7876 if (ret != 0) 7877 ret = 1; 7878 } 7879 7880 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) { 7881 ms_status = zpool_enable_datasets(zhp, mntopts, 0, 7882 mount_tp_nthr); 7883 if (ms_status == EZFS_SHAREFAILED) { 7884 (void) fprintf(stderr, gettext("Split was successful, " 7885 "datasets are mounted but sharing of some datasets " 7886 "has failed\n")); 7887 } else if (ms_status == EZFS_MOUNTFAILED) { 7888 (void) fprintf(stderr, gettext("Split was successful" 7889 ", but some datasets could not be mounted\n")); 7890 (void) fprintf(stderr, gettext("Try doing '%s' with a " 7891 "different altroot\n"), "zpool import"); 7892 } 7893 } 7894 zpool_close(zhp); 7895 nvlist_free(config); 7896 nvlist_free(props); 7897 7898 return (ret); 7899 } 7900 7901 7902 /* 7903 * zpool online [--power] <pool> <device> ... 7904 * 7905 * --power: Power on the enclosure slot to the drive (if possible) 7906 */ 7907 int 7908 zpool_do_online(int argc, char **argv) 7909 { 7910 int c, i; 7911 char *poolname; 7912 zpool_handle_t *zhp; 7913 int ret = 0; 7914 vdev_state_t newstate; 7915 int flags = 0; 7916 boolean_t is_power_on = B_FALSE; 7917 struct option long_options[] = { 7918 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 7919 {0, 0, 0, 0} 7920 }; 7921 7922 /* check options */ 7923 while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) { 7924 switch (c) { 7925 case 'e': 7926 flags |= ZFS_ONLINE_EXPAND; 7927 break; 7928 case ZPOOL_OPTION_POWER: 7929 is_power_on = B_TRUE; 7930 break; 7931 case '?': 7932 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7933 optopt); 7934 usage(B_FALSE); 7935 } 7936 } 7937 7938 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT")) 7939 is_power_on = B_TRUE; 7940 7941 argc -= optind; 7942 argv += optind; 7943 7944 /* get pool name and check number of arguments */ 7945 if (argc < 1) { 7946 (void) fprintf(stderr, gettext("missing pool name\n")); 7947 usage(B_FALSE); 7948 } 7949 if (argc < 2) { 7950 (void) fprintf(stderr, gettext("missing device name\n")); 7951 usage(B_FALSE); 7952 } 7953 7954 poolname = argv[0]; 7955 7956 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 7957 return (1); 7958 7959 for (i = 1; i < argc; i++) { 7960 vdev_state_t oldstate; 7961 boolean_t avail_spare, l2cache; 7962 int rc; 7963 7964 if (is_power_on) { 7965 rc = zpool_power_on_and_disk_wait(zhp, argv[i]); 7966 if (rc == ENOTSUP) { 7967 (void) fprintf(stderr, 7968 gettext("Power control not supported\n")); 7969 } 7970 if (rc != 0) 7971 return (rc); 7972 } 7973 7974 nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare, 7975 &l2cache, NULL); 7976 if (tgt == NULL) { 7977 ret = 1; 7978 continue; 7979 } 7980 uint_t vsc; 7981 oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt, 7982 ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state; 7983 if (zpool_vdev_online(zhp, argv[i], flags, &newstate) 
== 0) { 7984 if (newstate != VDEV_STATE_HEALTHY) { 7985 (void) printf(gettext("warning: device '%s' " 7986 "onlined, but remains in faulted state\n"), 7987 argv[i]); 7988 if (newstate == VDEV_STATE_FAULTED) 7989 (void) printf(gettext("use 'zpool " 7990 "clear' to restore a faulted " 7991 "device\n")); 7992 else 7993 (void) printf(gettext("use 'zpool " 7994 "replace' to replace devices " 7995 "that are no longer present\n")); 7996 if ((flags & ZFS_ONLINE_EXPAND)) { 7997 (void) printf(gettext("%s: failed " 7998 "to expand usable space on " 7999 "unhealthy device '%s'\n"), 8000 (oldstate >= VDEV_STATE_DEGRADED ? 8001 "error" : "warning"), argv[i]); 8002 if (oldstate >= VDEV_STATE_DEGRADED) { 8003 ret = 1; 8004 break; 8005 } 8006 } 8007 } 8008 } else { 8009 ret = 1; 8010 } 8011 } 8012 8013 zpool_close(zhp); 8014 8015 return (ret); 8016 } 8017 8018 /* 8019 * zpool offline [-ft]|[--power] <pool> <device> ... 8020 * 8021 * 8022 * -f Force the device into a faulted state. 8023 * 8024 * -t Only take the device off-line temporarily. The offline/faulted 8025 * state will not be persistent across reboots. 8026 * 8027 * --power Power off the enclosure slot to the drive (if possible) 8028 */ 8029 int 8030 zpool_do_offline(int argc, char **argv) 8031 { 8032 int c, i; 8033 char *poolname; 8034 zpool_handle_t *zhp; 8035 int ret = 0; 8036 boolean_t istmp = B_FALSE; 8037 boolean_t fault = B_FALSE; 8038 boolean_t is_power_off = B_FALSE; 8039 8040 struct option long_options[] = { 8041 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 8042 {0, 0, 0, 0} 8043 }; 8044 8045 /* check options */ 8046 while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) { 8047 switch (c) { 8048 case 'f': 8049 fault = B_TRUE; 8050 break; 8051 case 't': 8052 istmp = B_TRUE; 8053 break; 8054 case ZPOOL_OPTION_POWER: 8055 is_power_off = B_TRUE; 8056 break; 8057 case '?': 8058 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8059 optopt); 8060 usage(B_FALSE); 8061 } 8062 } 8063 8064 if (is_power_off && fault) { 8065 (void) fprintf(stderr, 8066 gettext("--power and -f cannot be used together\n")); 8067 usage(B_FALSE); 8068 return (1); 8069 } 8070 8071 if (is_power_off && istmp) { 8072 (void) fprintf(stderr, 8073 gettext("--power and -t cannot be used together\n")); 8074 usage(B_FALSE); 8075 return (1); 8076 } 8077 8078 argc -= optind; 8079 argv += optind; 8080 8081 /* get pool name and check number of arguments */ 8082 if (argc < 1) { 8083 (void) fprintf(stderr, gettext("missing pool name\n")); 8084 usage(B_FALSE); 8085 } 8086 if (argc < 2) { 8087 (void) fprintf(stderr, gettext("missing device name\n")); 8088 usage(B_FALSE); 8089 } 8090 8091 poolname = argv[0]; 8092 8093 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 8094 return (1); 8095 8096 for (i = 1; i < argc; i++) { 8097 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]); 8098 if (is_power_off) { 8099 /* 8100 * Note: we have to power off first, then set REMOVED, 8101 * or else zpool_vdev_set_removed_state() returns 8102 * EAGAIN.
8103 */ 8104 ret = zpool_power_off(zhp, argv[i]); 8105 if (ret != 0) { 8106 (void) fprintf(stderr, "%s %s %d\n", 8107 gettext("unable to power off slot for"), 8108 argv[i], ret); 8109 } 8110 zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE); 8111 8112 } else if (fault) { 8113 vdev_aux_t aux; 8114 if (istmp == B_FALSE) { 8115 /* Force the fault to persist across imports */ 8116 aux = VDEV_AUX_EXTERNAL_PERSIST; 8117 } else { 8118 aux = VDEV_AUX_EXTERNAL; 8119 } 8120 8121 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0) 8122 ret = 1; 8123 } else { 8124 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0) 8125 ret = 1; 8126 } 8127 } 8128 8129 zpool_close(zhp); 8130 8131 return (ret); 8132 } 8133 8134 /* 8135 * zpool clear [-nF]|[--power] <pool> [device] 8136 * 8137 * Clear all errors associated with a pool or a particular device. 8138 */ 8139 int 8140 zpool_do_clear(int argc, char **argv) 8141 { 8142 int c; 8143 int ret = 0; 8144 boolean_t dryrun = B_FALSE; 8145 boolean_t do_rewind = B_FALSE; 8146 boolean_t xtreme_rewind = B_FALSE; 8147 boolean_t is_power_on = B_FALSE; 8148 uint32_t rewind_policy = ZPOOL_NO_REWIND; 8149 nvlist_t *policy = NULL; 8150 zpool_handle_t *zhp; 8151 char *pool, *device; 8152 8153 struct option long_options[] = { 8154 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 8155 {0, 0, 0, 0} 8156 }; 8157 8158 /* check options */ 8159 while ((c = getopt_long(argc, argv, "FnX", long_options, 8160 NULL)) != -1) { 8161 switch (c) { 8162 case 'F': 8163 do_rewind = B_TRUE; 8164 break; 8165 case 'n': 8166 dryrun = B_TRUE; 8167 break; 8168 case 'X': 8169 xtreme_rewind = B_TRUE; 8170 break; 8171 case ZPOOL_OPTION_POWER: 8172 is_power_on = B_TRUE; 8173 break; 8174 case '?': 8175 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8176 optopt); 8177 usage(B_FALSE); 8178 } 8179 } 8180 8181 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT")) 8182 is_power_on = B_TRUE; 8183 8184 argc -= optind; 8185 argv += optind; 8186 8187 if (argc < 1) { 8188 (void) fprintf(stderr, gettext("missing pool name\n")); 8189 usage(B_FALSE); 8190 } 8191 8192 if (argc > 2) { 8193 (void) fprintf(stderr, gettext("too many arguments\n")); 8194 usage(B_FALSE); 8195 } 8196 8197 if ((dryrun || xtreme_rewind) && !do_rewind) { 8198 (void) fprintf(stderr, 8199 gettext("-n or -X only meaningful with -F\n")); 8200 usage(B_FALSE); 8201 } 8202 if (dryrun) 8203 rewind_policy = ZPOOL_TRY_REWIND; 8204 else if (do_rewind) 8205 rewind_policy = ZPOOL_DO_REWIND; 8206 if (xtreme_rewind) 8207 rewind_policy |= ZPOOL_EXTREME_REWIND; 8208 8209 /* In future, further rewind policy choices can be passed along here */ 8210 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 8211 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, 8212 rewind_policy) != 0) { 8213 return (1); 8214 } 8215 8216 pool = argv[0]; 8217 device = argc == 2 ? 
argv[1] : NULL; 8218 8219 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) { 8220 nvlist_free(policy); 8221 return (1); 8222 } 8223 8224 if (is_power_on) { 8225 if (device == NULL) { 8226 zpool_power_on_pool_and_wait_for_devices(zhp); 8227 } else { 8228 zpool_power_on_and_disk_wait(zhp, device); 8229 } 8230 } 8231 8232 if (zpool_clear(zhp, device, policy) != 0) 8233 ret = 1; 8234 8235 zpool_close(zhp); 8236 8237 nvlist_free(policy); 8238 8239 return (ret); 8240 } 8241 8242 /* 8243 * zpool reguid [-g <guid>] <pool> 8244 */ 8245 int 8246 zpool_do_reguid(int argc, char **argv) 8247 { 8248 uint64_t guid; 8249 uint64_t *guidp = NULL; 8250 int c; 8251 char *endptr; 8252 char *poolname; 8253 zpool_handle_t *zhp; 8254 int ret = 0; 8255 8256 /* check options */ 8257 while ((c = getopt(argc, argv, "g:")) != -1) { 8258 switch (c) { 8259 case 'g': 8260 errno = 0; 8261 guid = strtoull(optarg, &endptr, 10); 8262 if (errno != 0 || *endptr != '\0') { 8263 (void) fprintf(stderr, 8264 gettext("invalid GUID: %s\n"), optarg); 8265 usage(B_FALSE); 8266 } 8267 guidp = &guid; 8268 break; 8269 case '?': 8270 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8271 optopt); 8272 usage(B_FALSE); 8273 } 8274 } 8275 8276 argc -= optind; 8277 argv += optind; 8278 8279 /* get pool name and check number of arguments */ 8280 if (argc < 1) { 8281 (void) fprintf(stderr, gettext("missing pool name\n")); 8282 usage(B_FALSE); 8283 } 8284 8285 if (argc > 1) { 8286 (void) fprintf(stderr, gettext("too many arguments\n")); 8287 usage(B_FALSE); 8288 } 8289 8290 poolname = argv[0]; 8291 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 8292 return (1); 8293 8294 ret = zpool_set_guid(zhp, guidp); 8295 8296 zpool_close(zhp); 8297 return (ret); 8298 } 8299 8300 8301 /* 8302 * zpool reopen <pool> 8303 * 8304 * Reopen the pool so that the kernel can update the sizes of all vdevs. 8305 */ 8306 int 8307 zpool_do_reopen(int argc, char **argv) 8308 { 8309 int c; 8310 int ret = 0; 8311 boolean_t scrub_restart = B_TRUE; 8312 8313 /* check options */ 8314 while ((c = getopt(argc, argv, "n")) != -1) { 8315 switch (c) { 8316 case 'n': 8317 scrub_restart = B_FALSE; 8318 break; 8319 case '?': 8320 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8321 optopt); 8322 usage(B_FALSE); 8323 } 8324 } 8325 8326 argc -= optind; 8327 argv += optind; 8328 8329 /* if argc == 0 we will execute zpool_reopen_one on all pools */ 8330 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 8331 B_FALSE, zpool_reopen_one, &scrub_restart); 8332 8333 return (ret); 8334 } 8335 8336 typedef struct scrub_cbdata { 8337 int cb_type; 8338 pool_scrub_cmd_t cb_scrub_cmd; 8339 } scrub_cbdata_t; 8340 8341 static boolean_t 8342 zpool_has_checkpoint(zpool_handle_t *zhp) 8343 { 8344 nvlist_t *config, *nvroot; 8345 8346 config = zpool_get_config(zhp, NULL); 8347 8348 if (config != NULL) { 8349 pool_checkpoint_stat_t *pcs = NULL; 8350 uint_t c; 8351 8352 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 8353 (void) nvlist_lookup_uint64_array(nvroot, 8354 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 8355 8356 if (pcs == NULL || pcs->pcs_state == CS_NONE) 8357 return (B_FALSE); 8358 8359 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS || 8360 pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 8361 return (B_TRUE); 8362 } 8363 8364 return (B_FALSE); 8365 } 8366 8367 static int 8368 scrub_callback(zpool_handle_t *zhp, void *data) 8369 { 8370 scrub_cbdata_t *cb = data; 8371 int err; 8372 8373 /* 8374 * Ignore faulted pools. 
8375 */
8376 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
8377 (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
8378 "currently unavailable\n"), zpool_get_name(zhp));
8379 return (1);
8380 }
8381
8382 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
8383
8384 if (err == 0 && zpool_has_checkpoint(zhp) &&
8385 cb->cb_type == POOL_SCAN_SCRUB) {
8386 (void) printf(gettext("warning: will not scrub state that "
8387 "belongs to the checkpoint of pool '%s'\n"),
8388 zpool_get_name(zhp));
8389 }
8390
8391 return (err != 0);
8392 }
8393
8394 static int
8395 wait_callback(zpool_handle_t *zhp, void *data)
8396 {
8397 zpool_wait_activity_t *act = data;
8398 return (zpool_wait(zhp, *act));
8399 }
8400
8401 /*
8402 * zpool scrub [-s | -p] [-w] [-e] <pool> ...
8403 *
8404 * -e Only scrub blocks in the error log.
8405 * -s Stop. Stops any in-progress scrub.
8406 * -p Pause. Pause in-progress scrub.
8407 * -w Wait. Blocks until scrub has completed.
8408 */
8409 int
8410 zpool_do_scrub(int argc, char **argv)
8411 {
8412 int c;
8413 scrub_cbdata_t cb;
8414 boolean_t wait = B_FALSE;
8415 int error;
8416
8417 cb.cb_type = POOL_SCAN_SCRUB;
8418 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8419
8420 boolean_t is_error_scrub = B_FALSE;
8421 boolean_t is_pause = B_FALSE;
8422 boolean_t is_stop = B_FALSE;
8423
8424 /* check options */
8425 while ((c = getopt(argc, argv, "spwe")) != -1) {
8426 switch (c) {
8427 case 'e':
8428 is_error_scrub = B_TRUE;
8429 break;
8430 case 's':
8431 is_stop = B_TRUE;
8432 break;
8433 case 'p':
8434 is_pause = B_TRUE;
8435 break;
8436 case 'w':
8437 wait = B_TRUE;
8438 break;
8439 case '?':
8440 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
8441 optopt);
8442 usage(B_FALSE);
8443 }
8444 }
8445
8446 if (is_pause && is_stop) {
8447 (void) fprintf(stderr, gettext("invalid option "
8448 "combination: -s and -p are mutually exclusive\n"));
8449 usage(B_FALSE);
8450 } else {
8451 if (is_error_scrub)
8452 cb.cb_type = POOL_SCAN_ERRORSCRUB;
8453
8454 if (is_pause) {
8455 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
8456 } else if (is_stop) {
8457 cb.cb_type = POOL_SCAN_NONE;
8458 } else {
8459 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
8460 }
8461 }
8462
8463 if (wait && (cb.cb_type == POOL_SCAN_NONE ||
8464 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
8465 (void) fprintf(stderr, gettext("invalid option combination: "
8466 "-w cannot be used with -p or -s\n"));
8467 usage(B_FALSE);
8468 }
8469
8470 argc -= optind;
8471 argv += optind;
8472
8473 if (argc < 1) {
8474 (void) fprintf(stderr, gettext("missing pool name argument\n"));
8475 usage(B_FALSE);
8476 }
8477
8478 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8479 B_FALSE, scrub_callback, &cb);
8480
8481 if (wait && !error) {
8482 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
8483 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8484 B_FALSE, wait_callback, &act);
8485 }
8486
8487 return (error);
8488 }
8489
8490 /*
8491 * zpool resilver <pool> ...
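* (for example, "zpool resilver tank tank2" restarts the resilvers
* on both pools from the beginning; illustrative pool names)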
8492 * 8493 * Restarts any in-progress resilver 8494 */ 8495 int 8496 zpool_do_resilver(int argc, char **argv) 8497 { 8498 int c; 8499 scrub_cbdata_t cb; 8500 8501 cb.cb_type = POOL_SCAN_RESILVER; 8502 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 8503 8504 /* check options */ 8505 while ((c = getopt(argc, argv, "")) != -1) { 8506 switch (c) { 8507 case '?': 8508 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8509 optopt); 8510 usage(B_FALSE); 8511 } 8512 } 8513 8514 argc -= optind; 8515 argv += optind; 8516 8517 if (argc < 1) { 8518 (void) fprintf(stderr, gettext("missing pool name argument\n")); 8519 usage(B_FALSE); 8520 } 8521 8522 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 8523 B_FALSE, scrub_callback, &cb)); 8524 } 8525 8526 /* 8527 * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...] 8528 * 8529 * -c Cancel. Ends any in-progress trim. 8530 * -d Secure trim. Requires kernel and device support. 8531 * -r <rate> Sets the TRIM rate in bytes (per second). Supports 8532 * adding a multiplier suffix such as 'k' or 'm'. 8533 * -s Suspend. TRIM can then be restarted with no flags. 8534 * -w Wait. Blocks until trimming has completed. 8535 */ 8536 int 8537 zpool_do_trim(int argc, char **argv) 8538 { 8539 struct option long_options[] = { 8540 {"cancel", no_argument, NULL, 'c'}, 8541 {"secure", no_argument, NULL, 'd'}, 8542 {"rate", required_argument, NULL, 'r'}, 8543 {"suspend", no_argument, NULL, 's'}, 8544 {"wait", no_argument, NULL, 'w'}, 8545 {0, 0, 0, 0} 8546 }; 8547 8548 pool_trim_func_t cmd_type = POOL_TRIM_START; 8549 uint64_t rate = 0; 8550 boolean_t secure = B_FALSE; 8551 boolean_t wait = B_FALSE; 8552 8553 int c; 8554 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL)) 8555 != -1) { 8556 switch (c) { 8557 case 'c': 8558 if (cmd_type != POOL_TRIM_START && 8559 cmd_type != POOL_TRIM_CANCEL) { 8560 (void) fprintf(stderr, gettext("-c cannot be " 8561 "combined with other options\n")); 8562 usage(B_FALSE); 8563 } 8564 cmd_type = POOL_TRIM_CANCEL; 8565 break; 8566 case 'd': 8567 if (cmd_type != POOL_TRIM_START) { 8568 (void) fprintf(stderr, gettext("-d cannot be " 8569 "combined with the -c or -s options\n")); 8570 usage(B_FALSE); 8571 } 8572 secure = B_TRUE; 8573 break; 8574 case 'r': 8575 if (cmd_type != POOL_TRIM_START) { 8576 (void) fprintf(stderr, gettext("-r cannot be " 8577 "combined with the -c or -s options\n")); 8578 usage(B_FALSE); 8579 } 8580 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) { 8581 (void) fprintf(stderr, "%s: %s\n", 8582 gettext("invalid value for rate"), 8583 libzfs_error_description(g_zfs)); 8584 usage(B_FALSE); 8585 } 8586 break; 8587 case 's': 8588 if (cmd_type != POOL_TRIM_START && 8589 cmd_type != POOL_TRIM_SUSPEND) { 8590 (void) fprintf(stderr, gettext("-s cannot be " 8591 "combined with other options\n")); 8592 usage(B_FALSE); 8593 } 8594 cmd_type = POOL_TRIM_SUSPEND; 8595 break; 8596 case 'w': 8597 wait = B_TRUE; 8598 break; 8599 case '?': 8600 if (optopt != 0) { 8601 (void) fprintf(stderr, 8602 gettext("invalid option '%c'\n"), optopt); 8603 } else { 8604 (void) fprintf(stderr, 8605 gettext("invalid option '%s'\n"), 8606 argv[optind - 1]); 8607 } 8608 usage(B_FALSE); 8609 } 8610 } 8611 8612 argc -= optind; 8613 argv += optind; 8614 8615 if (argc < 1) { 8616 (void) fprintf(stderr, gettext("missing pool name argument\n")); 8617 usage(B_FALSE); 8618 return (-1); 8619 } 8620 8621 if (wait && (cmd_type != POOL_TRIM_START)) { 8622 (void) fprintf(stderr, gettext("-w cannot be used with -c or " 8623 "-s\n")); 8624 
usage(B_FALSE); 8625 } 8626 8627 char *poolname = argv[0]; 8628 zpool_handle_t *zhp = zpool_open(g_zfs, poolname); 8629 if (zhp == NULL) 8630 return (-1); 8631 8632 trimflags_t trim_flags = { 8633 .secure = secure, 8634 .rate = rate, 8635 .wait = wait, 8636 }; 8637 8638 nvlist_t *vdevs = fnvlist_alloc(); 8639 if (argc == 1) { 8640 /* no individual leaf vdevs specified, so add them all */ 8641 nvlist_t *config = zpool_get_config(zhp, NULL); 8642 nvlist_t *nvroot = fnvlist_lookup_nvlist(config, 8643 ZPOOL_CONFIG_VDEV_TREE); 8644 zpool_collect_leaves(zhp, nvroot, vdevs); 8645 trim_flags.fullpool = B_TRUE; 8646 } else { 8647 trim_flags.fullpool = B_FALSE; 8648 for (int i = 1; i < argc; i++) { 8649 fnvlist_add_boolean(vdevs, argv[i]); 8650 } 8651 } 8652 8653 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags); 8654 8655 fnvlist_free(vdevs); 8656 zpool_close(zhp); 8657 8658 return (error); 8659 } 8660 8661 /* 8662 * Converts a total number of seconds to a human readable string broken 8663 * down in to days/hours/minutes/seconds. 8664 */ 8665 static void 8666 secs_to_dhms(uint64_t total, char *buf) 8667 { 8668 uint64_t days = total / 60 / 60 / 24; 8669 uint64_t hours = (total / 60 / 60) % 24; 8670 uint64_t mins = (total / 60) % 60; 8671 uint64_t secs = (total % 60); 8672 8673 if (days > 0) { 8674 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu", 8675 (u_longlong_t)days, (u_longlong_t)hours, 8676 (u_longlong_t)mins, (u_longlong_t)secs); 8677 } else { 8678 (void) sprintf(buf, "%02llu:%02llu:%02llu", 8679 (u_longlong_t)hours, (u_longlong_t)mins, 8680 (u_longlong_t)secs); 8681 } 8682 } 8683 8684 /* 8685 * Print out detailed error scrub status. 8686 */ 8687 static void 8688 print_err_scrub_status(pool_scan_stat_t *ps) 8689 { 8690 time_t start, end, pause; 8691 uint64_t total_secs_left; 8692 uint64_t secs_left, mins_left, hours_left, days_left; 8693 uint64_t examined, to_be_examined; 8694 8695 if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) { 8696 return; 8697 } 8698 8699 (void) printf(gettext(" scrub: ")); 8700 8701 start = ps->pss_error_scrub_start; 8702 end = ps->pss_error_scrub_end; 8703 pause = ps->pss_pass_error_scrub_pause; 8704 examined = ps->pss_error_scrub_examined; 8705 to_be_examined = ps->pss_error_scrub_to_be_examined; 8706 8707 assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB); 8708 8709 if (ps->pss_error_scrub_state == DSS_FINISHED) { 8710 total_secs_left = end - start; 8711 days_left = total_secs_left / 60 / 60 / 24; 8712 hours_left = (total_secs_left / 60 / 60) % 24; 8713 mins_left = (total_secs_left / 60) % 60; 8714 secs_left = (total_secs_left % 60); 8715 8716 (void) printf(gettext("scrubbed %llu error blocks in %llu days " 8717 "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined, 8718 (u_longlong_t)days_left, (u_longlong_t)hours_left, 8719 (u_longlong_t)mins_left, (u_longlong_t)secs_left, 8720 ctime(&end)); 8721 8722 return; 8723 } else if (ps->pss_error_scrub_state == DSS_CANCELED) { 8724 (void) printf(gettext("error scrub canceled on %s"), 8725 ctime(&end)); 8726 return; 8727 } 8728 assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING); 8729 8730 /* Error scrub is in progress. 
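Illustrative output from the format strings below (hypothetical
date and counts):

    error scrub in progress since Thu Jun 13 10:59:30 2024
	42.50% done, issued I/O for 1024 error blocks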
*/ 8731 if (pause == 0) { 8732 (void) printf(gettext("error scrub in progress since %s"), 8733 ctime(&start)); 8734 } else { 8735 (void) printf(gettext("error scrub paused since %s"), 8736 ctime(&pause)); 8737 (void) printf(gettext("\terror scrub started on %s"), 8738 ctime(&start)); 8739 } 8740 8741 double fraction_done = (double)examined / (to_be_examined + examined); 8742 (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error" 8743 " blocks"), 100 * fraction_done, (u_longlong_t)examined); 8744 8745 (void) printf("\n"); 8746 } 8747 8748 /* 8749 * Print out detailed scrub status. 8750 */ 8751 static void 8752 print_scan_scrub_resilver_status(pool_scan_stat_t *ps) 8753 { 8754 time_t start, end, pause; 8755 uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i; 8756 uint64_t elapsed, scan_rate, issue_rate; 8757 double fraction_done; 8758 char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7]; 8759 char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32]; 8760 8761 printf(" "); 8762 printf_color(ANSI_BOLD, gettext("scan:")); 8763 printf(" "); 8764 8765 /* If there's never been a scan, there's not much to say. */ 8766 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE || 8767 ps->pss_func >= POOL_SCAN_FUNCS) { 8768 (void) printf(gettext("none requested\n")); 8769 return; 8770 } 8771 8772 start = ps->pss_start_time; 8773 end = ps->pss_end_time; 8774 pause = ps->pss_pass_scrub_pause; 8775 8776 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf)); 8777 8778 int is_resilver = ps->pss_func == POOL_SCAN_RESILVER; 8779 int is_scrub = ps->pss_func == POOL_SCAN_SCRUB; 8780 assert(is_resilver || is_scrub); 8781 8782 /* Scan is finished or canceled. */ 8783 if (ps->pss_state == DSS_FINISHED) { 8784 secs_to_dhms(end - start, time_buf); 8785 8786 if (is_scrub) { 8787 (void) printf(gettext("scrub repaired %s " 8788 "in %s with %llu errors on %s"), processed_buf, 8789 time_buf, (u_longlong_t)ps->pss_errors, 8790 ctime(&end)); 8791 } else if (is_resilver) { 8792 (void) printf(gettext("resilvered %s " 8793 "in %s with %llu errors on %s"), processed_buf, 8794 time_buf, (u_longlong_t)ps->pss_errors, 8795 ctime(&end)); 8796 } 8797 return; 8798 } else if (ps->pss_state == DSS_CANCELED) { 8799 if (is_scrub) { 8800 (void) printf(gettext("scrub canceled on %s"), 8801 ctime(&end)); 8802 } else if (is_resilver) { 8803 (void) printf(gettext("resilver canceled on %s"), 8804 ctime(&end)); 8805 } 8806 return; 8807 } 8808 8809 assert(ps->pss_state == DSS_SCANNING); 8810 8811 /* Scan is in progress. Resilvers can't be paused. 
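A paused scrub prints both the pause time and the original start
time (a resilver only ever reports "in progress since"), e.g. with
hypothetical dates:

    scrub paused since Fri Jun 14 09:00:12 2024
	scrub started on Thu Jun 13 22:15:03 2024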
*/ 8812 if (is_scrub) { 8813 if (pause == 0) { 8814 (void) printf(gettext("scrub in progress since %s"), 8815 ctime(&start)); 8816 } else { 8817 (void) printf(gettext("scrub paused since %s"), 8818 ctime(&pause)); 8819 (void) printf(gettext("\tscrub started on %s"), 8820 ctime(&start)); 8821 } 8822 } else if (is_resilver) { 8823 (void) printf(gettext("resilver in progress since %s"), 8824 ctime(&start)); 8825 } 8826 8827 scanned = ps->pss_examined; 8828 pass_scanned = ps->pss_pass_exam; 8829 issued = ps->pss_issued; 8830 pass_issued = ps->pss_pass_issued; 8831 total_s = ps->pss_to_examine; 8832 total_i = ps->pss_to_examine - ps->pss_skipped; 8833 8834 /* we are only done with a block once we have issued the IO for it */ 8835 fraction_done = (double)issued / total_i; 8836 8837 /* elapsed time for this pass, rounding up to 1 if it's 0 */ 8838 elapsed = time(NULL) - ps->pss_pass_start; 8839 elapsed -= ps->pss_pass_scrub_spent_paused; 8840 elapsed = (elapsed != 0) ? elapsed : 1; 8841 8842 scan_rate = pass_scanned / elapsed; 8843 issue_rate = pass_issued / elapsed; 8844 8845 /* format all of the numbers we will be reporting */ 8846 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf)); 8847 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf)); 8848 zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf)); 8849 zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf)); 8850 8851 /* do not print estimated time if we have a paused scrub */ 8852 (void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf); 8853 if (pause == 0 && scan_rate > 0) { 8854 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf)); 8855 (void) printf(gettext(" at %s/s"), srate_buf); 8856 } 8857 (void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf); 8858 if (pause == 0 && issue_rate > 0) { 8859 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf)); 8860 (void) printf(gettext(" at %s/s"), irate_buf); 8861 } 8862 (void) printf(gettext("\n")); 8863 8864 if (is_resilver) { 8865 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 8866 processed_buf, 100 * fraction_done); 8867 } else if (is_scrub) { 8868 (void) printf(gettext("\t%s repaired, %.2f%% done"), 8869 processed_buf, 100 * fraction_done); 8870 } 8871 8872 if (pause == 0) { 8873 /* 8874 * Only provide an estimate iff: 8875 * 1) we haven't yet issued all we expected, and 8876 * 2) the issue rate exceeds 10 MB/s, and 8877 * 3) it's either: 8878 * a) a resilver which has started repairs, or 8879 * b) a scrub which has entered the issue phase. 
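*
* Worked example (illustrative numbers): with total_i = 1000 GiB,
* issued = 800 GiB, and issue_rate = 100 MiB/s, the computation
* below is (204800 MiB) / (100 MiB/s) = 2048 s, which
* secs_to_dhms() formats as "00:34:08 to go".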
8880 */ 8881 if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 && 8882 ((is_resilver && ps->pss_processed > 0) || 8883 (is_scrub && issued > 0))) { 8884 secs_to_dhms((total_i - issued) / issue_rate, time_buf); 8885 (void) printf(gettext(", %s to go\n"), time_buf); 8886 } else { 8887 (void) printf(gettext(", no estimated " 8888 "completion time\n")); 8889 } 8890 } else { 8891 (void) printf(gettext("\n")); 8892 } 8893 } 8894 8895 static void 8896 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name) 8897 { 8898 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE) 8899 return; 8900 8901 printf(" "); 8902 printf_color(ANSI_BOLD, gettext("scan:")); 8903 printf(" "); 8904 8905 uint64_t bytes_scanned = vrs->vrs_bytes_scanned; 8906 uint64_t bytes_issued = vrs->vrs_bytes_issued; 8907 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt; 8908 uint64_t bytes_est_s = vrs->vrs_bytes_est; 8909 uint64_t bytes_est_i = vrs->vrs_bytes_est; 8910 if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8) 8911 bytes_est_i -= vrs->vrs_pass_bytes_skipped; 8912 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned / 8913 (vrs->vrs_pass_time_ms + 1)) * 1000; 8914 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued / 8915 (vrs->vrs_pass_time_ms + 1)) * 1000; 8916 double scan_pct = MIN((double)bytes_scanned * 100 / 8917 (bytes_est_s + 1), 100); 8918 8919 /* Format all of the numbers we will be reporting */ 8920 char bytes_scanned_buf[7], bytes_issued_buf[7]; 8921 char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7]; 8922 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32]; 8923 zfs_nicebytes(bytes_scanned, bytes_scanned_buf, 8924 sizeof (bytes_scanned_buf)); 8925 zfs_nicebytes(bytes_issued, bytes_issued_buf, 8926 sizeof (bytes_issued_buf)); 8927 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf, 8928 sizeof (bytes_rebuilt_buf)); 8929 zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf)); 8930 zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf)); 8931 8932 time_t start = vrs->vrs_start_time; 8933 time_t end = vrs->vrs_end_time; 8934 8935 /* Rebuild is finished or canceled. 
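Illustrative completed-rebuild line from the format string below
(hypothetical vdev name, size, and date):

    resilvered (mirror-0) 1.10T in 03:14:15 with 0 errors on
    Thu Jun 13 11:02:31 2024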
*/ 8936 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) { 8937 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf); 8938 (void) printf(gettext("resilvered (%s) %s in %s " 8939 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf, 8940 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end)); 8941 return; 8942 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) { 8943 (void) printf(gettext("resilver (%s) canceled on %s"), 8944 vdev_name, ctime(&end)); 8945 return; 8946 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 8947 (void) printf(gettext("resilver (%s) in progress since %s"), 8948 vdev_name, ctime(&start)); 8949 } 8950 8951 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE); 8952 8953 (void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf, 8954 bytes_est_s_buf); 8955 if (scan_rate > 0) { 8956 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf)); 8957 (void) printf(gettext(" at %s/s"), scan_rate_buf); 8958 } 8959 (void) printf(gettext(", %s / %s issued"), bytes_issued_buf, 8960 bytes_est_i_buf); 8961 if (issue_rate > 0) { 8962 zfs_nicebytes(issue_rate, issue_rate_buf, 8963 sizeof (issue_rate_buf)); 8964 (void) printf(gettext(" at %s/s"), issue_rate_buf); 8965 } 8966 (void) printf(gettext("\n")); 8967 8968 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 8969 bytes_rebuilt_buf, scan_pct); 8970 8971 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 8972 if (bytes_est_s >= bytes_scanned && 8973 scan_rate >= 10 * 1024 * 1024) { 8974 secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate, 8975 time_buf); 8976 (void) printf(gettext(", %s to go\n"), time_buf); 8977 } else { 8978 (void) printf(gettext(", no estimated " 8979 "completion time\n")); 8980 } 8981 } else { 8982 (void) printf(gettext("\n")); 8983 } 8984 } 8985 8986 /* 8987 * Print rebuild status for top-level vdevs. 8988 */ 8989 static void 8990 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot) 8991 { 8992 nvlist_t **child; 8993 uint_t children; 8994 8995 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 8996 &child, &children) != 0) 8997 children = 0; 8998 8999 for (uint_t c = 0; c < children; c++) { 9000 vdev_rebuild_stat_t *vrs; 9001 uint_t i; 9002 9003 if (nvlist_lookup_uint64_array(child[c], 9004 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 9005 char *name = zpool_vdev_name(g_zfs, zhp, 9006 child[c], VDEV_NAME_TYPE_ID); 9007 print_rebuild_status_impl(vrs, i, name); 9008 free(name); 9009 } 9010 } 9011 } 9012 9013 /* 9014 * As we don't scrub checkpointed blocks, we want to warn the user that we 9015 * skipped scanning some blocks if a checkpoint exists or existed at any 9016 * time during the scan. If a sequential instead of healing reconstruction 9017 * was performed then the blocks were reconstructed. However, their checksums 9018 * have not been verified so we still print the warning. 
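*
* For example (illustrative): after a sequential ("zpool replace -s")
* rather than healing reconstruction, the skipped blocks were copied
* but their checksums never verified, so the warning below still
* applies.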
9019 */ 9020 static void 9021 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs) 9022 { 9023 if (ps == NULL || pcs == NULL) 9024 return; 9025 9026 if (pcs->pcs_state == CS_NONE || 9027 pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 9028 return; 9029 9030 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS); 9031 9032 if (ps->pss_state == DSS_NONE) 9033 return; 9034 9035 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) && 9036 ps->pss_end_time < pcs->pcs_start_time) 9037 return; 9038 9039 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) { 9040 (void) printf(gettext(" scan warning: skipped blocks " 9041 "that are only referenced by the checkpoint.\n")); 9042 } else { 9043 assert(ps->pss_state == DSS_SCANNING); 9044 (void) printf(gettext(" scan warning: skipping blocks " 9045 "that are only referenced by the checkpoint.\n")); 9046 } 9047 } 9048 9049 /* 9050 * Returns B_TRUE if there is an active rebuild in progress. Otherwise, 9051 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for 9052 * the last completed (or cancelled) rebuild. 9053 */ 9054 static boolean_t 9055 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time) 9056 { 9057 nvlist_t **child; 9058 uint_t children; 9059 boolean_t rebuilding = B_FALSE; 9060 uint64_t end_time = 0; 9061 9062 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 9063 &child, &children) != 0) 9064 children = 0; 9065 9066 for (uint_t c = 0; c < children; c++) { 9067 vdev_rebuild_stat_t *vrs; 9068 uint_t i; 9069 9070 if (nvlist_lookup_uint64_array(child[c], 9071 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 9072 9073 if (vrs->vrs_end_time > end_time) 9074 end_time = vrs->vrs_end_time; 9075 9076 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 9077 rebuilding = B_TRUE; 9078 end_time = 0; 9079 break; 9080 } 9081 } 9082 } 9083 9084 if (rebuild_end_time != NULL) 9085 *rebuild_end_time = end_time; 9086 9087 return (rebuilding); 9088 } 9089 9090 static void 9091 vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 9092 int depth, boolean_t isspare, char *parent, nvlist_t *item) 9093 { 9094 nvlist_t *vds, **child, *ch = NULL; 9095 uint_t vsc, children; 9096 vdev_stat_t *vs; 9097 char *vname; 9098 uint64_t notpresent; 9099 const char *type, *path; 9100 9101 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 9102 &child, &children) != 0) 9103 children = 0; 9104 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 9105 (uint64_t **)&vs, &vsc) == 0); 9106 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 9107 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) 9108 return; 9109 9110 if (cb->cb_print_unhealthy && depth > 0 && 9111 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) { 9112 return; 9113 } 9114 vname = zpool_vdev_name(g_zfs, zhp, nv, 9115 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 9116 vds = fnvlist_alloc(); 9117 fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int); 9118 if (cb->cb_flat_vdevs && parent != NULL) { 9119 fnvlist_add_string(vds, "parent", parent); 9120 } 9121 9122 if (isspare) { 9123 if (vs->vs_aux == VDEV_AUX_SPARED) { 9124 fnvlist_add_string(vds, "state", "INUSE"); 9125 used_by_other(zhp, nv, vds); 9126 } else if (vs->vs_state == VDEV_STATE_HEALTHY) 9127 fnvlist_add_string(vds, "state", "AVAIL"); 9128 } else { 9129 if (vs->vs_alloc) { 9130 nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc, 9131 cb->cb_literal, cb->cb_json_as_int, 9132 ZFS_NICENUM_BYTES); 9133 } 9134 if 
(vs->vs_space) {
9135 nice_num_str_nvlist(vds, "total_space", vs->vs_space,
9136 cb->cb_literal, cb->cb_json_as_int,
9137 ZFS_NICENUM_BYTES);
9138 }
9139 if (vs->vs_dspace) {
9140 nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,
9141 cb->cb_literal, cb->cb_json_as_int,
9142 ZFS_NICENUM_BYTES);
9143 }
9144 if (vs->vs_rsize) {
9145 nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,
9146 cb->cb_literal, cb->cb_json_as_int,
9147 ZFS_NICENUM_BYTES);
9148 }
9149 if (vs->vs_esize) {
9150 nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,
9151 cb->cb_literal, cb->cb_json_as_int,
9152 ZFS_NICENUM_BYTES);
9153 }
9154 if (vs->vs_self_healed) {
9155 nice_num_str_nvlist(vds, "self_healed",
9156 vs->vs_self_healed, cb->cb_literal,
9157 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9158 }
9159 if (vs->vs_pspace) {
9160 nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,
9161 cb->cb_literal, cb->cb_json_as_int,
9162 ZFS_NICENUM_BYTES);
9163 }
9164 nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,
9165 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9166 nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,
9167 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
9168 nice_num_str_nvlist(vds, "checksum_errors",
9169 vs->vs_checksum_errors, cb->cb_literal,
9170 cb->cb_json_as_int, ZFS_NICENUM_1024);
9171 if (vs->vs_scan_processed) {
9172 nice_num_str_nvlist(vds, "scan_processed",
9173 vs->vs_scan_processed, cb->cb_literal,
9174 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9175 }
9176 if (vs->vs_checkpoint_space) {
9177 nice_num_str_nvlist(vds, "checkpoint_space",
9178 vs->vs_checkpoint_space, cb->cb_literal,
9179 cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9180 }
9181 if (vs->vs_resilver_deferred) {
9182 nice_num_str_nvlist(vds, "resilver_deferred",
9183 vs->vs_resilver_deferred, B_TRUE,
9184 cb->cb_json_as_int, ZFS_NICENUM_1024);
9185 }
9186 if (children == 0) {
9187 nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,
9188 cb->cb_literal, cb->cb_json_as_int,
9189 ZFS_NICENUM_1024);
9190 }
9191 if (cb->cb_print_power) {
9192 if (children == 0) {
9193 /* Only leaf vdevs have physical slots */
9194 switch (zpool_power_current_state(zhp, (char *)
9195 fnvlist_lookup_string(nv,
9196 ZPOOL_CONFIG_PATH))) {
9197 case 0:
9198 fnvlist_add_string(vds, "power_state",
9199 "off");
9200 break;
9201 case 1:
9202 fnvlist_add_string(vds, "power_state",
9203 "on");
9204 break;
9205 default:
9206 fnvlist_add_string(vds, "power_state",
9207 "-");
9208 }
9209 } else {
9210 fnvlist_add_string(vds, "power_state", "-");
9211 }
9212 }
9213 }
9214
9215 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
9216 &notpresent) == 0) {
9217 nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
9218 1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
9219 fnvlist_add_string(vds, "was",
9220 fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));
9221 } else if (vs->vs_aux != VDEV_AUX_NONE) {
9222 fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);
9223 } else if (children == 0 && !isspare &&
9224 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
9225 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
9226 vs->vs_configured_ashift < vs->vs_physical_ashift) {
9227 nice_num_str_nvlist(vds, "configured_ashift",
9228 vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,
9229 ZFS_NICENUM_1024);
9230 nice_num_str_nvlist(vds, "physical_ashift",
9231 vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,
9232 ZFS_NICENUM_1024);
9233 }
9234 if (vs->vs_scan_removing != 0) {
9235 nice_num_str_nvlist(vds, "removing",
vs->vs_scan_removing, 9236 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); 9237 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) { 9238 nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc, 9239 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); 9240 } 9241 9242 if (cb->vcdl != NULL) { 9243 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 9244 zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp), 9245 path, vds); 9246 } 9247 } 9248 9249 if (children == 0) { 9250 if (cb->cb_print_vdev_init) { 9251 if (vs->vs_initialize_state != 0) { 9252 uint64_t st = vs->vs_initialize_state; 9253 fnvlist_add_string(vds, "init_state", 9254 vdev_init_state_str[st]); 9255 nice_num_str_nvlist(vds, "initialized", 9256 vs->vs_initialize_bytes_done, 9257 cb->cb_literal, cb->cb_json_as_int, 9258 ZFS_NICENUM_BYTES); 9259 nice_num_str_nvlist(vds, "to_initialize", 9260 vs->vs_initialize_bytes_est, 9261 cb->cb_literal, cb->cb_json_as_int, 9262 ZFS_NICENUM_BYTES); 9263 nice_num_str_nvlist(vds, "init_time", 9264 vs->vs_initialize_action_time, 9265 cb->cb_literal, cb->cb_json_as_int, 9266 ZFS_NICE_TIMESTAMP); 9267 nice_num_str_nvlist(vds, "init_errors", 9268 vs->vs_initialize_errors, 9269 cb->cb_literal, cb->cb_json_as_int, 9270 ZFS_NICENUM_1024); 9271 } else { 9272 fnvlist_add_string(vds, "init_state", 9273 "UNINITIALIZED"); 9274 } 9275 } 9276 if (cb->cb_print_vdev_trim) { 9277 if (vs->vs_trim_notsup == 0) { 9278 if (vs->vs_trim_state != 0) { 9279 uint64_t st = vs->vs_trim_state; 9280 fnvlist_add_string(vds, "trim_state", 9281 vdev_trim_state_str[st]); 9282 nice_num_str_nvlist(vds, "trimmed", 9283 vs->vs_trim_bytes_done, 9284 cb->cb_literal, cb->cb_json_as_int, 9285 ZFS_NICENUM_BYTES); 9286 nice_num_str_nvlist(vds, "to_trim", 9287 vs->vs_trim_bytes_est, 9288 cb->cb_literal, cb->cb_json_as_int, 9289 ZFS_NICENUM_BYTES); 9290 nice_num_str_nvlist(vds, "trim_time", 9291 vs->vs_trim_action_time, 9292 cb->cb_literal, cb->cb_json_as_int, 9293 ZFS_NICE_TIMESTAMP); 9294 nice_num_str_nvlist(vds, "trim_errors", 9295 vs->vs_trim_errors, 9296 cb->cb_literal, cb->cb_json_as_int, 9297 ZFS_NICENUM_1024); 9298 } else 9299 fnvlist_add_string(vds, "trim_state", 9300 "UNTRIMMED"); 9301 } 9302 nice_num_str_nvlist(vds, "trim_notsup", 9303 vs->vs_trim_notsup, B_TRUE, 9304 cb->cb_json_as_int, ZFS_NICENUM_1024); 9305 } 9306 } else { 9307 ch = fnvlist_alloc(); 9308 } 9309 9310 if (cb->cb_flat_vdevs && children == 0) { 9311 fnvlist_add_nvlist(item, vname, vds); 9312 } 9313 9314 for (int c = 0; c < children; c++) { 9315 uint64_t islog = B_FALSE, ishole = B_FALSE; 9316 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 9317 &islog); 9318 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 9319 &ishole); 9320 if (islog || ishole) 9321 continue; 9322 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 9323 continue; 9324 if (cb->cb_flat_vdevs) { 9325 vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare, 9326 vname, item); 9327 } 9328 vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare, 9329 vname, ch); 9330 } 9331 9332 if (ch != NULL) { 9333 if (!nvlist_empty(ch)) 9334 fnvlist_add_nvlist(vds, "vdevs", ch); 9335 fnvlist_free(ch); 9336 } 9337 fnvlist_add_nvlist(item, vname, vds); 9338 fnvlist_free(vds); 9339 free(vname); 9340 } 9341 9342 static void 9343 class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 9344 const char *class, nvlist_t *item) 9345 { 9346 uint_t c, children; 9347 nvlist_t **child; 9348 nvlist_t *class_obj = NULL; 9349 9350 if (!cb->cb_flat_vdevs) 9351 class_obj = 
fnvlist_alloc(); 9352 9353 assert(zhp != NULL || !cb->cb_verbose); 9354 9355 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, 9356 &children) != 0) 9357 return; 9358 9359 for (c = 0; c < children; c++) { 9360 uint64_t is_log = B_FALSE; 9361 const char *bias = NULL; 9362 const char *type = NULL; 9363 char *name = zpool_vdev_name(g_zfs, zhp, child[c], 9364 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 9365 9366 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 9367 &is_log); 9368 9369 if (is_log) { 9370 bias = (char *)VDEV_ALLOC_CLASS_LOGS; 9371 } else { 9372 (void) nvlist_lookup_string(child[c], 9373 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 9374 (void) nvlist_lookup_string(child[c], 9375 ZPOOL_CONFIG_TYPE, &type); 9376 } 9377 9378 if (bias == NULL || strcmp(bias, class) != 0) 9379 continue; 9380 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 9381 continue; 9382 9383 if (cb->cb_flat_vdevs) { 9384 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE, 9385 NULL, item); 9386 } else { 9387 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE, 9388 NULL, class_obj); 9389 } 9390 free(name); 9391 } 9392 if (!cb->cb_flat_vdevs) { 9393 if (!nvlist_empty(class_obj)) 9394 fnvlist_add_nvlist(item, class, class_obj); 9395 fnvlist_free(class_obj); 9396 } 9397 } 9398 9399 static void 9400 l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 9401 nvlist_t *item) 9402 { 9403 nvlist_t *l2c = NULL, **l2cache; 9404 uint_t nl2cache; 9405 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 9406 &l2cache, &nl2cache) == 0) { 9407 if (nl2cache == 0) 9408 return; 9409 if (!cb->cb_flat_vdevs) 9410 l2c = fnvlist_alloc(); 9411 for (int i = 0; i < nl2cache; i++) { 9412 if (cb->cb_flat_vdevs) { 9413 vdev_stats_nvlist(zhp, cb, l2cache[i], 2, 9414 B_FALSE, NULL, item); 9415 } else { 9416 vdev_stats_nvlist(zhp, cb, l2cache[i], 2, 9417 B_FALSE, NULL, l2c); 9418 } 9419 } 9420 } 9421 if (!cb->cb_flat_vdevs) { 9422 if (!nvlist_empty(l2c)) 9423 fnvlist_add_nvlist(item, "l2cache", l2c); 9424 fnvlist_free(l2c); 9425 } 9426 } 9427 9428 static void 9429 spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 9430 nvlist_t *item) 9431 { 9432 nvlist_t *sp = NULL, **spares; 9433 uint_t nspares; 9434 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 9435 &spares, &nspares) == 0) { 9436 if (nspares == 0) 9437 return; 9438 if (!cb->cb_flat_vdevs) 9439 sp = fnvlist_alloc(); 9440 for (int i = 0; i < nspares; i++) { 9441 if (cb->cb_flat_vdevs) { 9442 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE, 9443 NULL, item); 9444 } else { 9445 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE, 9446 NULL, sp); 9447 } 9448 } 9449 } 9450 if (!cb->cb_flat_vdevs) { 9451 if (!nvlist_empty(sp)) 9452 fnvlist_add_nvlist(item, "spares", sp); 9453 fnvlist_free(sp); 9454 } 9455 } 9456 9457 static void 9458 errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item) 9459 { 9460 uint64_t nerr; 9461 nvlist_t *config = zpool_get_config(zhp, NULL); 9462 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, 9463 &nerr) == 0) { 9464 nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr, 9465 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9466 if (nerr != 0 && cb->cb_verbose) { 9467 nvlist_t *nverrlist = NULL; 9468 if (zpool_get_errlog(zhp, &nverrlist) == 0) { 9469 int i = 0; 9470 int count = 0; 9471 size_t len = MAXPATHLEN * 2; 9472 nvpair_t *elem = NULL; 9473 9474 for (nvpair_t *pair = 9475 nvlist_next_nvpair(nverrlist, NULL); 9476 pair != NULL; 9477 pair = nvlist_next_nvpair(nverrlist, pair)) 
9478 count++; 9479 char **errl = (char **)malloc( 9480 count * sizeof (char *)); 9481 9482 while ((elem = nvlist_next_nvpair(nverrlist, 9483 elem)) != NULL) { 9484 nvlist_t *nv; 9485 uint64_t dsobj, obj; 9486 9487 verify(nvpair_value_nvlist(elem, 9488 &nv) == 0); 9489 verify(nvlist_lookup_uint64(nv, 9490 ZPOOL_ERR_DATASET, &dsobj) == 0); 9491 verify(nvlist_lookup_uint64(nv, 9492 ZPOOL_ERR_OBJECT, &obj) == 0); 9493 errl[i] = safe_malloc(len); 9494 zpool_obj_to_path(zhp, dsobj, obj, 9495 errl[i++], len); 9496 } 9497 nvlist_free(nverrlist); 9498 fnvlist_add_string_array(item, "errlist", 9499 (const char **)errl, count); 9500 for (int i = 0; i < count; ++i) 9501 free(errl[i]); 9502 free(errl); 9503 } else 9504 fnvlist_add_string(item, "errlist", 9505 strerror(errno)); 9506 } 9507 } 9508 } 9509 9510 static void 9511 ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item) 9512 { 9513 nice_num_str_nvlist(item, "blocks", dds->dds_blocks, 9514 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9515 nice_num_str_nvlist(item, "logical_size", dds->dds_lsize, 9516 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9517 nice_num_str_nvlist(item, "physical_size", dds->dds_psize, 9518 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9519 nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize, 9520 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9521 nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks, 9522 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9523 nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize, 9524 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9525 nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize, 9526 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9527 nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize, 9528 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9529 } 9530 9531 static void 9532 dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item) 9533 { 9534 nvlist_t *config; 9535 if (cb->cb_dedup_stats) { 9536 ddt_histogram_t *ddh; 9537 ddt_stat_t *dds; 9538 ddt_object_t *ddo; 9539 nvlist_t *ddt_stat, *ddt_obj, *dedup; 9540 uint_t c; 9541 uint64_t cspace_prop; 9542 9543 config = zpool_get_config(zhp, NULL); 9544 if (nvlist_lookup_uint64_array(config, 9545 ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0) 9546 return; 9547 9548 dedup = fnvlist_alloc(); 9549 ddt_obj = fnvlist_alloc(); 9550 nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count, 9551 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9552 if (ddo->ddo_count == 0) { 9553 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS, 9554 ddt_obj); 9555 fnvlist_add_nvlist(item, "dedup_stats", dedup); 9556 fnvlist_free(ddt_obj); 9557 fnvlist_free(dedup); 9558 return; 9559 } else { 9560 nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace, 9561 cb->cb_literal, cb->cb_json_as_int, 9562 ZFS_NICENUM_1024); 9563 nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace, 9564 cb->cb_literal, cb->cb_json_as_int, 9565 ZFS_NICENUM_1024); 9566 /* 9567 * Squash cached size into in-core size to handle race. 9568 * Only include cached size if it is available. 
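*
* Sketch with illustrative numbers: if ZPOOL_PROP_DEDUPCACHED
* reports 12M but ddo_mspace has meanwhile shrunk to 10M, the MIN()
* below clamps cspace to 10M so the reported cached size can never
* exceed the in-core size.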
9569 */ 9570 cspace_prop = zpool_get_prop_int(zhp, 9571 ZPOOL_PROP_DEDUPCACHED, NULL); 9572 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace); 9573 nice_num_str_nvlist(dedup, "cspace", cspace_prop, 9574 cb->cb_literal, cb->cb_json_as_int, 9575 ZFS_NICENUM_1024); 9576 } 9577 9578 ddt_stat = fnvlist_alloc(); 9579 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, 9580 (uint64_t **)&dds, &c) == 0) { 9581 nvlist_t *total = fnvlist_alloc(); 9582 if (dds->dds_blocks == 0) 9583 fnvlist_add_string(total, "blocks", "0"); 9584 else 9585 ddt_stats_nvlist(dds, cb, total); 9586 fnvlist_add_nvlist(ddt_stat, "total", total); 9587 fnvlist_free(total); 9588 } 9589 if (nvlist_lookup_uint64_array(config, 9590 ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) { 9591 nvlist_t *hist = fnvlist_alloc(); 9592 nvlist_t *entry = NULL; 9593 char buf[16]; 9594 for (int h = 0; h < 64; h++) { 9595 if (ddh->ddh_stat[h].dds_blocks != 0) { 9596 entry = fnvlist_alloc(); 9597 ddt_stats_nvlist(&ddh->ddh_stat[h], cb, 9598 entry); 9599 snprintf(buf, 16, "%d", h); 9600 fnvlist_add_nvlist(hist, buf, entry); 9601 fnvlist_free(entry); 9602 } 9603 } 9604 if (!nvlist_empty(hist)) 9605 fnvlist_add_nvlist(ddt_stat, "histogram", hist); 9606 fnvlist_free(hist); 9607 } 9608 9609 if (!nvlist_empty(ddt_obj)) { 9610 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS, 9611 ddt_obj); 9612 } 9613 fnvlist_free(ddt_obj); 9614 if (!nvlist_empty(ddt_stat)) { 9615 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS, 9616 ddt_stat); 9617 } 9618 fnvlist_free(ddt_stat); 9619 if (!nvlist_empty(dedup)) 9620 fnvlist_add_nvlist(item, "dedup_stats", dedup); 9621 fnvlist_free(dedup); 9622 } 9623 } 9624 9625 static void 9626 raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, 9627 nvlist_t *nvroot, nvlist_t *item) 9628 { 9629 uint_t c; 9630 pool_raidz_expand_stat_t *pres = NULL; 9631 if (nvlist_lookup_uint64_array(nvroot, 9632 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) { 9633 nvlist_t **child; 9634 uint_t children; 9635 nvlist_t *nv = fnvlist_alloc(); 9636 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 9637 &child, &children) == 0); 9638 assert(pres->pres_expanding_vdev < children); 9639 char *name = 9640 zpool_vdev_name(g_zfs, zhp, 9641 child[pres->pres_expanding_vdev], 0); 9642 fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int); 9643 fnvlist_add_string(nv, "state", 9644 pool_scan_state_str[pres->pres_state]); 9645 nice_num_str_nvlist(nv, "expanding_vdev", 9646 pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int, 9647 ZFS_NICENUM_1024); 9648 nice_num_str_nvlist(nv, "start_time", pres->pres_start_time, 9649 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9650 nice_num_str_nvlist(nv, "end_time", pres->pres_end_time, 9651 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9652 nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow, 9653 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9654 nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed, 9655 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9656 nice_num_str_nvlist(nv, "waiting_for_resilver", 9657 pres->pres_waiting_for_resilver, B_TRUE, 9658 cb->cb_json_as_int, ZFS_NICENUM_1024); 9659 fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv); 9660 fnvlist_free(nv); 9661 free(name); 9662 } 9663 } 9664 9665 static void 9666 checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb, 9667 nvlist_t *item) 9668 { 9669 uint_t c; 9670 pool_checkpoint_stat_t *pcs = NULL; 9671 if 
(nvlist_lookup_uint64_array(nvroot, 9672 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) { 9673 nvlist_t *nv = fnvlist_alloc(); 9674 fnvlist_add_string(nv, "state", 9675 checkpoint_state_str[pcs->pcs_state]); 9676 nice_num_str_nvlist(nv, "start_time", 9677 pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int, 9678 ZFS_NICE_TIMESTAMP); 9679 nice_num_str_nvlist(nv, "space", 9680 pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int, 9681 ZFS_NICENUM_BYTES); 9682 fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv); 9683 fnvlist_free(nv); 9684 } 9685 } 9686 9687 static void 9688 removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, 9689 nvlist_t *nvroot, nvlist_t *item) 9690 { 9691 uint_t c; 9692 pool_removal_stat_t *prs = NULL; 9693 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS, 9694 (uint64_t **)&prs, &c) == 0) { 9695 if (prs->prs_state != DSS_NONE) { 9696 nvlist_t **child; 9697 uint_t children; 9698 verify(nvlist_lookup_nvlist_array(nvroot, 9699 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0); 9700 assert(prs->prs_removing_vdev < children); 9701 char *vdev_name = zpool_vdev_name(g_zfs, zhp, 9702 child[prs->prs_removing_vdev], B_TRUE); 9703 nvlist_t *nv = fnvlist_alloc(); 9704 fill_vdev_info(nv, zhp, vdev_name, B_FALSE, 9705 cb->cb_json_as_int); 9706 fnvlist_add_string(nv, "state", 9707 pool_scan_state_str[prs->prs_state]); 9708 nice_num_str_nvlist(nv, "removing_vdev", 9709 prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int, 9710 ZFS_NICENUM_1024); 9711 nice_num_str_nvlist(nv, "start_time", 9712 prs->prs_start_time, cb->cb_literal, 9713 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9714 nice_num_str_nvlist(nv, "end_time", prs->prs_end_time, 9715 cb->cb_literal, cb->cb_json_as_int, 9716 ZFS_NICE_TIMESTAMP); 9717 nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy, 9718 cb->cb_literal, cb->cb_json_as_int, 9719 ZFS_NICENUM_BYTES); 9720 nice_num_str_nvlist(nv, "copied", prs->prs_copied, 9721 cb->cb_literal, cb->cb_json_as_int, 9722 ZFS_NICENUM_BYTES); 9723 nice_num_str_nvlist(nv, "mapping_memory", 9724 prs->prs_mapping_memory, cb->cb_literal, 9725 cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9726 fnvlist_add_nvlist(item, 9727 ZPOOL_CONFIG_REMOVAL_STATS, nv); 9728 fnvlist_free(nv); 9729 free(vdev_name); 9730 } 9731 } 9732 } 9733 9734 static void 9735 scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, 9736 nvlist_t *nvroot, nvlist_t *item) 9737 { 9738 pool_scan_stat_t *ps = NULL; 9739 uint_t c; 9740 nvlist_t *scan = fnvlist_alloc(); 9741 nvlist_t **child; 9742 uint_t children; 9743 9744 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS, 9745 (uint64_t **)&ps, &c) == 0) { 9746 fnvlist_add_string(scan, "function", 9747 pool_scan_func_str[ps->pss_func]); 9748 fnvlist_add_string(scan, "state", 9749 pool_scan_state_str[ps->pss_state]); 9750 nice_num_str_nvlist(scan, "start_time", ps->pss_start_time, 9751 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9752 nice_num_str_nvlist(scan, "end_time", ps->pss_end_time, 9753 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9754 nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine, 9755 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9756 nice_num_str_nvlist(scan, "examined", ps->pss_examined, 9757 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9758 nice_num_str_nvlist(scan, "skipped", ps->pss_skipped, 9759 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9760 nice_num_str_nvlist(scan, "processed", ps->pss_processed, 9761 cb->cb_literal, 
cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9762 nice_num_str_nvlist(scan, "errors", ps->pss_errors, 9763 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9764 nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam, 9765 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9766 nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start, 9767 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); 9768 nice_num_str_nvlist(scan, "scrub_pause", 9769 ps->pss_pass_scrub_pause, cb->cb_literal, 9770 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9771 nice_num_str_nvlist(scan, "scrub_spent_paused", 9772 ps->pss_pass_scrub_spent_paused, 9773 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); 9774 nice_num_str_nvlist(scan, "issued_bytes_per_scan", 9775 ps->pss_pass_issued, cb->cb_literal, 9776 cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9777 nice_num_str_nvlist(scan, "issued", ps->pss_issued, 9778 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9779 if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB && 9780 ps->pss_error_scrub_start > ps->pss_start_time) { 9781 fnvlist_add_string(scan, "err_scrub_func", 9782 pool_scan_func_str[ps->pss_error_scrub_func]); 9783 fnvlist_add_string(scan, "err_scrub_state", 9784 pool_scan_state_str[ps->pss_error_scrub_state]); 9785 nice_num_str_nvlist(scan, "err_scrub_start_time", 9786 ps->pss_error_scrub_start, 9787 cb->cb_literal, cb->cb_json_as_int, 9788 ZFS_NICE_TIMESTAMP); 9789 nice_num_str_nvlist(scan, "err_scrub_end_time", 9790 ps->pss_error_scrub_end, 9791 cb->cb_literal, cb->cb_json_as_int, 9792 ZFS_NICE_TIMESTAMP); 9793 nice_num_str_nvlist(scan, "err_scrub_examined", 9794 ps->pss_error_scrub_examined, 9795 cb->cb_literal, cb->cb_json_as_int, 9796 ZFS_NICENUM_1024); 9797 nice_num_str_nvlist(scan, "err_scrub_to_examine", 9798 ps->pss_error_scrub_to_be_examined, 9799 cb->cb_literal, cb->cb_json_as_int, 9800 ZFS_NICENUM_1024); 9801 nice_num_str_nvlist(scan, "err_scrub_pause", 9802 ps->pss_pass_error_scrub_pause, 9803 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); 9804 } 9805 } 9806 9807 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 9808 &child, &children) == 0) { 9809 vdev_rebuild_stat_t *vrs; 9810 uint_t i; 9811 char *name; 9812 nvlist_t *nv; 9813 nvlist_t *rebuild = fnvlist_alloc(); 9814 uint64_t st; 9815 for (uint_t c = 0; c < children; c++) { 9816 if (nvlist_lookup_uint64_array(child[c], 9817 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, 9818 &i) == 0) { 9819 if (vrs->vrs_state != VDEV_REBUILD_NONE) { 9820 nv = fnvlist_alloc(); 9821 name = zpool_vdev_name(g_zfs, zhp, 9822 child[c], VDEV_NAME_TYPE_ID); 9823 fill_vdev_info(nv, zhp, name, B_FALSE, 9824 cb->cb_json_as_int); 9825 st = vrs->vrs_state; 9826 fnvlist_add_string(nv, "state", 9827 vdev_rebuild_state_str[st]); 9828 nice_num_str_nvlist(nv, "start_time", 9829 vrs->vrs_start_time, cb->cb_literal, 9830 cb->cb_json_as_int, 9831 ZFS_NICE_TIMESTAMP); 9832 nice_num_str_nvlist(nv, "end_time", 9833 vrs->vrs_end_time, cb->cb_literal, 9834 cb->cb_json_as_int, 9835 ZFS_NICE_TIMESTAMP); 9836 nice_num_str_nvlist(nv, "scan_time", 9837 vrs->vrs_scan_time_ms * 1000000, 9838 cb->cb_literal, cb->cb_json_as_int, 9839 ZFS_NICENUM_TIME); 9840 nice_num_str_nvlist(nv, "scanned", 9841 vrs->vrs_bytes_scanned, 9842 cb->cb_literal, cb->cb_json_as_int, 9843 ZFS_NICENUM_BYTES); 9844 nice_num_str_nvlist(nv, "issued", 9845 vrs->vrs_bytes_issued, 9846 cb->cb_literal, cb->cb_json_as_int, 9847 ZFS_NICENUM_BYTES); 9848 nice_num_str_nvlist(nv, "rebuilt", 9849 vrs->vrs_bytes_rebuilt, 9850 cb->cb_literal, cb->cb_json_as_int, 9851 
ZFS_NICENUM_BYTES); 9852 nice_num_str_nvlist(nv, "to_scan", 9853 vrs->vrs_bytes_est, cb->cb_literal, 9854 cb->cb_json_as_int, 9855 ZFS_NICENUM_BYTES); 9856 nice_num_str_nvlist(nv, "errors", 9857 vrs->vrs_errors, cb->cb_literal, 9858 cb->cb_json_as_int, 9859 ZFS_NICENUM_1024); 9860 nice_num_str_nvlist(nv, "pass_time", 9861 vrs->vrs_pass_time_ms * 1000000, 9862 cb->cb_literal, cb->cb_json_as_int, 9863 ZFS_NICENUM_TIME); 9864 nice_num_str_nvlist(nv, "pass_scanned", 9865 vrs->vrs_pass_bytes_scanned, 9866 cb->cb_literal, cb->cb_json_as_int, 9867 ZFS_NICENUM_BYTES); 9868 nice_num_str_nvlist(nv, "pass_issued", 9869 vrs->vrs_pass_bytes_issued, 9870 cb->cb_literal, cb->cb_json_as_int, 9871 ZFS_NICENUM_BYTES); 9872 nice_num_str_nvlist(nv, "pass_skipped", 9873 vrs->vrs_pass_bytes_skipped, 9874 cb->cb_literal, cb->cb_json_as_int, 9875 ZFS_NICENUM_BYTES); 9876 fnvlist_add_nvlist(rebuild, name, nv); 9877 free(name); 9878 } 9879 } 9880 } 9881 if (!nvlist_empty(rebuild)) 9882 fnvlist_add_nvlist(scan, "rebuild_stats", rebuild); 9883 fnvlist_free(rebuild); 9884 } 9885 9886 if (!nvlist_empty(scan)) 9887 fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan); 9888 fnvlist_free(scan); 9889 } 9890 9891 /* 9892 * Print the scan status. 9893 */ 9894 static void 9895 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot) 9896 { 9897 uint64_t rebuild_end_time = 0, resilver_end_time = 0; 9898 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE; 9899 boolean_t have_errorscrub = B_FALSE; 9900 boolean_t active_resilver = B_FALSE; 9901 pool_checkpoint_stat_t *pcs = NULL; 9902 pool_scan_stat_t *ps = NULL; 9903 uint_t c; 9904 time_t scrub_start = 0, errorscrub_start = 0; 9905 9906 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS, 9907 (uint64_t **)&ps, &c) == 0) { 9908 if (ps->pss_func == POOL_SCAN_RESILVER) { 9909 resilver_end_time = ps->pss_end_time; 9910 active_resilver = (ps->pss_state == DSS_SCANNING); 9911 } 9912 9913 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER); 9914 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB); 9915 scrub_start = ps->pss_start_time; 9916 if (c > offsetof(pool_scan_stat_t, 9917 pss_pass_error_scrub_pause) / 8) { 9918 have_errorscrub = (ps->pss_error_scrub_func == 9919 POOL_SCAN_ERRORSCRUB); 9920 errorscrub_start = ps->pss_error_scrub_start; 9921 } 9922 } 9923 9924 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time); 9925 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0)); 9926 9927 /* Always print the scrub status when available. */ 9928 if (have_scrub && scrub_start > errorscrub_start) 9929 print_scan_scrub_resilver_status(ps); 9930 else if (have_errorscrub && errorscrub_start >= scrub_start) 9931 print_err_scrub_status(ps); 9932 9933 /* 9934 * When there is an active resilver or rebuild print its status. 9935 * Otherwise print the status of the last resilver or rebuild. 9936 */ 9937 if (active_resilver || (!active_rebuild && have_resilver && 9938 resilver_end_time && resilver_end_time > rebuild_end_time)) { 9939 print_scan_scrub_resilver_status(ps); 9940 } else if (active_rebuild || (!active_resilver && have_rebuild && 9941 rebuild_end_time && rebuild_end_time > resilver_end_time)) { 9942 print_rebuild_status(zhp, nvroot); 9943 } 9944 9945 (void) nvlist_lookup_uint64_array(nvroot, 9946 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 9947 print_checkpoint_scan_warning(ps, pcs); 9948 } 9949 9950 /* 9951 * Print out detailed removal status. 
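*
* Illustrative lines from the format strings below (hypothetical
* vdev, sizes, and dates):
*
*	remove: Removal of vdev 2 copied 120G in 1h15m, completed on
*	Thu Jun 13 11:02:31 2024
*	remove: Evacuation of mirror-2 in progress since
*	Thu Jun 13 09:00:00 2024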
9952 */ 9953 static void 9954 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs) 9955 { 9956 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7]; 9957 time_t start, end; 9958 nvlist_t *config, *nvroot; 9959 nvlist_t **child; 9960 uint_t children; 9961 char *vdev_name; 9962 9963 if (prs == NULL || prs->prs_state == DSS_NONE) 9964 return; 9965 9966 /* 9967 * Determine name of vdev. 9968 */ 9969 config = zpool_get_config(zhp, NULL); 9970 nvroot = fnvlist_lookup_nvlist(config, 9971 ZPOOL_CONFIG_VDEV_TREE); 9972 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 9973 &child, &children) == 0); 9974 assert(prs->prs_removing_vdev < children); 9975 vdev_name = zpool_vdev_name(g_zfs, zhp, 9976 child[prs->prs_removing_vdev], B_TRUE); 9977 9978 printf_color(ANSI_BOLD, gettext("remove: ")); 9979 9980 start = prs->prs_start_time; 9981 end = prs->prs_end_time; 9982 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf)); 9983 9984 /* 9985 * Removal is finished or canceled. 9986 */ 9987 if (prs->prs_state == DSS_FINISHED) { 9988 uint64_t minutes_taken = (end - start) / 60; 9989 9990 (void) printf(gettext("Removal of vdev %llu copied %s " 9991 "in %lluh%um, completed on %s"), 9992 (longlong_t)prs->prs_removing_vdev, 9993 copied_buf, 9994 (u_longlong_t)(minutes_taken / 60), 9995 (uint_t)(minutes_taken % 60), 9996 ctime((time_t *)&end)); 9997 } else if (prs->prs_state == DSS_CANCELED) { 9998 (void) printf(gettext("Removal of %s canceled on %s"), 9999 vdev_name, ctime(&end)); 10000 } else { 10001 uint64_t copied, total, elapsed, mins_left, hours_left; 10002 double fraction_done; 10003 uint_t rate; 10004 10005 assert(prs->prs_state == DSS_SCANNING); 10006 10007 /* 10008 * Removal is in progress. 10009 */ 10010 (void) printf(gettext( 10011 "Evacuation of %s in progress since %s"), 10012 vdev_name, ctime(&start)); 10013 10014 copied = prs->prs_copied > 0 ? prs->prs_copied : 1; 10015 total = prs->prs_to_copy; 10016 fraction_done = (double)copied / total; 10017 10018 /* elapsed time for this pass */ 10019 elapsed = time(NULL) - prs->prs_start_time; 10020 elapsed = elapsed > 0 ? elapsed : 1; 10021 rate = copied / elapsed; 10022 rate = rate > 0 ? rate : 1; 10023 mins_left = ((total - copied) / rate) / 60; 10024 hours_left = mins_left / 60; 10025 10026 zfs_nicenum(copied, examined_buf, sizeof (examined_buf)); 10027 zfs_nicenum(total, total_buf, sizeof (total_buf)); 10028 zfs_nicenum(rate, rate_buf, sizeof (rate_buf)); 10029 10030 /* 10031 * do not print estimated time if hours_left is more than 10032 * 30 days 10033 */ 10034 (void) printf(gettext( 10035 "\t%s copied out of %s at %s/s, %.2f%% done"), 10036 examined_buf, total_buf, rate_buf, 100 * fraction_done); 10037 if (hours_left < (30 * 24)) { 10038 (void) printf(gettext(", %lluh%um to go\n"), 10039 (u_longlong_t)hours_left, (uint_t)(mins_left % 60)); 10040 } else { 10041 (void) printf(gettext( 10042 ", (copy is slow, no estimated time)\n")); 10043 } 10044 } 10045 free(vdev_name); 10046 10047 if (prs->prs_mapping_memory > 0) { 10048 char mem_buf[7]; 10049 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf)); 10050 (void) printf(gettext( 10051 "\t%s memory used for removed device mappings\n"), 10052 mem_buf); 10053 } 10054 } 10055 10056 /* 10057 * Print out detailed raidz expansion status. 
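*
* Illustrative line from the format strings below (hypothetical
* vdev, size, and date):
*
*	expand: expanded raidz1-0 copied 1.50T in 1 days 02:03:04,
*	on Thu Jun 13 11:02:31 2024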
10058 */ 10059 static void 10060 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres) 10061 { 10062 char copied_buf[7]; 10063 10064 if (pres == NULL || pres->pres_state == DSS_NONE) 10065 return; 10066 10067 /* 10068 * Determine name of vdev. 10069 */ 10070 nvlist_t *config = zpool_get_config(zhp, NULL); 10071 nvlist_t *nvroot = fnvlist_lookup_nvlist(config, 10072 ZPOOL_CONFIG_VDEV_TREE); 10073 nvlist_t **child; 10074 uint_t children; 10075 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 10076 &child, &children) == 0); 10077 assert(pres->pres_expanding_vdev < children); 10078 10079 printf_color(ANSI_BOLD, gettext("expand: ")); 10080 10081 time_t start = pres->pres_start_time; 10082 time_t end = pres->pres_end_time; 10083 char *vname = 10084 zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0); 10085 zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf)); 10086 10087 /* 10088 * Expansion is finished or canceled. 10089 */ 10090 if (pres->pres_state == DSS_FINISHED) { 10091 char time_buf[32]; 10092 secs_to_dhms(end - start, time_buf); 10093 10094 (void) printf(gettext("expanded %s-%u copied %s in %s, " 10095 "on %s"), vname, (int)pres->pres_expanding_vdev, 10096 copied_buf, time_buf, ctime((time_t *)&end)); 10097 } else { 10098 char examined_buf[7], total_buf[7], rate_buf[7]; 10099 uint64_t copied, total, elapsed, secs_left; 10100 double fraction_done; 10101 uint_t rate; 10102 10103 assert(pres->pres_state == DSS_SCANNING); 10104 10105 /* 10106 * Expansion is in progress. 10107 */ 10108 (void) printf(gettext( 10109 "expansion of %s-%u in progress since %s"), 10110 vname, (int)pres->pres_expanding_vdev, ctime(&start)); 10111 10112 copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1; 10113 total = pres->pres_to_reflow; 10114 fraction_done = (double)copied / total; 10115 10116 /* elapsed time for this pass */ 10117 elapsed = time(NULL) - pres->pres_start_time; 10118 elapsed = elapsed > 0 ? elapsed : 1; 10119 rate = copied / elapsed; 10120 rate = rate > 0 ? 
rate : 1; 10121 secs_left = (total - copied) / rate; 10122 10123 zfs_nicenum(copied, examined_buf, sizeof (examined_buf)); 10124 zfs_nicenum(total, total_buf, sizeof (total_buf)); 10125 zfs_nicenum(rate, rate_buf, sizeof (rate_buf)); 10126 10127 /* 10128 * do not print estimated time if secs_left is more than 10129 * 30 days 10130 */ 10131 (void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"), 10132 examined_buf, total_buf, rate_buf, 100 * fraction_done); 10133 if (pres->pres_waiting_for_resilver) { 10134 (void) printf(gettext(", paused for resilver or " 10135 "clear\n")); 10136 } else if (secs_left < (30 * 24 * 3600)) { 10137 char time_buf[32]; 10138 secs_to_dhms(secs_left, time_buf); 10139 (void) printf(gettext(", %s to go\n"), time_buf); 10140 } else { 10141 (void) printf(gettext( 10142 ", (copy is slow, no estimated time)\n")); 10143 } 10144 } 10145 free(vname); 10146 } 10147 static void 10148 print_checkpoint_status(pool_checkpoint_stat_t *pcs) 10149 { 10150 time_t start; 10151 char space_buf[7]; 10152 10153 if (pcs == NULL || pcs->pcs_state == CS_NONE) 10154 return; 10155 10156 (void) printf(gettext("checkpoint: ")); 10157 10158 start = pcs->pcs_start_time; 10159 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf)); 10160 10161 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) { 10162 char *date = ctime(&start); 10163 10164 /* 10165 * ctime() adds a newline at the end of the generated 10166 * string, thus the weird format specifier and the 10167 * strlen() call used to chop it off from the output. 10168 */ 10169 (void) printf(gettext("created %.*s, consumes %s\n"), 10170 (int)(strlen(date) - 1), date, space_buf); 10171 return; 10172 } 10173 10174 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 10175 10176 (void) printf(gettext("discarding, %s remaining.\n"), 10177 space_buf); 10178 } 10179 10180 static void 10181 print_error_log(zpool_handle_t *zhp) 10182 { 10183 nvlist_t *nverrlist = NULL; 10184 nvpair_t *elem; 10185 char *pathname; 10186 size_t len = MAXPATHLEN * 2; 10187 10188 if (zpool_get_errlog(zhp, &nverrlist) != 0) 10189 return; 10190 10191 (void) printf("errors: Permanent errors have been " 10192 "detected in the following files:\n\n"); 10193 10194 pathname = safe_malloc(len); 10195 elem = NULL; 10196 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) { 10197 nvlist_t *nv; 10198 uint64_t dsobj, obj; 10199 10200 verify(nvpair_value_nvlist(elem, &nv) == 0); 10201 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET, 10202 &dsobj) == 0); 10203 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, 10204 &obj) == 0); 10205 zpool_obj_to_path(zhp, dsobj, obj, pathname, len); 10206 (void) printf("%7s %s\n", "", pathname); 10207 } 10208 free(pathname); 10209 nvlist_free(nverrlist); 10210 } 10211 10212 static void 10213 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares, 10214 uint_t nspares) 10215 { 10216 uint_t i; 10217 char *name; 10218 10219 if (nspares == 0) 10220 return; 10221 10222 (void) printf(gettext("\tspares\n")); 10223 10224 for (i = 0; i < nspares; i++) { 10225 name = zpool_vdev_name(g_zfs, zhp, spares[i], 10226 cb->cb_name_flags); 10227 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL); 10228 free(name); 10229 } 10230 } 10231 10232 static void 10233 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache, 10234 uint_t nl2cache) 10235 { 10236 uint_t i; 10237 char *name; 10238 10239 if (nl2cache == 0) 10240 return; 10241 10242 (void) printf(gettext("\tcache\n")); 10243 10244 for (i = 0; i < nl2cache; 
i++) { 10245 name = zpool_vdev_name(g_zfs, zhp, l2cache[i], 10246 cb->cb_name_flags); 10247 print_status_config(zhp, cb, name, l2cache[i], 2, 10248 B_FALSE, NULL); 10249 free(name); 10250 } 10251 } 10252 10253 static void 10254 print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal) 10255 { 10256 ddt_histogram_t *ddh; 10257 ddt_stat_t *dds; 10258 ddt_object_t *ddo; 10259 uint_t c; 10260 /* Extra space provided for literal display */ 10261 char dspace[32], mspace[32], cspace[32]; 10262 uint64_t cspace_prop; 10263 enum zfs_nicenum_format format; 10264 zprop_source_t src; 10265 10266 /* 10267 * If the pool was faulted then we may not have been able to 10268 * obtain the config. Otherwise, if we have anything in the dedup 10269 * table continue processing the stats. 10270 */ 10271 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS, 10272 (uint64_t **)&ddo, &c) != 0) 10273 return; 10274 10275 (void) printf("\n"); 10276 (void) printf(gettext(" dedup: ")); 10277 if (ddo->ddo_count == 0) { 10278 (void) printf(gettext("no DDT entries\n")); 10279 return; 10280 } 10281 10282 /* 10283 * Squash cached size into in-core size to handle race. 10284 * Only include cached size if it is available. 10285 */ 10286 cspace_prop = zpool_get_prop_int(zhp, ZPOOL_PROP_DEDUPCACHED, &src); 10287 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace); 10288 format = literal ? ZFS_NICENUM_RAW : ZFS_NICENUM_1024; 10289 zfs_nicenum_format(cspace_prop, cspace, sizeof (cspace), format); 10290 zfs_nicenum_format(ddo->ddo_dspace, dspace, sizeof (dspace), format); 10291 zfs_nicenum_format(ddo->ddo_mspace, mspace, sizeof (mspace), format); 10292 (void) printf("DDT entries %llu, size %s on disk, %s in core", 10293 (u_longlong_t)ddo->ddo_count, 10294 dspace, 10295 mspace); 10296 if (src != ZPROP_SRC_DEFAULT) { 10297 (void) printf(", %s cached (%.02f%%)", 10298 cspace, 10299 (double)cspace_prop / (double)ddo->ddo_mspace * 100.0); 10300 } 10301 (void) printf("\n"); 10302 10303 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, 10304 (uint64_t **)&dds, &c) == 0); 10305 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM, 10306 (uint64_t **)&ddh, &c) == 0); 10307 zpool_dump_ddt(dds, ddh); 10308 } 10309 10310 #define ST_SIZE 4096 10311 #define AC_SIZE 2048 10312 10313 static void 10314 print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp, 10315 zpool_status_t reason, zpool_errata_t errata, nvlist_t *item) 10316 { 10317 char status[ST_SIZE]; 10318 char action[AC_SIZE]; 10319 memset(status, 0, ST_SIZE); 10320 memset(action, 0, AC_SIZE); 10321 10322 switch (reason) { 10323 case ZPOOL_STATUS_MISSING_DEV_R: 10324 snprintf(status, ST_SIZE, gettext("One or more devices could " 10325 "not be opened. Sufficient replicas exist for\n\tthe pool " 10326 "to continue functioning in a degraded state.\n")); 10327 snprintf(action, AC_SIZE, gettext("Attach the missing device " 10328 "and online it using 'zpool online'.\n")); 10329 break; 10330 10331 case ZPOOL_STATUS_MISSING_DEV_NR: 10332 snprintf(status, ST_SIZE, gettext("One or more devices could " 10333 "not be opened. There are insufficient\n\treplicas for the" 10334 " pool to continue functioning.\n")); 10335 snprintf(action, AC_SIZE, gettext("Attach the missing device " 10336 "and online it using 'zpool online'.\n")); 10337 break; 10338 10339 case ZPOOL_STATUS_CORRUPT_LABEL_R: 10340 snprintf(status, ST_SIZE, gettext("One or more devices could " 10341 "not be used because the label is missing or\n\tinvalid. 
" 10342 "Sufficient replicas exist for the pool to continue\n\t" 10343 "functioning in a degraded state.\n")); 10344 snprintf(action, AC_SIZE, gettext("Replace the device using " 10345 "'zpool replace'.\n")); 10346 break; 10347 10348 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 10349 snprintf(status, ST_SIZE, gettext("One or more devices could " 10350 "not be used because the label is missing \n\tor invalid. " 10351 "There are insufficient replicas for the pool to " 10352 "continue\n\tfunctioning.\n")); 10353 zpool_explain_recover(zpool_get_handle(zhp), 10354 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL), 10355 action, AC_SIZE); 10356 break; 10357 10358 case ZPOOL_STATUS_FAILING_DEV: 10359 snprintf(status, ST_SIZE, gettext("One or more devices has " 10360 "experienced an unrecoverable error. An\n\tattempt was " 10361 "made to correct the error. Applications are " 10362 "unaffected.\n")); 10363 snprintf(action, AC_SIZE, gettext("Determine if the " 10364 "device needs to be replaced, and clear the errors\n\tusing" 10365 " 'zpool clear' or replace the device with 'zpool " 10366 "replace'.\n")); 10367 break; 10368 10369 case ZPOOL_STATUS_OFFLINE_DEV: 10370 snprintf(status, ST_SIZE, gettext("One or more devices has " 10371 "been taken offline by the administrator.\n\tSufficient " 10372 "replicas exist for the pool to continue functioning in " 10373 "a\n\tdegraded state.\n")); 10374 snprintf(action, AC_SIZE, gettext("Online the device " 10375 "using 'zpool online' or replace the device with\n\t'zpool " 10376 "replace'.\n")); 10377 break; 10378 10379 case ZPOOL_STATUS_REMOVED_DEV: 10380 snprintf(status, ST_SIZE, gettext("One or more devices has " 10381 "been removed by the administrator.\n\tSufficient " 10382 "replicas exist for the pool to continue functioning in " 10383 "a\n\tdegraded state.\n")); 10384 snprintf(action, AC_SIZE, gettext("Online the device " 10385 "using zpool online' or replace the device with\n\t'zpool " 10386 "replace'.\n")); 10387 break; 10388 10389 case ZPOOL_STATUS_RESILVERING: 10390 case ZPOOL_STATUS_REBUILDING: 10391 snprintf(status, ST_SIZE, gettext("One or more devices is " 10392 "currently being resilvered. The pool will\n\tcontinue " 10393 "to function, possibly in a degraded state.\n")); 10394 snprintf(action, AC_SIZE, gettext("Wait for the resilver to " 10395 "complete.\n")); 10396 break; 10397 10398 case ZPOOL_STATUS_REBUILD_SCRUB: 10399 snprintf(status, ST_SIZE, gettext("One or more devices have " 10400 "been sequentially resilvered, scrubbing\n\tthe pool " 10401 "is recommended.\n")); 10402 snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to " 10403 "verify all data checksums.\n")); 10404 break; 10405 10406 case ZPOOL_STATUS_CORRUPT_DATA: 10407 snprintf(status, ST_SIZE, gettext("One or more devices has " 10408 "experienced an error resulting in data\n\tcorruption. " 10409 "Applications may be affected.\n")); 10410 snprintf(action, AC_SIZE, gettext("Restore the file in question" 10411 " if possible. Otherwise restore the\n\tentire pool from " 10412 "backup.\n")); 10413 break; 10414 10415 case ZPOOL_STATUS_CORRUPT_POOL: 10416 snprintf(status, ST_SIZE, gettext("The pool metadata is " 10417 "corrupted and the pool cannot be opened.\n")); 10418 zpool_explain_recover(zpool_get_handle(zhp), 10419 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL), 10420 action, AC_SIZE); 10421 break; 10422 10423 case ZPOOL_STATUS_VERSION_OLDER: 10424 snprintf(status, ST_SIZE, gettext("The pool is formatted using " 10425 "a legacy on-disk format. 
The pool can\n\tstill be used, " 10426 "but some features are unavailable.\n")); 10427 snprintf(action, AC_SIZE, gettext("Upgrade the pool using " 10428 "'zpool upgrade'. Once this is done, the\n\tpool will no " 10429 "longer be accessible on software that does not support\n\t" 10430 "feature flags.\n")); 10431 break; 10432 10433 case ZPOOL_STATUS_VERSION_NEWER: 10434 snprintf(status, ST_SIZE, gettext("The pool has been upgraded " 10435 "to a newer, incompatible on-disk version.\n\tThe pool " 10436 "cannot be accessed on this system.\n")); 10437 snprintf(action, AC_SIZE, gettext("Access the pool from a " 10438 "system running more recent software, or\n\trestore the " 10439 "pool from backup.\n")); 10440 break; 10441 10442 case ZPOOL_STATUS_FEAT_DISABLED: 10443 snprintf(status, ST_SIZE, gettext("Some supported and " 10444 "requested features are not enabled on the pool.\n\t" 10445 "The pool can still be used, but some features are " 10446 "unavailable.\n")); 10447 snprintf(action, AC_SIZE, gettext("Enable all features using " 10448 "'zpool upgrade'. Once this is done,\n\tthe pool may no " 10449 "longer be accessible by software that does not support\n\t" 10450 "the features. See zpool-features(7) for details.\n")); 10451 break; 10452 10453 case ZPOOL_STATUS_COMPATIBILITY_ERR: 10454 snprintf(status, ST_SIZE, gettext("This pool has a " 10455 "compatibility list specified, but it could not be\n\t" 10456 "read/parsed at this time. The pool can still be used, " 10457 "but this\n\tshould be investigated.\n")); 10458 snprintf(action, AC_SIZE, gettext("Check the value of the " 10459 "'compatibility' property against the\n\t" 10460 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or " 10461 ZPOOL_DATA_COMPAT_D ".\n")); 10462 break; 10463 10464 case ZPOOL_STATUS_INCOMPATIBLE_FEAT: 10465 snprintf(status, ST_SIZE, gettext("One or more features " 10466 "are enabled on the pool despite not being\n\t" 10467 "requested by the 'compatibility' property.\n")); 10468 snprintf(action, AC_SIZE, gettext("Consider setting " 10469 "'compatibility' to an appropriate value, or\n\t" 10470 "adding needed features to the relevant file in\n\t" 10471 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n")); 10472 break; 10473 10474 case ZPOOL_STATUS_UNSUP_FEAT_READ: 10475 snprintf(status, ST_SIZE, gettext("The pool cannot be accessed " 10476 "on this system because it uses the\n\tfollowing feature(s)" 10477 " not supported on this system:\n")); 10478 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status, 10479 ST_SIZE); 10480 snprintf(action, AC_SIZE, gettext("Access the pool from a " 10481 "system that supports the required feature(s),\n\tor " 10482 "restore the pool from backup.\n")); 10483 break; 10484 10485 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 10486 snprintf(status, ST_SIZE, gettext("The pool can only be " 10487 "accessed in read-only mode on this system. It\n\tcannot be" 10488 " accessed in read-write mode because it uses the " 10489 "following\n\tfeature(s) not supported on this system:\n")); 10490 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status, 10491 ST_SIZE); 10492 snprintf(action, AC_SIZE, gettext("The pool cannot be accessed " 10493 "in read-write mode. 
Import the pool with\n" 10494 "\t\"-o readonly=on\", access the pool from a system that " 10495 "supports the\n\trequired feature(s), or restore the " 10496 "pool from backup.\n")); 10497 break; 10498 10499 case ZPOOL_STATUS_FAULTED_DEV_R: 10500 snprintf(status, ST_SIZE, gettext("One or more devices are " 10501 "faulted in response to persistent errors.\n\tSufficient " 10502 "replicas exist for the pool to continue functioning " 10503 "in a\n\tdegraded state.\n")); 10504 snprintf(action, AC_SIZE, gettext("Replace the faulted device, " 10505 "or use 'zpool clear' to mark the device\n\trepaired.\n")); 10506 break; 10507 10508 case ZPOOL_STATUS_FAULTED_DEV_NR: 10509 snprintf(status, ST_SIZE, gettext("One or more devices are " 10510 "faulted in response to persistent errors. There are " 10511 "insufficient replicas for the pool to\n\tcontinue " 10512 "functioning.\n")); 10513 snprintf(action, AC_SIZE, gettext("Destroy and re-create the " 10514 "pool from a backup source. Manually marking the device\n" 10515 "\trepaired using 'zpool clear' may allow some data " 10516 "to be recovered.\n")); 10517 break; 10518 10519 case ZPOOL_STATUS_IO_FAILURE_MMP: 10520 snprintf(status, ST_SIZE, gettext("The pool is suspended " 10521 "because multihost writes failed or were delayed;\n\t" 10522 "another system could import the pool undetected.\n")); 10523 snprintf(action, AC_SIZE, gettext("Make sure the pool's devices" 10524 " are connected, then reboot your system and\n\timport the " 10525 "pool or run 'zpool clear' to resume the pool.\n")); 10526 break; 10527 10528 case ZPOOL_STATUS_IO_FAILURE_WAIT: 10529 case ZPOOL_STATUS_IO_FAILURE_CONTINUE: 10530 snprintf(status, ST_SIZE, gettext("One or more devices are " 10531 "faulted in response to IO failures.\n")); 10532 snprintf(action, AC_SIZE, gettext("Make sure the affected " 10533 "devices are connected, then run 'zpool clear'.\n")); 10534 break; 10535 10536 case ZPOOL_STATUS_BAD_LOG: 10537 snprintf(status, ST_SIZE, gettext("An intent log record " 10538 "could not be read.\n" 10539 "\tWaiting for administrator intervention to fix the " 10540 "faulted pool.\n")); 10541 snprintf(action, AC_SIZE, gettext("Either restore the affected " 10542 "device(s) and run 'zpool online',\n" 10543 "\tor ignore the intent log records by running " 10544 "'zpool clear'.\n")); 10545 break; 10546 10547 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 10548 snprintf(status, ST_SIZE, gettext("One or more devices are " 10549 "configured to use a non-native block size.\n" 10550 "\tExpect reduced performance.\n")); 10551 snprintf(action, AC_SIZE, gettext("Replace affected devices " 10552 "with devices that support the\n\tconfigured block size, " 10553 "or migrate data to a properly configured\n\tpool.\n")); 10554 break; 10555 10556 case ZPOOL_STATUS_HOSTID_MISMATCH: 10557 snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid" 10558 " and system hostid on imported pool.\n\tThis pool was " 10559 "previously imported into a system with a different " 10560 "hostid,\n\tand then was verbatim imported into this " 10561 "system.\n")); 10562 snprintf(action, AC_SIZE, gettext("Export this pool on all " 10563 "systems on which it is imported.\n" 10564 "\tThen import it to correct the mismatch.\n")); 10565 break; 10566 10567 case ZPOOL_STATUS_ERRATA: 10568 snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"), 10569 errata); 10570 switch (errata) { 10571 case ZPOOL_ERRATA_NONE: 10572 break; 10573 10574 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 10575 snprintf(action, AC_SIZE, gettext("To correct the 
issue" 10576 " run 'zpool scrub'.\n")); 10577 break; 10578 10579 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 10580 (void) strlcat(status, gettext("\tExisting encrypted " 10581 "datasets contain an on-disk incompatibility\n\t " 10582 "which needs to be corrected.\n"), ST_SIZE); 10583 snprintf(action, AC_SIZE, gettext("To correct the issue" 10584 " backup existing encrypted datasets to new\n\t" 10585 "encrypted datasets and destroy the old ones. " 10586 "'zfs mount -o ro' can\n\tbe used to temporarily " 10587 "mount existing encrypted datasets readonly.\n")); 10588 break; 10589 10590 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 10591 (void) strlcat(status, gettext("\tExisting encrypted " 10592 "snapshots and bookmarks contain an on-disk\n\t" 10593 "incompatibility. This may cause on-disk " 10594 "corruption if they are used\n\twith " 10595 "'zfs recv'.\n"), ST_SIZE); 10596 snprintf(action, AC_SIZE, gettext("To correct the" 10597 "issue, enable the bookmark_v2 feature. No " 10598 "additional\n\taction is needed if there are no " 10599 "encrypted snapshots or bookmarks.\n\tIf preserving" 10600 "the encrypted snapshots and bookmarks is required," 10601 " use\n\ta non-raw send to backup and restore them." 10602 " Alternately, they may be\n\tremoved to resolve " 10603 "the incompatibility.\n")); 10604 break; 10605 10606 default: 10607 /* 10608 * All errata which allow the pool to be imported 10609 * must contain an action message. 10610 */ 10611 assert(0); 10612 } 10613 break; 10614 10615 default: 10616 /* 10617 * The remaining errors can't actually be generated, yet. 10618 */ 10619 assert(reason == ZPOOL_STATUS_OK); 10620 } 10621 10622 if (status[0] != 0) { 10623 if (cbp->cb_json) 10624 fnvlist_add_string(item, "status", status); 10625 else { 10626 printf_color(ANSI_BOLD, gettext("status: ")); 10627 printf_color(ANSI_YELLOW, status); 10628 } 10629 } 10630 10631 if (action[0] != 0) { 10632 if (cbp->cb_json) 10633 fnvlist_add_string(item, "action", action); 10634 else { 10635 printf_color(ANSI_BOLD, gettext("action: ")); 10636 printf_color(ANSI_YELLOW, action); 10637 } 10638 } 10639 } 10640 10641 static int 10642 status_callback_json(zpool_handle_t *zhp, void *data) 10643 { 10644 status_cbdata_t *cbp = data; 10645 nvlist_t *config, *nvroot; 10646 const char *msgid; 10647 char pool_guid[256]; 10648 char msgbuf[256]; 10649 uint64_t guid; 10650 zpool_status_t reason; 10651 zpool_errata_t errata; 10652 uint_t c; 10653 vdev_stat_t *vs; 10654 nvlist_t *item, *d, *load_info, *vds; 10655 item = d = NULL; 10656 10657 /* If dedup stats were requested, also fetch dedupcached. */ 10658 if (cbp->cb_dedup_stats > 1) 10659 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME); 10660 reason = zpool_get_status(zhp, &msgid, &errata); 10661 /* 10662 * If we were given 'zpool status -x', only report those pools with 10663 * problems. 
10664 */ 10665 if (cbp->cb_explain && 10666 (reason == ZPOOL_STATUS_OK || 10667 reason == ZPOOL_STATUS_VERSION_OLDER || 10668 reason == ZPOOL_STATUS_FEAT_DISABLED || 10669 reason == ZPOOL_STATUS_COMPATIBILITY_ERR || 10670 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) { 10671 return (0); 10672 } 10673 10674 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools"); 10675 item = fnvlist_alloc(); 10676 vds = fnvlist_alloc(); 10677 fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int); 10678 config = zpool_get_config(zhp, NULL); 10679 10680 if (config != NULL) { 10681 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 10682 verify(nvlist_lookup_uint64_array(nvroot, 10683 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0); 10684 if (cbp->cb_json_pool_key_guid) { 10685 guid = fnvlist_lookup_uint64(config, 10686 ZPOOL_CONFIG_POOL_GUID); 10687 snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid); 10688 } 10689 cbp->cb_count++; 10690 10691 print_status_reason(zhp, cbp, reason, errata, item); 10692 if (msgid != NULL) { 10693 snprintf(msgbuf, 256, 10694 "https://openzfs.github.io/openzfs-docs/msg/%s", 10695 msgid); 10696 fnvlist_add_string(item, "msgid", msgid); 10697 fnvlist_add_string(item, "moreinfo", msgbuf); 10698 } 10699 10700 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 10701 &load_info) == 0) { 10702 fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO, 10703 load_info); 10704 } 10705 10706 scan_status_nvlist(zhp, cbp, nvroot, item); 10707 removal_status_nvlist(zhp, cbp, nvroot, item); 10708 checkpoint_status_nvlist(nvroot, cbp, item); 10709 raidz_expand_status_nvlist(zhp, cbp, nvroot, item); 10710 vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds); 10711 if (cbp->cb_flat_vdevs) { 10712 class_vdevs_nvlist(zhp, cbp, nvroot, 10713 VDEV_ALLOC_BIAS_DEDUP, vds); 10714 class_vdevs_nvlist(zhp, cbp, nvroot, 10715 VDEV_ALLOC_BIAS_SPECIAL, vds); 10716 class_vdevs_nvlist(zhp, cbp, nvroot, 10717 VDEV_ALLOC_CLASS_LOGS, vds); 10718 l2cache_nvlist(zhp, cbp, nvroot, vds); 10719 spares_nvlist(zhp, cbp, nvroot, vds); 10720 10721 fnvlist_add_nvlist(item, "vdevs", vds); 10722 fnvlist_free(vds); 10723 } else { 10724 fnvlist_add_nvlist(item, "vdevs", vds); 10725 fnvlist_free(vds); 10726 10727 class_vdevs_nvlist(zhp, cbp, nvroot, 10728 VDEV_ALLOC_BIAS_DEDUP, item); 10729 class_vdevs_nvlist(zhp, cbp, nvroot, 10730 VDEV_ALLOC_BIAS_SPECIAL, item); 10731 class_vdevs_nvlist(zhp, cbp, nvroot, 10732 VDEV_ALLOC_CLASS_LOGS, item); 10733 l2cache_nvlist(zhp, cbp, nvroot, item); 10734 spares_nvlist(zhp, cbp, nvroot, item); 10735 } 10736 dedup_stats_nvlist(zhp, cbp, item); 10737 errors_nvlist(zhp, cbp, item); 10738 } 10739 if (cbp->cb_json_pool_key_guid) { 10740 fnvlist_add_nvlist(d, pool_guid, item); 10741 } else { 10742 fnvlist_add_nvlist(d, zpool_get_name(zhp), 10743 item); 10744 } 10745 fnvlist_free(item); 10746 return (0); 10747 } 10748 10749 /* 10750 * Display a summary of pool status. Displays a summary such as: 10751 * 10752 * pool: tank 10753 * status: DEGRADED 10754 * reason: One or more devices ... 10755 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01 10756 * config: 10757 * mirror DEGRADED 10758 * c1t0d0 OK 10759 * c2t0d0 UNAVAIL 10760 * 10761 * When given the '-v' option, we print out the complete config. If the '-e' 10762 * option is specified, then we print out error rate information as well. 
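 * * With '-j' the same information is assembled into an nvlist by status_callback_json() above and emitted by zcmd_print_json().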
10763 */ 10764 static int 10765 status_callback(zpool_handle_t *zhp, void *data) 10766 { 10767 status_cbdata_t *cbp = data; 10768 nvlist_t *config, *nvroot; 10769 const char *msgid; 10770 zpool_status_t reason; 10771 zpool_errata_t errata; 10772 const char *health; 10773 uint_t c; 10774 vdev_stat_t *vs; 10775 10776 /* If dedup stats were requested, also fetch dedupcached. */ 10777 if (cbp->cb_dedup_stats > 1) 10778 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME); 10779 10780 config = zpool_get_config(zhp, NULL); 10781 reason = zpool_get_status(zhp, &msgid, &errata); 10782 10783 cbp->cb_count++; 10784 10785 /* 10786 * If we were given 'zpool status -x', only report those pools with 10787 * problems. 10788 */ 10789 if (cbp->cb_explain && 10790 (reason == ZPOOL_STATUS_OK || 10791 reason == ZPOOL_STATUS_VERSION_OLDER || 10792 reason == ZPOOL_STATUS_FEAT_DISABLED || 10793 reason == ZPOOL_STATUS_COMPATIBILITY_ERR || 10794 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) { 10795 if (!cbp->cb_allpools) { 10796 (void) printf(gettext("pool '%s' is healthy\n"), 10797 zpool_get_name(zhp)); 10798 if (cbp->cb_first) 10799 cbp->cb_first = B_FALSE; 10800 } 10801 return (0); 10802 } 10803 10804 if (cbp->cb_first) 10805 cbp->cb_first = B_FALSE; 10806 else 10807 (void) printf("\n"); 10808 10809 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 10810 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 10811 (uint64_t **)&vs, &c) == 0); 10812 10813 health = zpool_get_state_str(zhp); 10814 10815 printf(" "); 10816 printf_color(ANSI_BOLD, gettext("pool:")); 10817 printf(" %s\n", zpool_get_name(zhp)); 10818 fputc(' ', stdout); 10819 printf_color(ANSI_BOLD, gettext("state: ")); 10820 10821 printf_color(health_str_to_color(health), "%s", health); 10822 10823 fputc('\n', stdout); 10824 print_status_reason(zhp, cbp, reason, errata, NULL); 10825 10826 if (msgid != NULL) { 10827 printf(" "); 10828 printf_color(ANSI_BOLD, gettext("see:")); 10829 printf(gettext( 10830 " https://openzfs.github.io/openzfs-docs/msg/%s\n"), 10831 msgid); 10832 } 10833 10834 if (config != NULL) { 10835 uint64_t nerr; 10836 nvlist_t **spares, **l2cache; 10837 uint_t nspares, nl2cache; 10838 10839 print_scan_status(zhp, nvroot); 10840 10841 pool_removal_stat_t *prs = NULL; 10842 (void) nvlist_lookup_uint64_array(nvroot, 10843 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 10844 print_removal_status(zhp, prs); 10845 10846 pool_checkpoint_stat_t *pcs = NULL; 10847 (void) nvlist_lookup_uint64_array(nvroot, 10848 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 10849 print_checkpoint_status(pcs); 10850 10851 pool_raidz_expand_stat_t *pres = NULL; 10852 (void) nvlist_lookup_uint64_array(nvroot, 10853 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c); 10854 print_raidz_expand_status(zhp, pres); 10855 10856 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0, 10857 cbp->cb_name_flags | VDEV_NAME_TYPE_ID); 10858 if (cbp->cb_namewidth < 10) 10859 cbp->cb_namewidth = 10; 10860 10861 color_start(ANSI_BOLD); 10862 (void) printf(gettext("config:\n\n")); 10863 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"), 10864 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE", 10865 "CKSUM"); 10866 color_end(); 10867 10868 if (cbp->cb_print_slow_ios) { 10869 printf_color(ANSI_BOLD, " %5s", gettext("SLOW")); 10870 } 10871 10872 if (cbp->cb_print_power) { 10873 printf_color(ANSI_BOLD, " %5s", gettext("POWER")); 10874 } 10875 10876 if (cbp->vcdl != NULL) 10877 print_cmd_columns(cbp->vcdl, 0); 10878 10879 printf("\n"); 10880 10881 
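/* Root vdev tree first, then allocation classes, cache, and spare devices. */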
print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0, 10882 B_FALSE, NULL); 10883 10884 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP); 10885 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 10886 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS); 10887 10888 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 10889 &l2cache, &nl2cache) == 0) 10890 print_l2cache(zhp, cbp, l2cache, nl2cache); 10891 10892 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 10893 &spares, &nspares) == 0) 10894 print_spares(zhp, cbp, spares, nspares); 10895 10896 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, 10897 &nerr) == 0) { 10898 (void) printf("\n"); 10899 if (nerr == 0) { 10900 (void) printf(gettext( 10901 "errors: No known data errors\n")); 10902 } else if (!cbp->cb_verbose) { 10903 color_start(ANSI_RED); 10904 (void) printf(gettext("errors: %llu data " 10905 "errors, use '-v' for a list\n"), 10906 (u_longlong_t)nerr); 10907 color_end(); 10908 } else { 10909 print_error_log(zhp); 10910 } 10911 } 10912 10913 if (cbp->cb_dedup_stats) 10914 print_dedup_stats(zhp, config, cbp->cb_literal); 10915 } else { 10916 (void) printf(gettext("config: The configuration cannot be " 10917 "determined.\n")); 10918 } 10919 10920 return (0); 10921 } 10922 10923 /* 10924 * zpool status [-c [script1,script2,...]] [-DegijLpPstvx] [--power] [-T d|u] ... 10925 * [pool] [interval [count]] 10926 * 10927 * -c CMD For each vdev, run command CMD 10928 * -D Display dedup status (undocumented) 10929 * -e Display only unhealthy vdevs 10930 * -g Display guid for individual vdev name. 10931 * -i Display vdev initialization status. 10932 * -L Follow links when resolving vdev path name. 10933 * -p Display values in parsable (exact) format. 10934 * -P Display full path for vdev name. 10935 * -s Display slow IOs column. 10936 * -t Display vdev TRIM status. 10937 * -T Display a timestamp in date(1) or Unix format 10938 * -v Display complete error logs 10939 * -x Display only pools with potential problems 10940 * -j Display output in JSON format 10941 * --power Display vdev enclosure slot power status 10942 * --json-int Display numbers in integer format instead of string 10943 * --json-flat-vdevs Display vdevs in flat hierarchy 10944 * --json-pool-key-guid Use pool GUID as key for pool objects 10945 * 10946 * Describes the health status of all pools or some subset. 
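 * * Example of querying a single healthy pool with -x (pool name hypothetical): * # zpool status -x tank * pool 'tank' is healthy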
10947 */ 10948 int 10949 zpool_do_status(int argc, char **argv) 10950 { 10951 int c; 10952 int ret; 10953 float interval = 0; 10954 unsigned long count = 0; 10955 status_cbdata_t cb = { 0 }; 10956 nvlist_t *data; 10957 char *cmd = NULL; 10958 10959 struct option long_options[] = { 10960 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 10961 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT}, 10962 {"json-flat-vdevs", no_argument, NULL, 10963 ZPOOL_OPTION_JSON_FLAT_VDEVS}, 10964 {"json-pool-key-guid", no_argument, NULL, 10965 ZPOOL_OPTION_POOL_KEY_GUID}, 10966 {0, 0, 0, 0} 10967 }; 10968 10969 /* check options */ 10970 while ((c = getopt_long(argc, argv, "c:jDegiLpPstT:vx", long_options, 10971 NULL)) != -1) { 10972 switch (c) { 10973 case 'c': 10974 if (cmd != NULL) { 10975 fprintf(stderr, 10976 gettext("Can't set -c flag twice\n")); 10977 exit(1); 10978 } 10979 10980 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 10981 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 10982 fprintf(stderr, gettext( 10983 "Can't run -c, disabled by " 10984 "ZPOOL_SCRIPTS_ENABLED.\n")); 10985 exit(1); 10986 } 10987 10988 if ((getuid() <= 0 || geteuid() <= 0) && 10989 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 10990 fprintf(stderr, gettext( 10991 "Can't run -c with root privileges " 10992 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 10993 exit(1); 10994 } 10995 cmd = optarg; 10996 break; 10997 case 'D': 10998 if (++cb.cb_dedup_stats > 2) 10999 cb.cb_dedup_stats = 2; 11000 break; 11001 case 'e': 11002 cb.cb_print_unhealthy = B_TRUE; 11003 break; 11004 case 'g': 11005 cb.cb_name_flags |= VDEV_NAME_GUID; 11006 break; 11007 case 'i': 11008 cb.cb_print_vdev_init = B_TRUE; 11009 break; 11010 case 'L': 11011 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 11012 break; 11013 case 'p': 11014 cb.cb_literal = B_TRUE; 11015 break; 11016 case 'P': 11017 cb.cb_name_flags |= VDEV_NAME_PATH; 11018 break; 11019 case 's': 11020 cb.cb_print_slow_ios = B_TRUE; 11021 break; 11022 case 't': 11023 cb.cb_print_vdev_trim = B_TRUE; 11024 break; 11025 case 'T': 11026 get_timestamp_arg(*optarg); 11027 break; 11028 case 'v': 11029 cb.cb_verbose = B_TRUE; 11030 break; 11031 case 'j': 11032 cb.cb_json = B_TRUE; 11033 break; 11034 case 'x': 11035 cb.cb_explain = B_TRUE; 11036 break; 11037 case ZPOOL_OPTION_POWER: 11038 cb.cb_print_power = B_TRUE; 11039 break; 11040 case ZPOOL_OPTION_JSON_FLAT_VDEVS: 11041 cb.cb_flat_vdevs = B_TRUE; 11042 break; 11043 case ZPOOL_OPTION_JSON_NUMS_AS_INT: 11044 cb.cb_json_as_int = B_TRUE; 11045 cb.cb_literal = B_TRUE; 11046 break; 11047 case ZPOOL_OPTION_POOL_KEY_GUID: 11048 cb.cb_json_pool_key_guid = B_TRUE; 11049 break; 11050 case '?': 11051 if (optopt == 'c') { 11052 print_zpool_script_list("status"); 11053 exit(0); 11054 } else { 11055 fprintf(stderr, 11056 gettext("invalid option '%c'\n"), optopt); 11057 } 11058 usage(B_FALSE); 11059 } 11060 } 11061 11062 argc -= optind; 11063 argv += optind; 11064 11065 get_interval_count(&argc, argv, &interval, &count); 11066 11067 if (argc == 0) 11068 cb.cb_allpools = B_TRUE; 11069 11070 cb.cb_first = B_TRUE; 11071 cb.cb_print_status = B_TRUE; 11072 11073 if (cb.cb_flat_vdevs && !cb.cb_json) { 11074 fprintf(stderr, gettext("'--json-flat-vdevs' only works with" 11075 " '-j' option\n")); 11076 usage(B_FALSE); 11077 } 11078 11079 if (cb.cb_json_as_int && !cb.cb_json) { 11080 (void) fprintf(stderr, gettext("'--json-int' only works with" 11081 " '-j' option\n")); 11082 usage(B_FALSE); 11083 } 11084 11085 if (!cb.cb_json && cb.cb_json_pool_key_guid) { 11086 
(void) fprintf(stderr, gettext("'--json-pool-key-guid' only" 11087 " works with '-j' option\n")); 11088 usage(B_FALSE); 11089 } 11090 11091 for (;;) { 11092 if (cb.cb_json) { 11093 cb.cb_jsobj = zpool_json_schema(0, 1); 11094 data = fnvlist_alloc(); 11095 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data); 11096 fnvlist_free(data); 11097 } 11098 11099 if (timestamp_fmt != NODATE) { 11100 if (cb.cb_json) { 11101 if (cb.cb_json_as_int) { 11102 fnvlist_add_uint64(cb.cb_jsobj, "time", 11103 time(NULL)); 11104 } else { 11105 char ts[128]; 11106 get_timestamp(timestamp_fmt, ts, 128); 11107 fnvlist_add_string(cb.cb_jsobj, "time", 11108 ts); 11109 } 11110 } else 11111 print_timestamp(timestamp_fmt); 11112 } 11113 11114 if (cmd != NULL) 11115 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd, 11116 NULL, NULL, 0, 0); 11117 11118 if (cb.cb_json) { 11119 ret = for_each_pool(argc, argv, B_TRUE, NULL, 11120 ZFS_TYPE_POOL, cb.cb_literal, 11121 status_callback_json, &cb); 11122 } else { 11123 ret = for_each_pool(argc, argv, B_TRUE, NULL, 11124 ZFS_TYPE_POOL, cb.cb_literal, 11125 status_callback, &cb); 11126 } 11127 11128 if (cb.vcdl != NULL) 11129 free_vdev_cmd_data_list(cb.vcdl); 11130 11131 if (cb.cb_json) { 11132 if (ret == 0) 11133 zcmd_print_json(cb.cb_jsobj); 11134 else 11135 nvlist_free(cb.cb_jsobj); 11136 } else { 11137 if (argc == 0 && cb.cb_count == 0) { 11138 (void) fprintf(stderr, "%s", 11139 gettext("no pools available\n")); 11140 } else if (cb.cb_explain && cb.cb_first && 11141 cb.cb_allpools) { 11142 (void) printf("%s", 11143 gettext("all pools are healthy\n")); 11144 } 11145 } 11146 11147 if (ret != 0) 11148 return (ret); 11149 11150 if (interval == 0) 11151 break; 11152 11153 if (count != 0 && --count == 0) 11154 break; 11155 11156 (void) fflush(stdout); 11157 (void) fsleep(interval); 11158 } 11159 11160 return (0); 11161 } 11162 11163 typedef struct upgrade_cbdata { 11164 int cb_first; 11165 int cb_argc; 11166 uint64_t cb_version; 11167 char **cb_argv; 11168 } upgrade_cbdata_t; 11169 11170 static int 11171 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs) 11172 { 11173 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION); 11174 int *count = (int *)unsupp_fs; 11175 11176 if (zfs_version > ZPL_VERSION) { 11177 (void) printf(gettext("%s (v%d) is not supported by this " 11178 "implementation of ZFS.\n"), 11179 zfs_get_name(zhp), zfs_version); 11180 (*count)++; 11181 } 11182 11183 zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs); 11184 11185 zfs_close(zhp); 11186 11187 return (0); 11188 } 11189 11190 static int 11191 upgrade_version(zpool_handle_t *zhp, uint64_t version) 11192 { 11193 int ret; 11194 nvlist_t *config; 11195 uint64_t oldversion; 11196 int unsupp_fs = 0; 11197 11198 config = zpool_get_config(zhp, NULL); 11199 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 11200 &oldversion) == 0); 11201 11202 char compat[ZFS_MAXPROPLEN]; 11203 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat, 11204 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 11205 compat[0] = '\0'; 11206 11207 assert(SPA_VERSION_IS_SUPPORTED(oldversion)); 11208 assert(oldversion < version); 11209 11210 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs); 11211 if (ret != 0) 11212 return (ret); 11213 11214 if (unsupp_fs) { 11215 (void) fprintf(stderr, gettext("Upgrade not performed due " 11216 "to %d unsupported filesystems (max v%d).\n"), 11217 unsupp_fs, (int)ZPL_VERSION); 11218 return (1); 11219 } 11220 11221 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) { 11222 (void) 
fprintf(stderr, gettext("Upgrade not performed because " 11223 "'compatibility' property set to '" 11224 ZPOOL_COMPAT_LEGACY "'.\n")); 11225 return (1); 11226 } 11227 11228 ret = zpool_upgrade(zhp, version); 11229 if (ret != 0) 11230 return (ret); 11231 11232 if (version >= SPA_VERSION_FEATURES) { 11233 (void) printf(gettext("Successfully upgraded " 11234 "'%s' from version %llu to feature flags.\n"), 11235 zpool_get_name(zhp), (u_longlong_t)oldversion); 11236 } else { 11237 (void) printf(gettext("Successfully upgraded " 11238 "'%s' from version %llu to version %llu.\n"), 11239 zpool_get_name(zhp), (u_longlong_t)oldversion, 11240 (u_longlong_t)version); 11241 } 11242 11243 return (0); 11244 } 11245 11246 static int 11247 upgrade_enable_all(zpool_handle_t *zhp, int *countp) 11248 { 11249 int i, ret, count; 11250 boolean_t firstff = B_TRUE; 11251 nvlist_t *enabled = zpool_get_features(zhp); 11252 11253 char compat[ZFS_MAXPROPLEN]; 11254 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat, 11255 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 11256 compat[0] = '\0'; 11257 11258 boolean_t requested_features[SPA_FEATURES]; 11259 if (zpool_do_load_compat(compat, requested_features) != 11260 ZPOOL_COMPATIBILITY_OK) 11261 return (-1); 11262 11263 count = 0; 11264 for (i = 0; i < SPA_FEATURES; i++) { 11265 const char *fname = spa_feature_table[i].fi_uname; 11266 const char *fguid = spa_feature_table[i].fi_guid; 11267 11268 if (!spa_feature_table[i].fi_zfs_mod_supported) 11269 continue; 11270 11271 if (!nvlist_exists(enabled, fguid) && requested_features[i]) { 11272 char *propname; 11273 verify(-1 != asprintf(&propname, "feature@%s", fname)); 11274 ret = zpool_set_prop(zhp, propname, 11275 ZFS_FEATURE_ENABLED); 11276 if (ret != 0) { 11277 free(propname); 11278 return (ret); 11279 } 11280 count++; 11281 11282 if (firstff) { 11283 (void) printf(gettext("Enabled the " 11284 "following features on '%s':\n"), 11285 zpool_get_name(zhp)); 11286 firstff = B_FALSE; 11287 } 11288 (void) printf(gettext(" %s\n"), fname); 11289 free(propname); 11290 } 11291 } 11292 11293 if (countp != NULL) 11294 *countp = count; 11295 return (0); 11296 } 11297 11298 static int 11299 upgrade_cb(zpool_handle_t *zhp, void *arg) 11300 { 11301 upgrade_cbdata_t *cbp = arg; 11302 nvlist_t *config; 11303 uint64_t version; 11304 boolean_t modified_pool = B_FALSE; 11305 int ret; 11306 11307 config = zpool_get_config(zhp, NULL); 11308 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 11309 &version) == 0); 11310 11311 assert(SPA_VERSION_IS_SUPPORTED(version)); 11312 11313 if (version < cbp->cb_version) { 11314 cbp->cb_first = B_FALSE; 11315 ret = upgrade_version(zhp, cbp->cb_version); 11316 if (ret != 0) 11317 return (ret); 11318 modified_pool = B_TRUE; 11319 11320 /* 11321 * If they did "zpool upgrade -a", then we could 11322 * be doing ioctls to different pools. We need 11323 * to log this history once to each pool, and bypass 11324 * the normal history logging that happens in main(). 
11325 */ 11326 (void) zpool_log_history(g_zfs, history_str); 11327 log_history = B_FALSE; 11328 } 11329 11330 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 11331 int count; 11332 ret = upgrade_enable_all(zhp, &count); 11333 if (ret != 0) 11334 return (ret); 11335 11336 if (count > 0) { 11337 cbp->cb_first = B_FALSE; 11338 modified_pool = B_TRUE; 11339 } 11340 } 11341 11342 if (modified_pool) { 11343 (void) printf("\n"); 11344 (void) after_zpool_upgrade(zhp); 11345 } 11346 11347 return (0); 11348 } 11349 11350 static int 11351 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg) 11352 { 11353 upgrade_cbdata_t *cbp = arg; 11354 nvlist_t *config; 11355 uint64_t version; 11356 11357 config = zpool_get_config(zhp, NULL); 11358 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 11359 &version) == 0); 11360 11361 assert(SPA_VERSION_IS_SUPPORTED(version)); 11362 11363 if (version < SPA_VERSION_FEATURES) { 11364 if (cbp->cb_first) { 11365 (void) printf(gettext("The following pools are " 11366 "formatted with legacy version numbers and can\n" 11367 "be upgraded to use feature flags. After " 11368 "being upgraded, these pools\nwill no " 11369 "longer be accessible by software that does not " 11370 "support feature\nflags.\n\n" 11371 "Note that setting a pool's 'compatibility' " 11372 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n" 11373 "inhibit upgrades.\n\n")); 11374 (void) printf(gettext("VER POOL\n")); 11375 (void) printf(gettext("--- ------------\n")); 11376 cbp->cb_first = B_FALSE; 11377 } 11378 11379 (void) printf("%2llu %s\n", (u_longlong_t)version, 11380 zpool_get_name(zhp)); 11381 } 11382 11383 return (0); 11384 } 11385 11386 static int 11387 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg) 11388 { 11389 upgrade_cbdata_t *cbp = arg; 11390 nvlist_t *config; 11391 uint64_t version; 11392 11393 config = zpool_get_config(zhp, NULL); 11394 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 11395 &version) == 0); 11396 11397 if (version >= SPA_VERSION_FEATURES) { 11398 int i; 11399 boolean_t poolfirst = B_TRUE; 11400 nvlist_t *enabled = zpool_get_features(zhp); 11401 11402 for (i = 0; i < SPA_FEATURES; i++) { 11403 const char *fguid = spa_feature_table[i].fi_guid; 11404 const char *fname = spa_feature_table[i].fi_uname; 11405 11406 if (!spa_feature_table[i].fi_zfs_mod_supported) 11407 continue; 11408 11409 if (!nvlist_exists(enabled, fguid)) { 11410 if (cbp->cb_first) { 11411 (void) printf(gettext("\nSome " 11412 "supported features are not " 11413 "enabled on the following pools. " 11414 "Once a\nfeature is enabled the " 11415 "pool may become incompatible with " 11416 "software\nthat does not support " 11417 "the feature. See " 11418 "zpool-features(7) for " 11419 "details.\n\n" 11420 "Note that the pool " 11421 "'compatibility' feature can be " 11422 "used to inhibit\nfeature " 11423 "upgrades.\n\n")); 11424 (void) printf(gettext("POOL " 11425 "FEATURE\n")); 11426 (void) printf(gettext("------" 11427 "---------\n")); 11428 cbp->cb_first = B_FALSE; 11429 } 11430 11431 if (poolfirst) { 11432 (void) printf(gettext("%s\n"), 11433 zpool_get_name(zhp)); 11434 poolfirst = B_FALSE; 11435 } 11436 11437 (void) printf(gettext(" %s\n"), fname); 11438 } 11439 /* 11440 * If they did "zpool upgrade -a", then we could 11441 * be doing ioctls to different pools. We need 11442 * to log this history once to each pool, and bypass 11443 * the normal history logging that happens in main(). 
11444 */ 11445 (void) zpool_log_history(g_zfs, history_str); 11446 log_history = B_FALSE; 11447 } 11448 } 11449 11450 return (0); 11451 } 11452 11453 static int 11454 upgrade_one(zpool_handle_t *zhp, void *data) 11455 { 11456 boolean_t modified_pool = B_FALSE; 11457 upgrade_cbdata_t *cbp = data; 11458 uint64_t cur_version; 11459 int ret; 11460 11461 if (strcmp("log", zpool_get_name(zhp)) == 0) { 11462 (void) fprintf(stderr, gettext("'log' is now a reserved word\n" 11463 "Pool 'log' must be renamed using export and import" 11464 " to upgrade.\n")); 11465 return (1); 11466 } 11467 11468 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 11469 if (cur_version > cbp->cb_version) { 11470 (void) printf(gettext("Pool '%s' is already formatted " 11471 "using more current version '%llu'.\n\n"), 11472 zpool_get_name(zhp), (u_longlong_t)cur_version); 11473 return (0); 11474 } 11475 11476 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) { 11477 (void) printf(gettext("Pool '%s' is already formatted " 11478 "using version %llu.\n\n"), zpool_get_name(zhp), 11479 (u_longlong_t)cbp->cb_version); 11480 return (0); 11481 } 11482 11483 if (cur_version != cbp->cb_version) { 11484 modified_pool = B_TRUE; 11485 ret = upgrade_version(zhp, cbp->cb_version); 11486 if (ret != 0) 11487 return (ret); 11488 } 11489 11490 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 11491 int count = 0; 11492 ret = upgrade_enable_all(zhp, &count); 11493 if (ret != 0) 11494 return (ret); 11495 11496 if (count != 0) { 11497 modified_pool = B_TRUE; 11498 } else if (cur_version == SPA_VERSION) { 11499 (void) printf(gettext("Pool '%s' already has all " 11500 "supported and requested features enabled.\n"), 11501 zpool_get_name(zhp)); 11502 } 11503 } 11504 11505 if (modified_pool) { 11506 (void) printf("\n"); 11507 (void) after_zpool_upgrade(zhp); 11508 } 11509 11510 return (0); 11511 } 11512 11513 /* 11514 * zpool upgrade 11515 * zpool upgrade -v 11516 * zpool upgrade [-V version] <-a | pool ...> 11517 * 11518 * With no arguments, display downrev'd ZFS pool available for upgrade. 11519 * Individual pools can be upgraded by specifying the pool, and '-a' will 11520 * upgrade all pools. 
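 * * Example invocations (pool name hypothetical): * # zpool upgrade (list pools with legacy versions or disabled features) * # zpool upgrade tank (upgrade one pool and enable all supported features) * # zpool upgrade -a (upgrade every imported pool)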
11521 */ 11522 int 11523 zpool_do_upgrade(int argc, char **argv) 11524 { 11525 int c; 11526 upgrade_cbdata_t cb = { 0 }; 11527 int ret = 0; 11528 boolean_t showversions = B_FALSE; 11529 boolean_t upgradeall = B_FALSE; 11530 char *end; 11531 11532 11533 /* check options */ 11534 while ((c = getopt(argc, argv, ":avV:")) != -1) { 11535 switch (c) { 11536 case 'a': 11537 upgradeall = B_TRUE; 11538 break; 11539 case 'v': 11540 showversions = B_TRUE; 11541 break; 11542 case 'V': 11543 cb.cb_version = strtoll(optarg, &end, 10); 11544 if (*end != '\0' || 11545 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) { 11546 (void) fprintf(stderr, 11547 gettext("invalid version '%s'\n"), optarg); 11548 usage(B_FALSE); 11549 } 11550 break; 11551 case ':': 11552 (void) fprintf(stderr, gettext("missing argument for " 11553 "'%c' option\n"), optopt); 11554 usage(B_FALSE); 11555 break; 11556 case '?': 11557 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 11558 optopt); 11559 usage(B_FALSE); 11560 } 11561 } 11562 11563 cb.cb_argc = argc; 11564 cb.cb_argv = argv; 11565 argc -= optind; 11566 argv += optind; 11567 11568 if (cb.cb_version == 0) { 11569 cb.cb_version = SPA_VERSION; 11570 } else if (!upgradeall && argc == 0) { 11571 (void) fprintf(stderr, gettext("-V option is " 11572 "incompatible with other arguments\n")); 11573 usage(B_FALSE); 11574 } 11575 11576 if (showversions) { 11577 if (upgradeall || argc != 0) { 11578 (void) fprintf(stderr, gettext("-v option is " 11579 "incompatible with other arguments\n")); 11580 usage(B_FALSE); 11581 } 11582 } else if (upgradeall) { 11583 if (argc != 0) { 11584 (void) fprintf(stderr, gettext("-a option should not " 11585 "be used along with a pool name\n")); 11586 usage(B_FALSE); 11587 } 11588 } 11589 11590 (void) printf("%s", gettext("This system supports ZFS pool feature " 11591 "flags.\n\n")); 11592 if (showversions) { 11593 int i; 11594 11595 (void) printf(gettext("The following features are " 11596 "supported:\n\n")); 11597 (void) printf(gettext("FEAT DESCRIPTION\n")); 11598 (void) printf("----------------------------------------------" 11599 "---------------\n"); 11600 for (i = 0; i < SPA_FEATURES; i++) { 11601 zfeature_info_t *fi = &spa_feature_table[i]; 11602 if (!fi->fi_zfs_mod_supported) 11603 continue; 11604 const char *ro = 11605 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ? 
11606 " (read-only compatible)" : ""; 11607 11608 (void) printf("%-37s%s\n", fi->fi_uname, ro); 11609 (void) printf(" %s\n", fi->fi_desc); 11610 } 11611 (void) printf("\n"); 11612 11613 (void) printf(gettext("The following legacy versions are also " 11614 "supported:\n\n")); 11615 (void) printf(gettext("VER DESCRIPTION\n")); 11616 (void) printf("--- -----------------------------------------" 11617 "---------------\n"); 11618 (void) printf(gettext(" 1 Initial ZFS version\n")); 11619 (void) printf(gettext(" 2 Ditto blocks " 11620 "(replicated metadata)\n")); 11621 (void) printf(gettext(" 3 Hot spares and double parity " 11622 "RAID-Z\n")); 11623 (void) printf(gettext(" 4 zpool history\n")); 11624 (void) printf(gettext(" 5 Compression using the gzip " 11625 "algorithm\n")); 11626 (void) printf(gettext(" 6 bootfs pool property\n")); 11627 (void) printf(gettext(" 7 Separate intent log devices\n")); 11628 (void) printf(gettext(" 8 Delegated administration\n")); 11629 (void) printf(gettext(" 9 refquota and refreservation " 11630 "properties\n")); 11631 (void) printf(gettext(" 10 Cache devices\n")); 11632 (void) printf(gettext(" 11 Improved scrub performance\n")); 11633 (void) printf(gettext(" 12 Snapshot properties\n")); 11634 (void) printf(gettext(" 13 snapused property\n")); 11635 (void) printf(gettext(" 14 passthrough-x aclinherit\n")); 11636 (void) printf(gettext(" 15 user/group space accounting\n")); 11637 (void) printf(gettext(" 16 stmf property support\n")); 11638 (void) printf(gettext(" 17 Triple-parity RAID-Z\n")); 11639 (void) printf(gettext(" 18 Snapshot user holds\n")); 11640 (void) printf(gettext(" 19 Log device removal\n")); 11641 (void) printf(gettext(" 20 Compression using zle " 11642 "(zero-length encoding)\n")); 11643 (void) printf(gettext(" 21 Deduplication\n")); 11644 (void) printf(gettext(" 22 Received properties\n")); 11645 (void) printf(gettext(" 23 Slim ZIL\n")); 11646 (void) printf(gettext(" 24 System attributes\n")); 11647 (void) printf(gettext(" 25 Improved scrub stats\n")); 11648 (void) printf(gettext(" 26 Improved snapshot deletion " 11649 "performance\n")); 11650 (void) printf(gettext(" 27 Improved snapshot creation " 11651 "performance\n")); 11652 (void) printf(gettext(" 28 Multiple vdev replacements\n")); 11653 (void) printf(gettext("\nFor more information on a particular " 11654 "version, including supported releases,\n")); 11655 (void) printf(gettext("see the ZFS Administration Guide.\n\n")); 11656 } else if (argc == 0 && upgradeall) { 11657 cb.cb_first = B_TRUE; 11658 ret = zpool_iter(g_zfs, upgrade_cb, &cb); 11659 if (ret == 0 && cb.cb_first) { 11660 if (cb.cb_version == SPA_VERSION) { 11661 (void) printf(gettext("All pools are already " 11662 "formatted using feature flags.\n\n")); 11663 (void) printf(gettext("Every feature flags " 11664 "pool already has all supported and " 11665 "requested features enabled.\n")); 11666 } else { 11667 (void) printf(gettext("All pools are already " 11668 "formatted with version %llu or higher.\n"), 11669 (u_longlong_t)cb.cb_version); 11670 } 11671 } 11672 } else if (argc == 0) { 11673 cb.cb_first = B_TRUE; 11674 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb); 11675 assert(ret == 0); 11676 11677 if (cb.cb_first) { 11678 (void) printf(gettext("All pools are formatted " 11679 "using feature flags.\n\n")); 11680 } else { 11681 (void) printf(gettext("\nUse 'zpool upgrade -v' " 11682 "for a list of available legacy versions.\n")); 11683 } 11684 11685 cb.cb_first = B_TRUE; 11686 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, 
&cb); 11687 assert(ret == 0); 11688 11689 if (cb.cb_first) { 11690 (void) printf(gettext("Every feature flags pool has " 11691 "all supported and requested features enabled.\n")); 11692 } else { 11693 (void) printf(gettext("\n")); 11694 } 11695 } else { 11696 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 11697 B_FALSE, upgrade_one, &cb); 11698 } 11699 11700 return (ret); 11701 } 11702 11703 typedef struct hist_cbdata { 11704 boolean_t first; 11705 boolean_t longfmt; 11706 boolean_t internal; 11707 } hist_cbdata_t; 11708 11709 static void 11710 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb) 11711 { 11712 nvlist_t **records; 11713 uint_t numrecords; 11714 int i; 11715 11716 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD, 11717 &records, &numrecords) == 0); 11718 for (i = 0; i < numrecords; i++) { 11719 nvlist_t *rec = records[i]; 11720 char tbuf[64] = ""; 11721 11722 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) { 11723 time_t tsec; 11724 struct tm t; 11725 11726 tsec = fnvlist_lookup_uint64(records[i], 11727 ZPOOL_HIST_TIME); 11728 (void) localtime_r(&tsec, &t); 11729 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 11730 } 11731 11732 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) { 11733 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i], 11734 ZPOOL_HIST_ELAPSED_NS); 11735 (void) snprintf(tbuf + strlen(tbuf), 11736 sizeof (tbuf) - strlen(tbuf), 11737 " (%lldms)", (long long)elapsed_ns / 1000 / 1000); 11738 } 11739 11740 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) { 11741 (void) printf("%s %s", tbuf, 11742 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD)); 11743 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) { 11744 int ievent = 11745 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT); 11746 if (!cb->internal) 11747 continue; 11748 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) { 11749 (void) printf("%s unrecognized record:\n", 11750 tbuf); 11751 dump_nvlist(rec, 4); 11752 continue; 11753 } 11754 (void) printf("%s [internal %s txg:%lld] %s", tbuf, 11755 zfs_history_event_names[ievent], 11756 (longlong_t)fnvlist_lookup_uint64( 11757 rec, ZPOOL_HIST_TXG), 11758 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR)); 11759 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) { 11760 if (!cb->internal) 11761 continue; 11762 (void) printf("%s [txg:%lld] %s", tbuf, 11763 (longlong_t)fnvlist_lookup_uint64( 11764 rec, ZPOOL_HIST_TXG), 11765 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME)); 11766 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) { 11767 (void) printf(" %s (%llu)", 11768 fnvlist_lookup_string(rec, 11769 ZPOOL_HIST_DSNAME), 11770 (u_longlong_t)fnvlist_lookup_uint64(rec, 11771 ZPOOL_HIST_DSID)); 11772 } 11773 (void) printf(" %s", fnvlist_lookup_string(rec, 11774 ZPOOL_HIST_INT_STR)); 11775 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) { 11776 if (!cb->internal) 11777 continue; 11778 (void) printf("%s ioctl %s\n", tbuf, 11779 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL)); 11780 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) { 11781 (void) printf(" input:\n"); 11782 dump_nvlist(fnvlist_lookup_nvlist(rec, 11783 ZPOOL_HIST_INPUT_NVL), 8); 11784 } 11785 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) { 11786 (void) printf(" output:\n"); 11787 dump_nvlist(fnvlist_lookup_nvlist(rec, 11788 ZPOOL_HIST_OUTPUT_NVL), 8); 11789 } 11790 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) { 11791 (void) printf(" output nvlist omitted; " 11792 "original size: %lldKB\n", 11793 (longlong_t)fnvlist_lookup_int64(rec, 11794 ZPOOL_HIST_OUTPUT_SIZE) / 1024); 11795 } 11796 if 
(nvlist_exists(rec, ZPOOL_HIST_ERRNO)) { 11797 (void) printf(" errno: %lld\n", 11798 (longlong_t)fnvlist_lookup_int64(rec, 11799 ZPOOL_HIST_ERRNO)); 11800 } 11801 } else { 11802 if (!cb->internal) 11803 continue; 11804 (void) printf("%s unrecognized record:\n", tbuf); 11805 dump_nvlist(rec, 4); 11806 } 11807 11808 if (!cb->longfmt) { 11809 (void) printf("\n"); 11810 continue; 11811 } 11812 (void) printf(" ["); 11813 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) { 11814 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO); 11815 struct passwd *pwd = getpwuid(who); 11816 (void) printf("user %d ", (int)who); 11817 if (pwd != NULL) 11818 (void) printf("(%s) ", pwd->pw_name); 11819 } 11820 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) { 11821 (void) printf("on %s", 11822 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST)); 11823 } 11824 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) { 11825 (void) printf(":%s", 11826 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE)); 11827 } 11828 11829 (void) printf("]"); 11830 (void) printf("\n"); 11831 } 11832 } 11833 11834 /* 11835 * Print out the command history for a specific pool. 11836 */ 11837 static int 11838 get_history_one(zpool_handle_t *zhp, void *data) 11839 { 11840 nvlist_t *nvhis; 11841 int ret; 11842 hist_cbdata_t *cb = (hist_cbdata_t *)data; 11843 uint64_t off = 0; 11844 boolean_t eof = B_FALSE; 11845 11846 cb->first = B_FALSE; 11847 11848 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp)); 11849 11850 while (!eof) { 11851 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0) 11852 return (ret); 11853 11854 print_history_records(nvhis, cb); 11855 nvlist_free(nvhis); 11856 } 11857 (void) printf("\n"); 11858 11859 return (ret); 11860 } 11861 11862 /* 11863 * zpool history <pool> 11864 * 11865 * Displays the history of commands that modified pools. 
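 * * '-l' adds the user, hostname, and zone that issued each command; '-i' also shows internally logged events. Example (pool name hypothetical): * # zpool history -il tank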
11866 */ 11867 int 11868 zpool_do_history(int argc, char **argv) 11869 { 11870 hist_cbdata_t cbdata = { 0 }; 11871 int ret; 11872 int c; 11873 11874 cbdata.first = B_TRUE; 11875 /* check options */ 11876 while ((c = getopt(argc, argv, "li")) != -1) { 11877 switch (c) { 11878 case 'l': 11879 cbdata.longfmt = B_TRUE; 11880 break; 11881 case 'i': 11882 cbdata.internal = B_TRUE; 11883 break; 11884 case '?': 11885 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 11886 optopt); 11887 usage(B_FALSE); 11888 } 11889 } 11890 argc -= optind; 11891 argv += optind; 11892 11893 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 11894 B_FALSE, get_history_one, &cbdata); 11895 11896 if (argc == 0 && cbdata.first == B_TRUE) { 11897 (void) fprintf(stderr, gettext("no pools available\n")); 11898 return (0); 11899 } 11900 11901 return (ret); 11902 } 11903 11904 typedef struct ev_opts { 11905 int verbose; 11906 int scripted; 11907 int follow; 11908 int clear; 11909 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 11910 } ev_opts_t; 11911 11912 static void 11913 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts) 11914 { 11915 char ctime_str[26], str[32]; 11916 const char *ptr; 11917 int64_t *tv; 11918 uint_t n; 11919 11920 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0); 11921 memset(str, ' ', 32); 11922 (void) ctime_r((const time_t *)&tv[0], ctime_str); 11923 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */ 11924 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */ 11925 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */ 11926 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */ 11927 if (opts->scripted) 11928 (void) printf(gettext("%s\t"), str); 11929 else 11930 (void) printf(gettext("%s "), str); 11931 11932 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0); 11933 (void) printf(gettext("%s\n"), ptr); 11934 } 11935 11936 static void 11937 zpool_do_events_nvprint(nvlist_t *nvl, int depth) 11938 { 11939 nvpair_t *nvp; 11940 static char flagstr[256]; 11941 11942 for (nvp = nvlist_next_nvpair(nvl, NULL); 11943 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) { 11944 11945 data_type_t type = nvpair_type(nvp); 11946 const char *name = nvpair_name(nvp); 11947 11948 boolean_t b; 11949 uint8_t i8; 11950 uint16_t i16; 11951 uint32_t i32; 11952 uint64_t i64; 11953 const char *str; 11954 nvlist_t *cnv; 11955 11956 printf(gettext("%*s%s = "), depth, "", name); 11957 11958 switch (type) { 11959 case DATA_TYPE_BOOLEAN: 11960 printf(gettext("%s"), "1"); 11961 break; 11962 11963 case DATA_TYPE_BOOLEAN_VALUE: 11964 (void) nvpair_value_boolean_value(nvp, &b); 11965 printf(gettext("%s"), b ? 
"1" : "0"); 11966 break; 11967 11968 case DATA_TYPE_BYTE: 11969 (void) nvpair_value_byte(nvp, &i8); 11970 printf(gettext("0x%x"), i8); 11971 break; 11972 11973 case DATA_TYPE_INT8: 11974 (void) nvpair_value_int8(nvp, (void *)&i8); 11975 printf(gettext("0x%x"), i8); 11976 break; 11977 11978 case DATA_TYPE_UINT8: 11979 (void) nvpair_value_uint8(nvp, &i8); 11980 printf(gettext("0x%x"), i8); 11981 break; 11982 11983 case DATA_TYPE_INT16: 11984 (void) nvpair_value_int16(nvp, (void *)&i16); 11985 printf(gettext("0x%x"), i16); 11986 break; 11987 11988 case DATA_TYPE_UINT16: 11989 (void) nvpair_value_uint16(nvp, &i16); 11990 printf(gettext("0x%x"), i16); 11991 break; 11992 11993 case DATA_TYPE_INT32: 11994 (void) nvpair_value_int32(nvp, (void *)&i32); 11995 printf(gettext("0x%x"), i32); 11996 break; 11997 11998 case DATA_TYPE_UINT32: 11999 (void) nvpair_value_uint32(nvp, &i32); 12000 if (strcmp(name, 12001 FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 || 12002 strcmp(name, 12003 FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) { 12004 zfs_valstr_zio_stage(i32, flagstr, 12005 sizeof (flagstr)); 12006 printf(gettext("0x%x [%s]"), i32, flagstr); 12007 } else if (strcmp(name, 12008 FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) { 12009 zfs_valstr_zio_priority(i32, flagstr, 12010 sizeof (flagstr)); 12011 printf(gettext("0x%x [%s]"), i32, flagstr); 12012 } else { 12013 printf(gettext("0x%x"), i32); 12014 } 12015 break; 12016 12017 case DATA_TYPE_INT64: 12018 (void) nvpair_value_int64(nvp, (void *)&i64); 12019 printf(gettext("0x%llx"), (u_longlong_t)i64); 12020 break; 12021 12022 case DATA_TYPE_UINT64: 12023 (void) nvpair_value_uint64(nvp, &i64); 12024 /* 12025 * translate vdev state values to readable 12026 * strings to aide zpool events consumers 12027 */ 12028 if (strcmp(name, 12029 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 || 12030 strcmp(name, 12031 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) { 12032 printf(gettext("\"%s\" (0x%llx)"), 12033 zpool_state_to_name(i64, VDEV_AUX_NONE), 12034 (u_longlong_t)i64); 12035 } else if (strcmp(name, 12036 FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) { 12037 zfs_valstr_zio_flag(i64, flagstr, 12038 sizeof (flagstr)); 12039 printf(gettext("0x%llx [%s]"), 12040 (u_longlong_t)i64, flagstr); 12041 } else { 12042 printf(gettext("0x%llx"), (u_longlong_t)i64); 12043 } 12044 break; 12045 12046 case DATA_TYPE_HRTIME: 12047 (void) nvpair_value_hrtime(nvp, (void *)&i64); 12048 printf(gettext("0x%llx"), (u_longlong_t)i64); 12049 break; 12050 12051 case DATA_TYPE_STRING: 12052 (void) nvpair_value_string(nvp, &str); 12053 printf(gettext("\"%s\""), str ? 
str : "<NULL>"); 12054 break; 12055 12056 case DATA_TYPE_NVLIST: 12057 printf(gettext("(embedded nvlist)\n")); 12058 (void) nvpair_value_nvlist(nvp, &cnv); 12059 zpool_do_events_nvprint(cnv, depth + 8); 12060 printf(gettext("%*s(end %s)"), depth, "", name); 12061 break; 12062 12063 case DATA_TYPE_NVLIST_ARRAY: { 12064 nvlist_t **val; 12065 uint_t i, nelem; 12066 12067 (void) nvpair_value_nvlist_array(nvp, &val, &nelem); 12068 printf(gettext("(%d embedded nvlists)\n"), nelem); 12069 for (i = 0; i < nelem; i++) { 12070 printf(gettext("%*s%s[%d] = %s\n"), 12071 depth, "", name, i, "(embedded nvlist)"); 12072 zpool_do_events_nvprint(val[i], depth + 8); 12073 printf(gettext("%*s(end %s[%i])\n"), 12074 depth, "", name, i); 12075 } 12076 printf(gettext("%*s(end %s)\n"), depth, "", name); 12077 } 12078 break; 12079 12080 case DATA_TYPE_INT8_ARRAY: { 12081 int8_t *val; 12082 uint_t i, nelem; 12083 12084 (void) nvpair_value_int8_array(nvp, &val, &nelem); 12085 for (i = 0; i < nelem; i++) 12086 printf(gettext("0x%x "), val[i]); 12087 12088 break; 12089 } 12090 12091 case DATA_TYPE_UINT8_ARRAY: { 12092 uint8_t *val; 12093 uint_t i, nelem; 12094 12095 (void) nvpair_value_uint8_array(nvp, &val, &nelem); 12096 for (i = 0; i < nelem; i++) 12097 printf(gettext("0x%x "), val[i]); 12098 12099 break; 12100 } 12101 12102 case DATA_TYPE_INT16_ARRAY: { 12103 int16_t *val; 12104 uint_t i, nelem; 12105 12106 (void) nvpair_value_int16_array(nvp, &val, &nelem); 12107 for (i = 0; i < nelem; i++) 12108 printf(gettext("0x%x "), val[i]); 12109 12110 break; 12111 } 12112 12113 case DATA_TYPE_UINT16_ARRAY: { 12114 uint16_t *val; 12115 uint_t i, nelem; 12116 12117 (void) nvpair_value_uint16_array(nvp, &val, &nelem); 12118 for (i = 0; i < nelem; i++) 12119 printf(gettext("0x%x "), val[i]); 12120 12121 break; 12122 } 12123 12124 case DATA_TYPE_INT32_ARRAY: { 12125 int32_t *val; 12126 uint_t i, nelem; 12127 12128 (void) nvpair_value_int32_array(nvp, &val, &nelem); 12129 for (i = 0; i < nelem; i++) 12130 printf(gettext("0x%x "), val[i]); 12131 12132 break; 12133 } 12134 12135 case DATA_TYPE_UINT32_ARRAY: { 12136 uint32_t *val; 12137 uint_t i, nelem; 12138 12139 (void) nvpair_value_uint32_array(nvp, &val, &nelem); 12140 for (i = 0; i < nelem; i++) 12141 printf(gettext("0x%x "), val[i]); 12142 12143 break; 12144 } 12145 12146 case DATA_TYPE_INT64_ARRAY: { 12147 int64_t *val; 12148 uint_t i, nelem; 12149 12150 (void) nvpair_value_int64_array(nvp, &val, &nelem); 12151 for (i = 0; i < nelem; i++) 12152 printf(gettext("0x%llx "), 12153 (u_longlong_t)val[i]); 12154 12155 break; 12156 } 12157 12158 case DATA_TYPE_UINT64_ARRAY: { 12159 uint64_t *val; 12160 uint_t i, nelem; 12161 12162 (void) nvpair_value_uint64_array(nvp, &val, &nelem); 12163 for (i = 0; i < nelem; i++) 12164 printf(gettext("0x%llx "), 12165 (u_longlong_t)val[i]); 12166 12167 break; 12168 } 12169 12170 case DATA_TYPE_STRING_ARRAY: { 12171 const char **str; 12172 uint_t i, nelem; 12173 12174 (void) nvpair_value_string_array(nvp, &str, &nelem); 12175 for (i = 0; i < nelem; i++) 12176 printf(gettext("\"%s\" "), 12177 str[i] ? 
str[i] : "<NULL>"); 12178 12179 break; 12180 } 12181 12182 case DATA_TYPE_BOOLEAN_ARRAY: 12183 case DATA_TYPE_BYTE_ARRAY: 12184 case DATA_TYPE_DOUBLE: 12185 case DATA_TYPE_DONTCARE: 12186 case DATA_TYPE_UNKNOWN: 12187 printf(gettext("<unknown>")); 12188 break; 12189 } 12190 12191 printf(gettext("\n")); 12192 } 12193 } 12194 12195 static int 12196 zpool_do_events_next(ev_opts_t *opts) 12197 { 12198 nvlist_t *nvl; 12199 int zevent_fd, ret, dropped; 12200 const char *pool; 12201 12202 zevent_fd = open(ZFS_DEV, O_RDWR); 12203 VERIFY(zevent_fd >= 0); 12204 12205 if (!opts->scripted) 12206 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS"); 12207 12208 while (1) { 12209 ret = zpool_events_next(g_zfs, &nvl, &dropped, 12210 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd); 12211 if (ret || nvl == NULL) 12212 break; 12213 12214 if (dropped > 0) 12215 (void) printf(gettext("dropped %d events\n"), dropped); 12216 12217 if (strlen(opts->poolname) > 0 && 12218 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 && 12219 strcmp(opts->poolname, pool) != 0) 12220 continue; 12221 12222 zpool_do_events_short(nvl, opts); 12223 12224 if (opts->verbose) { 12225 zpool_do_events_nvprint(nvl, 8); 12226 printf(gettext("\n")); 12227 } 12228 (void) fflush(stdout); 12229 12230 nvlist_free(nvl); 12231 } 12232 12233 VERIFY(0 == close(zevent_fd)); 12234 12235 return (ret); 12236 } 12237 12238 static int 12239 zpool_do_events_clear(void) 12240 { 12241 int count, ret; 12242 12243 ret = zpool_events_clear(g_zfs, &count); 12244 if (!ret) 12245 (void) printf(gettext("cleared %d events\n"), count); 12246 12247 return (ret); 12248 } 12249 12250 /* 12251 * zpool events [-vHf [pool] | -c] 12252 * 12253 * Displays events logs by ZFS. 12254 */ 12255 int 12256 zpool_do_events(int argc, char **argv) 12257 { 12258 ev_opts_t opts = { 0 }; 12259 int ret; 12260 int c; 12261 12262 /* check options */ 12263 while ((c = getopt(argc, argv, "vHfc")) != -1) { 12264 switch (c) { 12265 case 'v': 12266 opts.verbose = 1; 12267 break; 12268 case 'H': 12269 opts.scripted = 1; 12270 break; 12271 case 'f': 12272 opts.follow = 1; 12273 break; 12274 case 'c': 12275 opts.clear = 1; 12276 break; 12277 case '?': 12278 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 12279 optopt); 12280 usage(B_FALSE); 12281 } 12282 } 12283 argc -= optind; 12284 argv += optind; 12285 12286 if (argc > 1) { 12287 (void) fprintf(stderr, gettext("too many arguments\n")); 12288 usage(B_FALSE); 12289 } else if (argc == 1) { 12290 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname)); 12291 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) { 12292 (void) fprintf(stderr, 12293 gettext("invalid pool name '%s'\n"), opts.poolname); 12294 usage(B_FALSE); 12295 } 12296 } 12297 12298 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) && 12299 opts.clear) { 12300 (void) fprintf(stderr, 12301 gettext("invalid options combined with -c\n")); 12302 usage(B_FALSE); 12303 } 12304 12305 if (opts.clear) 12306 ret = zpool_do_events_clear(); 12307 else 12308 ret = zpool_do_events_next(&opts); 12309 12310 return (ret); 12311 } 12312 12313 static int 12314 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data) 12315 { 12316 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 12317 char value[ZFS_MAXPROPLEN]; 12318 zprop_source_t srctype; 12319 nvlist_t *props, *item, *d; 12320 props = item = d = NULL; 12321 12322 if (cbp->cb_json) { 12323 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs"); 12324 if (d == NULL) { 12325 fprintf(stderr, 
"vdevs obj not found.\n"); 12326 exit(1); 12327 } 12328 props = fnvlist_alloc(); 12329 } 12330 12331 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL; 12332 pl = pl->pl_next) { 12333 char *prop_name; 12334 /* 12335 * If the first property is pool name, it is a special 12336 * placeholder that we can skip. This will also skip 12337 * over the name property when 'all' is specified. 12338 */ 12339 if (pl->pl_prop == ZPOOL_PROP_NAME && 12340 pl == cbp->cb_proplist) 12341 continue; 12342 12343 if (pl->pl_prop == ZPROP_INVAL) { 12344 prop_name = pl->pl_user_prop; 12345 } else { 12346 prop_name = (char *)vdev_prop_to_name(pl->pl_prop); 12347 } 12348 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop, 12349 prop_name, value, sizeof (value), &srctype, 12350 cbp->cb_literal) == 0) { 12351 zprop_collect_property(vdevname, cbp, prop_name, 12352 value, srctype, NULL, NULL, props); 12353 } 12354 } 12355 12356 if (cbp->cb_json) { 12357 if (!nvlist_empty(props)) { 12358 item = fnvlist_alloc(); 12359 fill_vdev_info(item, zhp, vdevname, B_TRUE, 12360 cbp->cb_json_as_int); 12361 fnvlist_add_nvlist(item, "properties", props); 12362 fnvlist_add_nvlist(d, vdevname, item); 12363 fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d); 12364 fnvlist_free(item); 12365 } 12366 fnvlist_free(props); 12367 } 12368 12369 return (0); 12370 } 12371 12372 static int 12373 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data) 12374 { 12375 zpool_handle_t *zhp = zhp_data; 12376 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 12377 char *vdevname; 12378 const char *type; 12379 int ret; 12380 12381 /* 12382 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the 12383 * pool name for display purposes, which is not desired. Fallback to 12384 * zpool_vdev_name() when not dealing with the root vdev. 
12385 */ 12386 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE); 12387 if (zhp != NULL && strcmp(type, "root") == 0) 12388 vdevname = strdup("root-0"); 12389 else 12390 vdevname = zpool_vdev_name(g_zfs, zhp, nv, 12391 cbp->cb_vdevs.cb_name_flags); 12392 12393 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist); 12394 12395 ret = get_callback_vdev(zhp, vdevname, data); 12396 12397 free(vdevname); 12398 12399 return (ret); 12400 } 12401 12402 static int 12403 get_callback(zpool_handle_t *zhp, void *data) 12404 { 12405 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 12406 char value[ZFS_MAXPROPLEN]; 12407 zprop_source_t srctype; 12408 zprop_list_t *pl; 12409 int vid; 12410 int err = 0; 12411 nvlist_t *props, *item, *d; 12412 props = item = d = NULL; 12413 12414 if (cbp->cb_type == ZFS_TYPE_VDEV) { 12415 if (cbp->cb_json) { 12416 nvlist_t *pool = fnvlist_alloc(); 12417 fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int); 12418 fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool); 12419 fnvlist_free(pool); 12420 } 12421 12422 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) { 12423 for_each_vdev(zhp, get_callback_vdev_cb, data); 12424 } else { 12425 /* Adjust column widths for vdev properties */ 12426 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; 12427 vid++) { 12428 vdev_expand_proplist(zhp, 12429 cbp->cb_vdevs.cb_names[vid], 12430 &cbp->cb_proplist); 12431 } 12432 /* Display the properties */ 12433 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; 12434 vid++) { 12435 get_callback_vdev(zhp, 12436 cbp->cb_vdevs.cb_names[vid], data); 12437 } 12438 } 12439 } else { 12440 assert(cbp->cb_type == ZFS_TYPE_POOL); 12441 if (cbp->cb_json) { 12442 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools"); 12443 if (d == NULL) { 12444 fprintf(stderr, "pools obj not found.\n"); 12445 exit(1); 12446 } 12447 props = fnvlist_alloc(); 12448 } 12449 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { 12450 /* 12451 * Skip the special fake placeholder. This will also 12452 * skip over the name property when 'all' is specified. 
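 * (The placeholder is the fake_name entry that zpool_do_get() prepends
 * to cb_proplist.)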
12453 */ 12454 if (pl->pl_prop == ZPOOL_PROP_NAME && 12455 pl == cbp->cb_proplist) 12456 continue; 12457 12458 if (pl->pl_prop == ZPROP_INVAL && 12459 zfs_prop_user(pl->pl_user_prop)) { 12460 srctype = ZPROP_SRC_LOCAL; 12461 12462 if (zpool_get_userprop(zhp, pl->pl_user_prop, 12463 value, sizeof (value), &srctype) != 0) 12464 continue; 12465 12466 err = zprop_collect_property( 12467 zpool_get_name(zhp), cbp, pl->pl_user_prop, 12468 value, srctype, NULL, NULL, props); 12469 } else if (pl->pl_prop == ZPROP_INVAL && 12470 (zpool_prop_feature(pl->pl_user_prop) || 12471 zpool_prop_unsupported(pl->pl_user_prop))) { 12472 srctype = ZPROP_SRC_LOCAL; 12473 12474 if (zpool_prop_get_feature(zhp, 12475 pl->pl_user_prop, value, 12476 sizeof (value)) == 0) { 12477 err = zprop_collect_property( 12478 zpool_get_name(zhp), cbp, 12479 pl->pl_user_prop, value, srctype, 12480 NULL, NULL, props); 12481 } 12482 } else { 12483 if (zpool_get_prop(zhp, pl->pl_prop, value, 12484 sizeof (value), &srctype, 12485 cbp->cb_literal) != 0) 12486 continue; 12487 12488 err = zprop_collect_property( 12489 zpool_get_name(zhp), cbp, 12490 zpool_prop_to_name(pl->pl_prop), 12491 value, srctype, NULL, NULL, props); 12492 } 12493 if (err != 0) 12494 return (err); 12495 } 12496 12497 if (cbp->cb_json) { 12498 if (!nvlist_empty(props)) { 12499 item = fnvlist_alloc(); 12500 fill_pool_info(item, zhp, B_TRUE, 12501 cbp->cb_json_as_int); 12502 fnvlist_add_nvlist(item, "properties", props); 12503 if (cbp->cb_json_pool_key_guid) { 12504 char buf[256]; 12505 uint64_t guid = fnvlist_lookup_uint64( 12506 zpool_get_config(zhp, NULL), 12507 ZPOOL_CONFIG_POOL_GUID); 12508 snprintf(buf, 256, "%llu", 12509 (u_longlong_t)guid); 12510 fnvlist_add_nvlist(d, buf, item); 12511 } else { 12512 const char *name = zpool_get_name(zhp); 12513 fnvlist_add_nvlist(d, name, item); 12514 } 12515 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d); 12516 fnvlist_free(item); 12517 } 12518 fnvlist_free(props); 12519 } 12520 } 12521 12522 return (0); 12523 } 12524 12525 /* 12526 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ... 12527 * 12528 * -H Scripted mode. Don't display headers, and separate properties 12529 * by a single tab. 12530 * -o List of columns to display. Defaults to 12531 * "name,property,value,source". 12532 * -p Display values in parsable (exact) format. 12533 * -j Display output in JSON format. 12534 * --json-int Display numbers as integers instead of strings. 12535 * --json-pool-key-guid Set pool GUID as key for pool objects. 12536 * 12537 * Get properties of pools in the system. Output space statistics 12538 * for each one as well as other attributes. 12539 */ 12540 int 12541 zpool_do_get(int argc, char **argv) 12542 { 12543 zprop_get_cbdata_t cb = { 0 }; 12544 zprop_list_t fake_name = { 0 }; 12545 int ret; 12546 int c, i; 12547 char *propstr = NULL; 12548 char *vdev = NULL; 12549 nvlist_t *data = NULL; 12550 12551 cb.cb_first = B_TRUE; 12552 12553 /* 12554 * Set up default columns and sources. 
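 * The defaults below match -o "name,property,value,source".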
12555 */ 12556 cb.cb_sources = ZPROP_SRC_ALL; 12557 cb.cb_columns[0] = GET_COL_NAME; 12558 cb.cb_columns[1] = GET_COL_PROPERTY; 12559 cb.cb_columns[2] = GET_COL_VALUE; 12560 cb.cb_columns[3] = GET_COL_SOURCE; 12561 cb.cb_type = ZFS_TYPE_POOL; 12562 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; 12563 current_prop_type = cb.cb_type; 12564 12565 struct option long_options[] = { 12566 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT}, 12567 {"json-pool-key-guid", no_argument, NULL, 12568 ZPOOL_OPTION_POOL_KEY_GUID}, 12569 {0, 0, 0, 0} 12570 }; 12571 12572 /* check options */ 12573 while ((c = getopt_long(argc, argv, ":jHpo:", long_options, 12574 NULL)) != -1) { 12575 switch (c) { 12576 case 'p': 12577 cb.cb_literal = B_TRUE; 12578 break; 12579 case 'H': 12580 cb.cb_scripted = B_TRUE; 12581 break; 12582 case 'j': 12583 cb.cb_json = B_TRUE; 12584 cb.cb_jsobj = zpool_json_schema(0, 1); 12585 data = fnvlist_alloc(); 12586 break; 12587 case ZPOOL_OPTION_POOL_KEY_GUID: 12588 cb.cb_json_pool_key_guid = B_TRUE; 12589 break; 12590 case ZPOOL_OPTION_JSON_NUMS_AS_INT: 12591 cb.cb_json_as_int = B_TRUE; 12592 cb.cb_literal = B_TRUE; 12593 break; 12594 case 'o': 12595 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns)); 12596 i = 0; 12597 12598 for (char *tok; (tok = strsep(&optarg, ",")); ) { 12599 static const char *const col_opts[] = 12600 { "name", "property", "value", "source", 12601 "all" }; 12602 static const zfs_get_column_t col_cols[] = 12603 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE, 12604 GET_COL_SOURCE }; 12605 12606 if (i == ZFS_GET_NCOLS - 1) { 12607 (void) fprintf(stderr, gettext("too " 12608 "many fields given to -o " 12609 "option\n")); 12610 usage(B_FALSE); 12611 } 12612 12613 for (c = 0; c < ARRAY_SIZE(col_opts); ++c) 12614 if (strcmp(tok, col_opts[c]) == 0) 12615 goto found; 12616 12617 (void) fprintf(stderr, 12618 gettext("invalid column name '%s'\n"), tok); 12619 usage(B_FALSE); 12620 12621 found: 12622 if (c >= 4) { 12623 if (i > 0) { 12624 (void) fprintf(stderr, 12625 gettext("\"all\" conflicts " 12626 "with specific fields " 12627 "given to -o option\n")); 12628 usage(B_FALSE); 12629 } 12630 12631 memcpy(cb.cb_columns, col_cols, 12632 sizeof (col_cols)); 12633 i = ZFS_GET_NCOLS - 1; 12634 } else 12635 cb.cb_columns[i++] = col_cols[c]; 12636 } 12637 break; 12638 case '?': 12639 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 12640 optopt); 12641 usage(B_FALSE); 12642 } 12643 } 12644 12645 argc -= optind; 12646 argv += optind; 12647 12648 if (!cb.cb_json && cb.cb_json_as_int) { 12649 (void) fprintf(stderr, gettext("'--json-int' only works with" 12650 " '-j' option\n")); 12651 usage(B_FALSE); 12652 } 12653 12654 if (!cb.cb_json && cb.cb_json_pool_key_guid) { 12655 (void) fprintf(stderr, gettext("'json-pool-key-guid' only" 12656 " works with '-j' option\n")); 12657 usage(B_FALSE); 12658 } 12659 12660 if (argc < 1) { 12661 (void) fprintf(stderr, gettext("missing property " 12662 "argument\n")); 12663 usage(B_FALSE); 12664 } 12665 12666 /* Properties list is needed later by zprop_get_list() */ 12667 propstr = argv[0]; 12668 12669 argc--; 12670 argv++; 12671 12672 if (argc == 0) { 12673 /* No args, so just print the defaults. 
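 * With no pool arguments, for_each_pool() visits every imported pool.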
*/ 12674 } else if (are_all_pools(argc, argv)) { 12675 /* All the args are pool names */ 12676 } else if (are_all_pools(1, argv)) { 12677 /* The first arg is a pool name */ 12678 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) || 12679 (argc == 2 && strcmp(argv[1], "root") == 0) || 12680 are_vdevs_in_pool(argc - 1, argv + 1, argv[0], 12681 &cb.cb_vdevs)) { 12682 12683 if (strcmp(argv[1], "root") == 0) 12684 vdev = strdup("root-0"); 12685 else 12686 vdev = strdup(argv[1]); 12687 12688 /* ... and the rest are vdev names */ 12689 cb.cb_vdevs.cb_names = &vdev; 12690 cb.cb_vdevs.cb_names_count = argc - 1; 12691 cb.cb_type = ZFS_TYPE_VDEV; 12692 argc = 1; /* One pool to process */ 12693 } else { 12694 if (cb.cb_json) { 12695 nvlist_free(cb.cb_jsobj); 12696 nvlist_free(data); 12697 } 12698 fprintf(stderr, gettext("Expected a list of vdevs in" 12699 " \"%s\", but got:\n"), argv[0]); 12700 error_list_unresolved_vdevs(argc - 1, argv + 1, 12701 argv[0], &cb.cb_vdevs); 12702 fprintf(stderr, "\n"); 12703 usage(B_FALSE); 12704 return (1); 12705 } 12706 } else { 12707 if (cb.cb_json) { 12708 nvlist_free(cb.cb_jsobj); 12709 nvlist_free(data); 12710 } 12711 /* 12712 * The first arg isn't the name of a valid pool. 12713 */ 12714 fprintf(stderr, gettext("Cannot get properties of %s: " 12715 "no such pool available.\n"), argv[0]); 12716 return (1); 12717 } 12718 12719 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist, 12720 cb.cb_type) != 0) { 12721 /* Use correct list of valid properties (pool or vdev) */ 12722 current_prop_type = cb.cb_type; 12723 usage(B_FALSE); 12724 } 12725 12726 if (cb.cb_proplist != NULL) { 12727 fake_name.pl_prop = ZPOOL_PROP_NAME; 12728 fake_name.pl_width = strlen(gettext("NAME")); 12729 fake_name.pl_next = cb.cb_proplist; 12730 cb.cb_proplist = &fake_name; 12731 } 12732 12733 if (cb.cb_json) { 12734 if (cb.cb_type == ZFS_TYPE_VDEV) 12735 fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data); 12736 else 12737 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data); 12738 fnvlist_free(data); 12739 } 12740 12741 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type, 12742 cb.cb_literal, get_callback, &cb); 12743 12744 if (ret == 0 && cb.cb_json) 12745 zcmd_print_json(cb.cb_jsobj); 12746 else if (ret != 0 && cb.cb_json) 12747 nvlist_free(cb.cb_jsobj); 12748 12749 if (cb.cb_proplist == &fake_name) 12750 zprop_free_list(fake_name.pl_next); 12751 else 12752 zprop_free_list(cb.cb_proplist); 12753 12754 if (vdev != NULL) 12755 free(vdev); 12756 12757 return (ret); 12758 } 12759 12760 typedef struct set_cbdata { 12761 char *cb_propname; 12762 char *cb_value; 12763 zfs_type_t cb_type; 12764 vdev_cbdata_t cb_vdevs; 12765 boolean_t cb_any_successful; 12766 } set_cbdata_t; 12767 12768 static int 12769 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb) 12770 { 12771 int error; 12772 12773 /* Check if we have out-of-bounds features */ 12774 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) { 12775 boolean_t features[SPA_FEATURES]; 12776 if (zpool_do_load_compat(cb->cb_value, features) != 12777 ZPOOL_COMPATIBILITY_OK) 12778 return (-1); 12779 12780 nvlist_t *enabled = zpool_get_features(zhp); 12781 spa_feature_t i; 12782 for (i = 0; i < SPA_FEATURES; i++) { 12783 const char *fguid = spa_feature_table[i].fi_guid; 12784 if (nvlist_exists(enabled, fguid) && !features[i]) 12785 break; 12786 } 12787 if (i < SPA_FEATURES) 12788 (void) fprintf(stderr, gettext("Warning: one or " 12789 "more features already enabled on pool '%s'\n" 12790 "are not present in this compatibility set.\n"), 
12791 zpool_get_name(zhp)); 12792 } 12793 12794 /* if we're setting a feature, check it's in compatibility set */ 12795 if (zpool_prop_feature(cb->cb_propname) && 12796 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) { 12797 char *fname = strchr(cb->cb_propname, '@') + 1; 12798 spa_feature_t f; 12799 12800 if (zfeature_lookup_name(fname, &f) == 0) { 12801 char compat[ZFS_MAXPROPLEN]; 12802 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, 12803 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 12804 compat[0] = '\0'; 12805 12806 boolean_t features[SPA_FEATURES]; 12807 if (zpool_do_load_compat(compat, features) != 12808 ZPOOL_COMPATIBILITY_OK) { 12809 (void) fprintf(stderr, gettext("Error: " 12810 "cannot enable feature '%s' on pool '%s'\n" 12811 "because the pool's 'compatibility' " 12812 "property cannot be parsed.\n"), 12813 fname, zpool_get_name(zhp)); 12814 return (-1); 12815 } 12816 12817 if (!features[f]) { 12818 (void) fprintf(stderr, gettext("Error: " 12819 "cannot enable feature '%s' on pool '%s'\n" 12820 "as it is not specified in this pool's " 12821 "current compatibility set.\n" 12822 "Consider setting 'compatibility' to a " 12823 "less restrictive set, or to 'off'.\n"), 12824 fname, zpool_get_name(zhp)); 12825 return (-1); 12826 } 12827 } 12828 } 12829 12830 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value); 12831 12832 return (error); 12833 } 12834 12835 static int 12836 set_callback(zpool_handle_t *zhp, void *data) 12837 { 12838 int error; 12839 set_cbdata_t *cb = (set_cbdata_t *)data; 12840 12841 if (cb->cb_type == ZFS_TYPE_VDEV) { 12842 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names, 12843 cb->cb_propname, cb->cb_value); 12844 } else { 12845 assert(cb->cb_type == ZFS_TYPE_POOL); 12846 error = set_pool_callback(zhp, cb); 12847 } 12848 12849 cb->cb_any_successful = !error; 12850 return (error); 12851 } 12852 12853 int 12854 zpool_do_set(int argc, char **argv) 12855 { 12856 set_cbdata_t cb = { 0 }; 12857 int error; 12858 char *vdev = NULL; 12859 12860 current_prop_type = ZFS_TYPE_POOL; 12861 if (argc > 1 && argv[1][0] == '-') { 12862 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 12863 argv[1][1]); 12864 usage(B_FALSE); 12865 } 12866 12867 if (argc < 2) { 12868 (void) fprintf(stderr, gettext("missing property=value " 12869 "argument\n")); 12870 usage(B_FALSE); 12871 } 12872 12873 if (argc < 3) { 12874 (void) fprintf(stderr, gettext("missing pool name\n")); 12875 usage(B_FALSE); 12876 } 12877 12878 if (argc > 4) { 12879 (void) fprintf(stderr, gettext("too many pool names\n")); 12880 usage(B_FALSE); 12881 } 12882 12883 cb.cb_propname = argv[1]; 12884 cb.cb_type = ZFS_TYPE_POOL; 12885 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; 12886 cb.cb_value = strchr(cb.cb_propname, '='); 12887 if (cb.cb_value == NULL) { 12888 (void) fprintf(stderr, gettext("missing value in " 12889 "property=value argument\n")); 12890 usage(B_FALSE); 12891 } 12892 12893 *(cb.cb_value) = '\0'; 12894 cb.cb_value++; 12895 argc -= 2; 12896 argv += 2; 12897 12898 /* argv[0] is pool name */ 12899 if (!is_pool(argv[0])) { 12900 (void) fprintf(stderr, 12901 gettext("cannot open '%s': is not a pool\n"), argv[0]); 12902 return (EINVAL); 12903 } 12904 12905 /* argv[1], when supplied, is vdev name */ 12906 if (argc == 2) { 12907 12908 if (strcmp(argv[1], "root") == 0) 12909 vdev = strdup("root-0"); 12910 else 12911 vdev = strdup(argv[1]); 12912 12913 if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) { 12914 (void) fprintf(stderr, gettext( 12915 "cannot find '%s' in '%s': device not in 
pool\n"), 12916 vdev, argv[0]); 12917 free(vdev); 12918 return (EINVAL); 12919 } 12920 cb.cb_vdevs.cb_names = &vdev; 12921 cb.cb_vdevs.cb_names_count = 1; 12922 cb.cb_type = ZFS_TYPE_VDEV; 12923 } 12924 12925 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 12926 B_FALSE, set_callback, &cb); 12927 12928 if (vdev != NULL) 12929 free(vdev); 12930 12931 return (error); 12932 } 12933 12934 /* Add up the total number of bytes left to initialize/trim across all vdevs */ 12935 static uint64_t 12936 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity) 12937 { 12938 uint64_t bytes_remaining; 12939 nvlist_t **child; 12940 uint_t c, children; 12941 vdev_stat_t *vs; 12942 12943 assert(activity == ZPOOL_WAIT_INITIALIZE || 12944 activity == ZPOOL_WAIT_TRIM); 12945 12946 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 12947 (uint64_t **)&vs, &c) == 0); 12948 12949 if (activity == ZPOOL_WAIT_INITIALIZE && 12950 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) 12951 bytes_remaining = vs->vs_initialize_bytes_est - 12952 vs->vs_initialize_bytes_done; 12953 else if (activity == ZPOOL_WAIT_TRIM && 12954 vs->vs_trim_state == VDEV_TRIM_ACTIVE) 12955 bytes_remaining = vs->vs_trim_bytes_est - 12956 vs->vs_trim_bytes_done; 12957 else 12958 bytes_remaining = 0; 12959 12960 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 12961 &child, &children) != 0) 12962 children = 0; 12963 12964 for (c = 0; c < children; c++) 12965 bytes_remaining += vdev_activity_remaining(child[c], activity); 12966 12967 return (bytes_remaining); 12968 } 12969 12970 /* Add up the total number of bytes left to rebuild across top-level vdevs */ 12971 static uint64_t 12972 vdev_activity_top_remaining(nvlist_t *nv) 12973 { 12974 uint64_t bytes_remaining = 0; 12975 nvlist_t **child; 12976 uint_t children; 12977 int error; 12978 12979 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 12980 &child, &children) != 0) 12981 children = 0; 12982 12983 for (uint_t c = 0; c < children; c++) { 12984 vdev_rebuild_stat_t *vrs; 12985 uint_t i; 12986 12987 error = nvlist_lookup_uint64_array(child[c], 12988 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i); 12989 if (error == 0) { 12990 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 12991 bytes_remaining += (vrs->vrs_bytes_est - 12992 vrs->vrs_bytes_rebuilt); 12993 } 12994 } 12995 } 12996 12997 return (bytes_remaining); 12998 } 12999 13000 /* Whether any vdevs are 'spare' or 'replacing' vdevs */ 13001 static boolean_t 13002 vdev_any_spare_replacing(nvlist_t *nv) 13003 { 13004 nvlist_t **child; 13005 uint_t c, children; 13006 const char *vdev_type; 13007 13008 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type); 13009 13010 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 || 13011 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 || 13012 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) { 13013 return (B_TRUE); 13014 } 13015 13016 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 13017 &child, &children) != 0) 13018 children = 0; 13019 13020 for (c = 0; c < children; c++) { 13021 if (vdev_any_spare_replacing(child[c])) 13022 return (B_TRUE); 13023 } 13024 13025 return (B_FALSE); 13026 } 13027 13028 typedef struct wait_data { 13029 char *wd_poolname; 13030 boolean_t wd_scripted; 13031 boolean_t wd_exact; 13032 boolean_t wd_headers_once; 13033 boolean_t wd_should_exit; 13034 /* Which activities to wait for */ 13035 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES]; 13036 float wd_interval; 13037 pthread_cond_t wd_cv; 13038 pthread_mutex_t wd_mutex; 
13039 } wait_data_t; 13040 13041 /* 13042 * Print to stdout a single line, containing one column for each activity that 13043 * we are waiting for specifying how many bytes of work are left for that 13044 * activity. 13045 */ 13046 static void 13047 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row) 13048 { 13049 nvlist_t *config, *nvroot; 13050 uint_t c; 13051 int i; 13052 pool_checkpoint_stat_t *pcs = NULL; 13053 pool_scan_stat_t *pss = NULL; 13054 pool_removal_stat_t *prs = NULL; 13055 pool_raidz_expand_stat_t *pres = NULL; 13056 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE", 13057 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"}; 13058 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES]; 13059 13060 /* Calculate the width of each column */ 13061 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 13062 /* 13063 * Make sure we have enough space in the col for pretty-printed 13064 * numbers and for the column header, and then leave a couple 13065 * spaces between cols for readability. 13066 */ 13067 col_widths[i] = MAX(strlen(headers[i]), 6) + 2; 13068 } 13069 13070 if (timestamp_fmt != NODATE) 13071 print_timestamp(timestamp_fmt); 13072 13073 /* Print header if appropriate */ 13074 int term_height = terminal_height(); 13075 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 && 13076 row % (term_height-1) == 0); 13077 if (!wd->wd_scripted && (row == 0 || reprint_header)) { 13078 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 13079 if (wd->wd_enabled[i]) 13080 (void) printf("%*s", col_widths[i], headers[i]); 13081 } 13082 (void) fputc('\n', stdout); 13083 } 13084 13085 /* Bytes of work remaining in each activity */ 13086 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0}; 13087 13088 bytes_rem[ZPOOL_WAIT_FREE] = 13089 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL); 13090 13091 config = zpool_get_config(zhp, NULL); 13092 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 13093 13094 (void) nvlist_lookup_uint64_array(nvroot, 13095 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 13096 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 13097 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space; 13098 13099 (void) nvlist_lookup_uint64_array(nvroot, 13100 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 13101 if (prs != NULL && prs->prs_state == DSS_SCANNING) 13102 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy - 13103 prs->prs_copied; 13104 13105 (void) nvlist_lookup_uint64_array(nvroot, 13106 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c); 13107 if (pss != NULL && pss->pss_state == DSS_SCANNING && 13108 pss->pss_pass_scrub_pause == 0) { 13109 int64_t rem = pss->pss_to_examine - pss->pss_issued; 13110 if (pss->pss_func == POOL_SCAN_SCRUB) 13111 bytes_rem[ZPOOL_WAIT_SCRUB] = rem; 13112 else 13113 bytes_rem[ZPOOL_WAIT_RESILVER] = rem; 13114 } else if (check_rebuilding(nvroot, NULL)) { 13115 bytes_rem[ZPOOL_WAIT_RESILVER] = 13116 vdev_activity_top_remaining(nvroot); 13117 } 13118 13119 (void) nvlist_lookup_uint64_array(nvroot, 13120 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c); 13121 if (pres != NULL && pres->pres_state == DSS_SCANNING) { 13122 int64_t rem = pres->pres_to_reflow - pres->pres_reflowed; 13123 bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem; 13124 } 13125 13126 bytes_rem[ZPOOL_WAIT_INITIALIZE] = 13127 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE); 13128 bytes_rem[ZPOOL_WAIT_TRIM] = 13129 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM); 13130 13131 /* 13132 * A replace 
finishes after resilvering finishes, so the amount of work 13133 * left for a replace is the same as for resilvering. 13134 * 13135 * It isn't quite correct to say that if we have any 'spare' or 13136 * 'replacing' vdevs and a resilver is happening, then a replace is in 13137 * progress, like we do here. When a hot spare is used, the faulted vdev 13138 * is not removed after the hot spare is resilvered, so parent 'spare' 13139 * vdev is not removed either. So we could have a 'spare' vdev, but be 13140 * resilvering for a different reason. However, we use it as a heuristic 13141 * because we don't have access to the DTLs, which could tell us whether 13142 * or not we have really finished resilvering a hot spare. 13143 */ 13144 if (vdev_any_spare_replacing(nvroot)) 13145 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER]; 13146 13147 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 13148 char buf[64]; 13149 if (!wd->wd_enabled[i]) 13150 continue; 13151 13152 if (wd->wd_exact) { 13153 (void) snprintf(buf, sizeof (buf), "%" PRIi64, 13154 bytes_rem[i]); 13155 } else { 13156 zfs_nicenum(bytes_rem[i], buf, sizeof (buf)); 13157 } 13158 13159 if (wd->wd_scripted) 13160 (void) printf(i == 0 ? "%s" : "\t%s", buf); 13161 else 13162 (void) printf(" %*s", col_widths[i] - 1, buf); 13163 } 13164 (void) printf("\n"); 13165 (void) fflush(stdout); 13166 } 13167 13168 static void * 13169 wait_status_thread(void *arg) 13170 { 13171 wait_data_t *wd = (wait_data_t *)arg; 13172 zpool_handle_t *zhp; 13173 13174 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL) 13175 return (void *)(1); 13176 13177 for (int row = 0; ; row++) { 13178 boolean_t missing; 13179 struct timespec timeout; 13180 int ret = 0; 13181 (void) clock_gettime(CLOCK_REALTIME, &timeout); 13182 13183 if (zpool_refresh_stats(zhp, &missing) != 0 || missing || 13184 zpool_props_refresh(zhp) != 0) { 13185 zpool_close(zhp); 13186 return (void *)(uintptr_t)(missing ? 0 : 1); 13187 } 13188 13189 print_wait_status_row(wd, zhp, row); 13190 13191 timeout.tv_sec += floor(wd->wd_interval); 13192 long nanos = timeout.tv_nsec + 13193 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC; 13194 if (nanos >= NANOSEC) { 13195 timeout.tv_sec++; 13196 timeout.tv_nsec = nanos - NANOSEC; 13197 } else { 13198 timeout.tv_nsec = nanos; 13199 } 13200 pthread_mutex_lock(&wd->wd_mutex); 13201 if (!wd->wd_should_exit) 13202 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex, 13203 &timeout); 13204 pthread_mutex_unlock(&wd->wd_mutex); 13205 if (ret == 0) { 13206 break; /* signaled by main thread */ 13207 } else if (ret != ETIMEDOUT) { 13208 (void) fprintf(stderr, gettext("pthread_cond_timedwait " 13209 "failed: %s\n"), strerror(ret)); 13210 zpool_close(zhp); 13211 return (void *)(uintptr_t)(1); 13212 } 13213 } 13214 13215 zpool_close(zhp); 13216 return (void *)(0); 13217 } 13218 13219 int 13220 zpool_do_wait(int argc, char **argv) 13221 { 13222 boolean_t verbose = B_FALSE; 13223 int c, i; 13224 unsigned long count; 13225 pthread_t status_thr; 13226 int error = 0; 13227 zpool_handle_t *zhp; 13228 13229 wait_data_t wd; 13230 wd.wd_scripted = B_FALSE; 13231 wd.wd_exact = B_FALSE; 13232 wd.wd_headers_once = B_FALSE; 13233 wd.wd_should_exit = B_FALSE; 13234 13235 pthread_mutex_init(&wd.wd_mutex, NULL); 13236 pthread_cond_init(&wd.wd_cv, NULL); 13237 13238 /* By default, wait for all types of activity. 
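 * The -t option below replaces this set with only the activities the
 * user names.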
*/ 13239 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) 13240 wd.wd_enabled[i] = B_TRUE; 13241 13242 while ((c = getopt(argc, argv, "HnpT:t:")) != -1) { 13243 switch (c) { 13244 case 'H': 13245 wd.wd_scripted = B_TRUE; 13246 break; 13247 case 'n': 13248 wd.wd_headers_once = B_TRUE; 13249 break; 13250 case 'p': 13251 wd.wd_exact = B_TRUE; 13252 break; 13253 case 'T': 13254 get_timestamp_arg(*optarg); 13255 break; 13256 case 't': 13257 /* Reset activities array */ 13258 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled)); 13259 13260 for (char *tok; (tok = strsep(&optarg, ",")); ) { 13261 static const char *const col_opts[] = { 13262 "discard", "free", "initialize", "replace", 13263 "remove", "resilver", "scrub", "trim", 13264 "raidz_expand" }; 13265 13266 for (i = 0; i < ARRAY_SIZE(col_opts); ++i) 13267 if (strcmp(tok, col_opts[i]) == 0) { 13268 wd.wd_enabled[i] = B_TRUE; 13269 goto found; 13270 } 13271 13272 (void) fprintf(stderr, 13273 gettext("invalid activity '%s'\n"), tok); 13274 usage(B_FALSE); 13275 found:; 13276 } 13277 break; 13278 case '?': 13279 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 13280 optopt); 13281 usage(B_FALSE); 13282 } 13283 } 13284 13285 argc -= optind; 13286 argv += optind; 13287 13288 get_interval_count(&argc, argv, &wd.wd_interval, &count); 13289 if (count != 0) { 13290 /* This subcmd only accepts an interval, not a count */ 13291 (void) fprintf(stderr, gettext("too many arguments\n")); 13292 usage(B_FALSE); 13293 } 13294 13295 if (wd.wd_interval != 0) 13296 verbose = B_TRUE; 13297 13298 if (argc < 1) { 13299 (void) fprintf(stderr, gettext("missing 'pool' argument\n")); 13300 usage(B_FALSE); 13301 } 13302 if (argc > 1) { 13303 (void) fprintf(stderr, gettext("too many arguments\n")); 13304 usage(B_FALSE); 13305 } 13306 13307 wd.wd_poolname = argv[0]; 13308 13309 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL) 13310 return (1); 13311 13312 if (verbose) { 13313 /* 13314 * We use a separate thread for printing status updates because 13315 * the main thread will call lzc_wait(), which blocks as long 13316 * as an activity is in progress, which can be a long time. 13317 */ 13318 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd) 13319 != 0) { 13320 (void) fprintf(stderr, gettext("failed to create status " 13321 "thread: %s\n"), strerror(errno)); 13322 zpool_close(zhp); 13323 return (1); 13324 } 13325 } 13326 13327 /* 13328 * Loop over all activities that we are supposed to wait for until none 13329 * of them are in progress. Note that this means we can end up waiting 13330 * for more activities to complete than just those that were in progress 13331 * when we began waiting; if an activity we are interested in begins 13332 * while we are waiting for another activity, we will wait for both to 13333 * complete before exiting.
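 *
 * For example, with a hypothetical pool "tank", 'zpool wait -t
 * scrub,resilver tank' returns only once neither a scrub nor a resilver
 * is running on the pool.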
13334 */ 13335 for (;;) { 13336 boolean_t missing = B_FALSE; 13337 boolean_t any_waited = B_FALSE; 13338 13339 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 13340 boolean_t waited; 13341 13342 if (!wd.wd_enabled[i]) 13343 continue; 13344 13345 error = zpool_wait_status(zhp, i, &missing, &waited); 13346 if (error != 0 || missing) 13347 break; 13348 13349 any_waited = (any_waited || waited); 13350 } 13351 13352 if (error != 0 || missing || !any_waited) 13353 break; 13354 } 13355 13356 zpool_close(zhp); 13357 13358 if (verbose) { 13359 uintptr_t status; 13360 pthread_mutex_lock(&wd.wd_mutex); 13361 wd.wd_should_exit = B_TRUE; 13362 pthread_cond_signal(&wd.wd_cv); 13363 pthread_mutex_unlock(&wd.wd_mutex); 13364 (void) pthread_join(status_thr, (void *)&status); 13365 if (status != 0) 13366 error = status; 13367 } 13368 13369 pthread_mutex_destroy(&wd.wd_mutex); 13370 pthread_cond_destroy(&wd.wd_cv); 13371 return (error); 13372 } 13373 13374 /* 13375 * zpool ddtprune -d|-p <amount> <pool> 13376 * 13377 * -d <days> Prune entries <days> old and older 13378 * -p <percent> Prune <percent> amount of entries 13379 * 13380 * Prune single reference entries from DDT to satisfy the amount specified. 13381 */ 13382 int 13383 zpool_do_ddt_prune(int argc, char **argv) 13384 { 13385 zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE; 13386 uint64_t amount = 0; 13387 zpool_handle_t *zhp; 13388 char *endptr; 13389 int c; 13390 13391 while ((c = getopt(argc, argv, "d:p:")) != -1) { 13392 switch (c) { 13393 case 'd': 13394 if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) { 13395 (void) fprintf(stderr, gettext("-d cannot be " 13396 "combined with -p option\n")); 13397 usage(B_FALSE); 13398 } 13399 errno = 0; 13400 amount = strtoull(optarg, &endptr, 0); 13401 if (errno != 0 || *endptr != '\0' || amount == 0) { 13402 (void) fprintf(stderr, 13403 gettext("invalid days value\n")); 13404 usage(B_FALSE); 13405 } 13406 amount *= 86400; /* convert days to seconds */ 13407 unit = ZPOOL_DDT_PRUNE_AGE; 13408 break; 13409 case 'p': 13410 if (unit == ZPOOL_DDT_PRUNE_AGE) { 13411 (void) fprintf(stderr, gettext("-p cannot be " 13412 "combined with -d option\n")); 13413 usage(B_FALSE); 13414 } 13415 errno = 0; 13416 amount = strtoull(optarg, &endptr, 0); 13417 if (errno != 0 || *endptr != '\0' || 13418 amount == 0 || amount > 100) { 13419 (void) fprintf(stderr, 13420 gettext("invalid percentage value\n")); 13421 usage(B_FALSE); 13422 } 13423 unit = ZPOOL_DDT_PRUNE_PERCENTAGE; 13424 break; 13425 case '?': 13426 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 13427 optopt); 13428 usage(B_FALSE); 13429 } 13430 } 13431 argc -= optind; 13432 argv += optind; 13433 13434 if (unit == ZPOOL_DDT_PRUNE_NONE) { 13435 (void) fprintf(stderr, 13436 gettext("missing amount option (-d|-p <value>)\n")); 13437 usage(B_FALSE); 13438 } else if (argc < 1) { 13439 (void) fprintf(stderr, gettext("missing pool argument\n")); 13440 usage(B_FALSE); 13441 } else if (argc > 1) { 13442 (void) fprintf(stderr, gettext("too many arguments\n")); 13443 usage(B_FALSE); 13444 } 13445 zhp = zpool_open(g_zfs, argv[0]); 13446 if (zhp == NULL) 13447 return (-1); 13448 13449 int error = zpool_ddt_prune(zhp, unit, amount); 13450 13451 zpool_close(zhp); 13452 13453 return (error); 13454 } 13455 13456 static int 13457 find_command_idx(const char *command, int *idx) 13458 { 13459 for (int i = 0; i < NCOMMAND; ++i) { 13460 if (command_table[i].name == NULL) 13461 continue; 13462 13463 if (strcmp(command, command_table[i].name) == 0) { 13464 *idx = i; 13465 return (0); 
13466 } 13467 } 13468 return (1); 13469 } 13470 13471 /* 13472 * Display version message 13473 */ 13474 static int 13475 zpool_do_version(int argc, char **argv) 13476 { 13477 int c; 13478 nvlist_t *jsobj = NULL, *zfs_ver = NULL; 13479 boolean_t json = B_FALSE; 13480 while ((c = getopt(argc, argv, "j")) != -1) { 13481 switch (c) { 13482 case 'j': 13483 json = B_TRUE; 13484 jsobj = zpool_json_schema(0, 1); 13485 break; 13486 case '?': 13487 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 13488 optopt); 13489 usage(B_FALSE); 13490 } 13491 } 13492 13493 argc -= optind; 13494 if (argc != 0) { 13495 (void) fprintf(stderr, "too many arguments\n"); 13496 usage(B_FALSE); 13497 } 13498 13499 if (json) { 13500 zfs_ver = zfs_version_nvlist(); 13501 if (zfs_ver) { 13502 fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver); 13503 zcmd_print_json(jsobj); 13504 fnvlist_free(zfs_ver); 13505 return (0); 13506 } else 13507 return (-1); 13508 } else 13509 return (zfs_version_print() != 0); 13510 } 13511 13512 /* Display documentation */ 13513 static int 13514 zpool_do_help(int argc, char **argv) 13515 { 13516 char page[MAXNAMELEN]; 13517 if (argc < 3 || strcmp(argv[2], "zpool") == 0) 13518 strcpy(page, "zpool"); 13519 else if (strcmp(argv[2], "concepts") == 0 || 13520 strcmp(argv[2], "props") == 0) 13521 snprintf(page, sizeof (page), "zpool%s", argv[2]); 13522 else 13523 snprintf(page, sizeof (page), "zpool-%s", argv[2]); 13524 13525 execlp("man", "man", page, NULL); 13526 13527 fprintf(stderr, "couldn't run man program: %s", strerror(errno)); 13528 return (-1); 13529 } 13530 13531 /* 13532 * Do zpool_load_compat() and print error message on failure 13533 */ 13534 static zpool_compat_status_t 13535 zpool_do_load_compat(const char *compat, boolean_t *list) 13536 { 13537 char report[1024]; 13538 13539 zpool_compat_status_t ret; 13540 13541 ret = zpool_load_compat(compat, list, report, 1024); 13542 switch (ret) { 13543 13544 case ZPOOL_COMPATIBILITY_OK: 13545 break; 13546 13547 case ZPOOL_COMPATIBILITY_NOFILES: 13548 case ZPOOL_COMPATIBILITY_BADFILE: 13549 case ZPOOL_COMPATIBILITY_BADTOKEN: 13550 (void) fprintf(stderr, "Error: %s\n", report); 13551 break; 13552 13553 case ZPOOL_COMPATIBILITY_WARNTOKEN: 13554 (void) fprintf(stderr, "Warning: %s\n", report); 13555 ret = ZPOOL_COMPATIBILITY_OK; 13556 break; 13557 } 13558 return (ret); 13559 } 13560 13561 int 13562 main(int argc, char **argv) 13563 { 13564 int ret = 0; 13565 int i = 0; 13566 char *cmdname; 13567 char **newargv; 13568 13569 (void) setlocale(LC_ALL, ""); 13570 (void) setlocale(LC_NUMERIC, "C"); 13571 (void) textdomain(TEXT_DOMAIN); 13572 srand(time(NULL)); 13573 13574 opterr = 0; 13575 13576 /* 13577 * Make sure the user has specified some command. 13578 */ 13579 if (argc < 2) { 13580 (void) fprintf(stderr, gettext("missing command\n")); 13581 usage(B_FALSE); 13582 } 13583 13584 cmdname = argv[1]; 13585 13586 /* 13587 * Special case '-?' 
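 * and '--help': both print the full usage message for every subcommand.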
13588 */ 13589 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0) 13590 usage(B_TRUE); 13591 13592 /* 13593 * Special case '-V|--version' 13594 */ 13595 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0)) 13596 return (zpool_do_version(argc, argv)); 13597 13598 /* 13599 * Special case 'help' 13600 */ 13601 if (strcmp(cmdname, "help") == 0) 13602 return (zpool_do_help(argc, argv)); 13603 13604 if ((g_zfs = libzfs_init()) == NULL) { 13605 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno)); 13606 return (1); 13607 } 13608 13609 libzfs_print_on_error(g_zfs, B_TRUE); 13610 13611 zfs_save_arguments(argc, argv, history_str, sizeof (history_str)); 13612 13613 /* 13614 * Many commands modify input strings for string parsing reasons. 13615 * We create a copy to protect the original argv. 13616 */ 13617 newargv = safe_malloc((argc + 1) * sizeof (newargv[0])); 13618 for (i = 0; i < argc; i++) 13619 newargv[i] = strdup(argv[i]); 13620 newargv[argc] = NULL; 13621 13622 /* 13623 * Run the appropriate command. 13624 */ 13625 if (find_command_idx(cmdname, &i) == 0) { 13626 current_command = &command_table[i]; 13627 ret = command_table[i].func(argc - 1, newargv + 1); 13628 } else if (strchr(cmdname, '=')) { 13629 verify(find_command_idx("set", &i) == 0); 13630 current_command = &command_table[i]; 13631 ret = command_table[i].func(argc, newargv); 13632 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) { 13633 /* 13634 * 'freeze' is a vile debugging abomination, so we treat 13635 * it as such. 13636 */ 13637 zfs_cmd_t zc = {"\0"}; 13638 13639 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name)); 13640 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc); 13641 if (ret != 0) { 13642 (void) fprintf(stderr, 13643 gettext("failed to freeze pool: %d\n"), errno); 13644 ret = 1; 13645 } 13646 13647 log_history = 0; 13648 } else { 13649 (void) fprintf(stderr, gettext("unrecognized " 13650 "command '%s'\n"), cmdname); 13651 usage(B_FALSE); 13652 ret = 1; 13653 } 13654 13655 for (i = 0; i < argc; i++) 13656 free(newargv[i]); 13657 free(newargv); 13658 13659 if (ret == 0 && log_history) 13660 (void) zpool_log_history(g_zfs, history_str); 13661 13662 libzfs_fini(g_zfs); 13663 13664 /* 13665 * The 'ZFS_ABORT' environment variable causes us to dump core on exit 13666 * for the purposes of running ::findleaks. 13667 */ 13668 if (getenv("ZFS_ABORT") != NULL) { 13669 (void) printf("dumping core by request\n"); 13670 abort(); 13671 } 13672 13673 return (ret); 13674 } 13675