1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2011, 2020 by Delphix. All rights reserved. 26 * Copyright (c) 2012 by Frederik Wessels. All rights reserved. 27 * Copyright (c) 2012 by Cyril Plisko. All rights reserved. 28 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved. 29 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>. 30 * Copyright (c) 2017 Datto Inc. 31 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved. 32 * Copyright (c) 2017, Intel Corporation. 33 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com> 34 */ 35 36 #include <assert.h> 37 #include <ctype.h> 38 #include <dirent.h> 39 #include <errno.h> 40 #include <fcntl.h> 41 #include <getopt.h> 42 #include <libgen.h> 43 #include <libintl.h> 44 #include <libuutil.h> 45 #include <locale.h> 46 #include <pthread.h> 47 #include <stdio.h> 48 #include <stdlib.h> 49 #include <string.h> 50 #include <strings.h> 51 #include <time.h> 52 #include <unistd.h> 53 #include <pwd.h> 54 #include <zone.h> 55 #include <sys/wait.h> 56 #include <zfs_prop.h> 57 #include <sys/fs/zfs.h> 58 #include <sys/stat.h> 59 #include <sys/systeminfo.h> 60 #include <sys/fm/fs/zfs.h> 61 #include <sys/fm/util.h> 62 #include <sys/fm/protocol.h> 63 #include <sys/zfs_ioctl.h> 64 #include <sys/mount.h> 65 #include <sys/sysmacros.h> 66 67 #include <math.h> 68 69 #include <libzfs.h> 70 #include <libzutil.h> 71 72 #include "zpool_util.h" 73 #include "zfs_comutil.h" 74 #include "zfeature_common.h" 75 76 #include "statcommon.h" 77 78 libzfs_handle_t *g_zfs; 79 80 static int zpool_do_create(int, char **); 81 static int zpool_do_destroy(int, char **); 82 83 static int zpool_do_add(int, char **); 84 static int zpool_do_remove(int, char **); 85 static int zpool_do_labelclear(int, char **); 86 87 static int zpool_do_checkpoint(int, char **); 88 89 static int zpool_do_list(int, char **); 90 static int zpool_do_iostat(int, char **); 91 static int zpool_do_status(int, char **); 92 93 static int zpool_do_online(int, char **); 94 static int zpool_do_offline(int, char **); 95 static int zpool_do_clear(int, char **); 96 static int zpool_do_reopen(int, char **); 97 98 static int zpool_do_reguid(int, char **); 99 100 static int zpool_do_attach(int, char **); 101 static int zpool_do_detach(int, char **); 102 static int zpool_do_replace(int, char **); 103 static int zpool_do_split(int, char **); 104 105 static int zpool_do_initialize(int, char **); 106 static int zpool_do_scrub(int, char **); 107 static int zpool_do_resilver(int, char **); 108 static int zpool_do_trim(int, char **); 109 110 static int 
zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif

typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;


/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
	IOS_DEFAULT = 0,
	IOS_LATENCY = 1,
	IOS_QUEUES = 2,
	IOS_L_HISTO = 3,
	IOS_RQ_HISTO = 4,
	IOS_COUNT,	/* always last element */
};

/* iostat_type entries as bitmasks */
#define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
#define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
#define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
#define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
#define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define	IOS_ANYHISTO_M	(IOS_L_HISTO_M | IOS_RQ_HISTO_M)

/*
 * Lookup table for iostat flags to nvlist names.  Basically a list
 * of all the nvlists a flag requires.  Also specifies the order in
 * which data gets printed in zpool iostat.
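 *
 * A worked example of how a flag bit selects a row of this table (the
 * cb_flags value is hypothetical; IOS_HISTO_IDX is defined just below):
 *
 *	uint64_t cb_flags = IOS_RQ_HISTO_M;
 *	int idx = IOS_HISTO_IDX(cb_flags);
 *	const char **names = vsx_type_to_nvlist[idx];
 *
 * Here highbit64(0x10) - 1 == 4 == IOS_RQ_HISTO, so 'names' walks the
 * request-size histogram nvlist keys, in print order, until NULL.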
208 */ 209 static const char *vsx_type_to_nvlist[IOS_COUNT][13] = { 210 [IOS_L_HISTO] = { 211 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, 212 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, 213 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, 214 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, 215 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO, 216 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO, 217 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO, 218 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO, 219 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO, 220 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, 221 NULL}, 222 [IOS_LATENCY] = { 223 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, 224 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, 225 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, 226 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, 227 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, 228 NULL}, 229 [IOS_QUEUES] = { 230 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, 231 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, 232 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, 233 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, 234 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, 235 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE, 236 NULL}, 237 [IOS_RQ_HISTO] = { 238 ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO, 239 ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO, 240 ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO, 241 ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO, 242 ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO, 243 ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO, 244 ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO, 245 ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO, 246 ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO, 247 ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO, 248 ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO, 249 ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO, 250 NULL}, 251 }; 252 253 254 /* 255 * Given a cb->cb_flags with a histogram bit set, return the iostat_type. 256 * Right now, only one histo bit is ever set at one time, so we can 257 * just do a highbit64(a) 258 */ 259 #define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1) 260 261 typedef struct zpool_command { 262 const char *name; 263 int (*func)(int, char **); 264 zpool_help_t usage; 265 } zpool_command_t; 266 267 /* 268 * Master command table. Each ZFS command has a name, associated function, and 269 * usage message. The usage messages need to be internationalized, so we have 270 * to have a function to return the usage message based on a command index. 271 * 272 * These commands are organized according to how they are displayed in the usage 273 * message. An empty command (one with a NULL name) indicates an empty line in 274 * the generic usage message. 
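 *
 * A minimal sketch of how a dispatcher consumes this table (illustrative
 * only; the real lookup happens in main(), outside this excerpt):
 *
 *	for (i = 0; i < NCOMMAND; i++) {
 *		if (command_table[i].name != NULL &&
 *		    strcmp(cmdname, command_table[i].name) == 0) {
 *			current_command = &command_table[i];
 *			ret = command_table[i].func(argc - 1, argv + 1);
 *			break;
 *		}
 *	}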
275 */ 276 static zpool_command_t command_table[] = { 277 { "version", zpool_do_version, HELP_VERSION }, 278 { NULL }, 279 { "create", zpool_do_create, HELP_CREATE }, 280 { "destroy", zpool_do_destroy, HELP_DESTROY }, 281 { NULL }, 282 { "add", zpool_do_add, HELP_ADD }, 283 { "remove", zpool_do_remove, HELP_REMOVE }, 284 { NULL }, 285 { "labelclear", zpool_do_labelclear, HELP_LABELCLEAR }, 286 { NULL }, 287 { "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT }, 288 { NULL }, 289 { "list", zpool_do_list, HELP_LIST }, 290 { "iostat", zpool_do_iostat, HELP_IOSTAT }, 291 { "status", zpool_do_status, HELP_STATUS }, 292 { NULL }, 293 { "online", zpool_do_online, HELP_ONLINE }, 294 { "offline", zpool_do_offline, HELP_OFFLINE }, 295 { "clear", zpool_do_clear, HELP_CLEAR }, 296 { "reopen", zpool_do_reopen, HELP_REOPEN }, 297 { NULL }, 298 { "attach", zpool_do_attach, HELP_ATTACH }, 299 { "detach", zpool_do_detach, HELP_DETACH }, 300 { "replace", zpool_do_replace, HELP_REPLACE }, 301 { "split", zpool_do_split, HELP_SPLIT }, 302 { NULL }, 303 { "initialize", zpool_do_initialize, HELP_INITIALIZE }, 304 { "resilver", zpool_do_resilver, HELP_RESILVER }, 305 { "scrub", zpool_do_scrub, HELP_SCRUB }, 306 { "trim", zpool_do_trim, HELP_TRIM }, 307 { NULL }, 308 { "import", zpool_do_import, HELP_IMPORT }, 309 { "export", zpool_do_export, HELP_EXPORT }, 310 { "upgrade", zpool_do_upgrade, HELP_UPGRADE }, 311 { "reguid", zpool_do_reguid, HELP_REGUID }, 312 { NULL }, 313 { "history", zpool_do_history, HELP_HISTORY }, 314 { "events", zpool_do_events, HELP_EVENTS }, 315 { NULL }, 316 { "get", zpool_do_get, HELP_GET }, 317 { "set", zpool_do_set, HELP_SET }, 318 { "sync", zpool_do_sync, HELP_SYNC }, 319 { NULL }, 320 { "wait", zpool_do_wait, HELP_WAIT }, 321 }; 322 323 #define NCOMMAND (ARRAY_SIZE(command_table)) 324 325 #define VDEV_ALLOC_CLASS_LOGS "logs" 326 327 static zpool_command_t *current_command; 328 static char history_str[HIS_MAX_RECORD_LEN]; 329 static boolean_t log_history = B_TRUE; 330 static uint_t timestamp_fmt = NODATE; 331 332 static const char * 333 get_usage(zpool_help_t idx) 334 { 335 switch (idx) { 336 case HELP_ADD: 337 return (gettext("\tadd [-fgLnP] [-o property=value] " 338 "<pool> <vdev> ...\n")); 339 case HELP_ATTACH: 340 return (gettext("\tattach [-fsw] [-o property=value] " 341 "<pool> <device> <new-device>\n")); 342 case HELP_CLEAR: 343 return (gettext("\tclear [-nF] <pool> [device]\n")); 344 case HELP_CREATE: 345 return (gettext("\tcreate [-fnd] [-o property=value] ... \n" 346 "\t [-O file-system-property=value] ... \n" 347 "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n")); 348 case HELP_CHECKPOINT: 349 return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n")); 350 case HELP_DESTROY: 351 return (gettext("\tdestroy [-f] <pool>\n")); 352 case HELP_DETACH: 353 return (gettext("\tdetach <pool> <device>\n")); 354 case HELP_EXPORT: 355 return (gettext("\texport [-af] <pool> ...\n")); 356 case HELP_HISTORY: 357 return (gettext("\thistory [-il] [<pool>] ...\n")); 358 case HELP_IMPORT: 359 return (gettext("\timport [-d dir] [-D]\n" 360 "\timport [-o mntopts] [-o property=value] ... \n" 361 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] " 362 "[-R root] [-F [-n]] -a\n" 363 "\timport [-o mntopts] [-o property=value] ... 
\n" 364 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] " 365 "[-R root] [-F [-n]]\n" 366 "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n")); 367 case HELP_IOSTAT: 368 return (gettext("\tiostat [[[-c [script1,script2,...]" 369 "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n" 370 "\t [[pool ...]|[pool vdev ...]|[vdev ...]]" 371 " [[-n] interval [count]]\n")); 372 case HELP_LABELCLEAR: 373 return (gettext("\tlabelclear [-f] <vdev>\n")); 374 case HELP_LIST: 375 return (gettext("\tlist [-gHLpPv] [-o property[,...]] " 376 "[-T d|u] [pool] ... \n" 377 "\t [interval [count]]\n")); 378 case HELP_OFFLINE: 379 return (gettext("\toffline [-f] [-t] <pool> <device> ...\n")); 380 case HELP_ONLINE: 381 return (gettext("\tonline [-e] <pool> <device> ...\n")); 382 case HELP_REPLACE: 383 return (gettext("\treplace [-fsw] [-o property=value] " 384 "<pool> <device> [new-device]\n")); 385 case HELP_REMOVE: 386 return (gettext("\tremove [-npsw] <pool> <device> ...\n")); 387 case HELP_REOPEN: 388 return (gettext("\treopen [-n] <pool>\n")); 389 case HELP_INITIALIZE: 390 return (gettext("\tinitialize [-c | -s] [-w] <pool> " 391 "[<device> ...]\n")); 392 case HELP_SCRUB: 393 return (gettext("\tscrub [-s | -p] [-w] <pool> ...\n")); 394 case HELP_RESILVER: 395 return (gettext("\tresilver <pool> ...\n")); 396 case HELP_TRIM: 397 return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> " 398 "[<device> ...]\n")); 399 case HELP_STATUS: 400 return (gettext("\tstatus [-c [script1,script2,...]] " 401 "[-igLpPstvxD] [-T d|u] [pool] ... \n" 402 "\t [interval [count]]\n")); 403 case HELP_UPGRADE: 404 return (gettext("\tupgrade\n" 405 "\tupgrade -v\n" 406 "\tupgrade [-V version] <-a | pool ...>\n")); 407 case HELP_EVENTS: 408 return (gettext("\tevents [-vHf [pool] | -c]\n")); 409 case HELP_GET: 410 return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] " 411 "<\"all\" | property[,...]> <pool> ...\n")); 412 case HELP_SET: 413 return (gettext("\tset <property=value> <pool> \n")); 414 case HELP_SPLIT: 415 return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n" 416 "\t [-o property=value] <pool> <newpool> " 417 "[<device> ...]\n")); 418 case HELP_REGUID: 419 return (gettext("\treguid <pool>\n")); 420 case HELP_SYNC: 421 return (gettext("\tsync [pool] ...\n")); 422 case HELP_VERSION: 423 return (gettext("\tversion\n")); 424 case HELP_WAIT: 425 return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] " 426 "<pool> [interval]\n")); 427 } 428 429 abort(); 430 /* NOTREACHED */ 431 } 432 433 static void 434 zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res) 435 { 436 uint_t children = 0; 437 nvlist_t **child; 438 uint_t i; 439 440 (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 441 &child, &children); 442 443 if (children == 0) { 444 char *path = zpool_vdev_name(g_zfs, zhp, nvroot, 445 VDEV_NAME_PATH); 446 447 if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 && 448 strcmp(path, VDEV_TYPE_HOLE) != 0) 449 fnvlist_add_boolean(res, path); 450 451 free(path); 452 return; 453 } 454 455 for (i = 0; i < children; i++) { 456 zpool_collect_leaves(zhp, child[i], res); 457 } 458 } 459 460 /* 461 * Callback routine that will print out a pool property value. 
462 */ 463 static int 464 print_prop_cb(int prop, void *cb) 465 { 466 FILE *fp = cb; 467 468 (void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop)); 469 470 if (zpool_prop_readonly(prop)) 471 (void) fprintf(fp, " NO "); 472 else 473 (void) fprintf(fp, " YES "); 474 475 if (zpool_prop_values(prop) == NULL) 476 (void) fprintf(fp, "-\n"); 477 else 478 (void) fprintf(fp, "%s\n", zpool_prop_values(prop)); 479 480 return (ZPROP_CONT); 481 } 482 483 /* 484 * Display usage message. If we're inside a command, display only the usage for 485 * that command. Otherwise, iterate over the entire command table and display 486 * a complete usage message. 487 */ 488 static void 489 usage(boolean_t requested) 490 { 491 FILE *fp = requested ? stdout : stderr; 492 493 if (current_command == NULL) { 494 int i; 495 496 (void) fprintf(fp, gettext("usage: zpool command args ...\n")); 497 (void) fprintf(fp, 498 gettext("where 'command' is one of the following:\n\n")); 499 500 for (i = 0; i < NCOMMAND; i++) { 501 if (command_table[i].name == NULL) 502 (void) fprintf(fp, "\n"); 503 else 504 (void) fprintf(fp, "%s", 505 get_usage(command_table[i].usage)); 506 } 507 } else { 508 (void) fprintf(fp, gettext("usage:\n")); 509 (void) fprintf(fp, "%s", get_usage(current_command->usage)); 510 } 511 512 if (current_command != NULL && 513 ((strcmp(current_command->name, "set") == 0) || 514 (strcmp(current_command->name, "get") == 0) || 515 (strcmp(current_command->name, "list") == 0))) { 516 517 (void) fprintf(fp, 518 gettext("\nthe following properties are supported:\n")); 519 520 (void) fprintf(fp, "\n\t%-19s %s %s\n\n", 521 "PROPERTY", "EDIT", "VALUES"); 522 523 /* Iterate over all properties */ 524 (void) zprop_iter(print_prop_cb, fp, B_FALSE, B_TRUE, 525 ZFS_TYPE_POOL); 526 527 (void) fprintf(fp, "\t%-19s ", "feature@..."); 528 (void) fprintf(fp, "YES disabled | enabled | active\n"); 529 530 (void) fprintf(fp, gettext("\nThe feature@ properties must be " 531 "appended with a feature name.\nSee zpool-features(5).\n")); 532 } 533 534 /* 535 * See comments at end of main(). 536 */ 537 if (getenv("ZFS_ABORT") != NULL) { 538 (void) printf("dumping core by request\n"); 539 abort(); 540 } 541 542 exit(requested ? 0 : 2); 543 } 544 545 /* 546 * zpool initialize [-c | -s] [-w] <pool> [<vdev> ...] 547 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool 548 * if none specified. 549 * 550 * -c Cancel. Ends active initializing. 551 * -s Suspend. Initializing can then be restarted with no flags. 552 * -w Wait. Blocks until initializing has completed. 
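 *
 * A sketch of how the options map onto the libzfs calls made below (pool
 * and device names are hypothetical):
 *
 *	zpool initialize tank sda    ->  zpool_initialize(zhp, POOL_INITIALIZE_START, vdevs)
 *	zpool initialize -s tank     ->  zpool_initialize(zhp, POOL_INITIALIZE_SUSPEND, vdevs)
 *	zpool initialize -c tank     ->  zpool_initialize(zhp, POOL_INITIALIZE_CANCEL, vdevs)
 *	zpool initialize -w tank     ->  zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vdevs)
 *
 * where 'vdevs' is the nvlist built by zpool_collect_leaves() when no
 * devices are named explicitly.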
553 */ 554 int 555 zpool_do_initialize(int argc, char **argv) 556 { 557 int c; 558 char *poolname; 559 zpool_handle_t *zhp; 560 nvlist_t *vdevs; 561 int err = 0; 562 boolean_t wait = B_FALSE; 563 564 struct option long_options[] = { 565 {"cancel", no_argument, NULL, 'c'}, 566 {"suspend", no_argument, NULL, 's'}, 567 {"wait", no_argument, NULL, 'w'}, 568 {0, 0, 0, 0} 569 }; 570 571 pool_initialize_func_t cmd_type = POOL_INITIALIZE_START; 572 while ((c = getopt_long(argc, argv, "csw", long_options, NULL)) != -1) { 573 switch (c) { 574 case 'c': 575 if (cmd_type != POOL_INITIALIZE_START && 576 cmd_type != POOL_INITIALIZE_CANCEL) { 577 (void) fprintf(stderr, gettext("-c cannot be " 578 "combined with other options\n")); 579 usage(B_FALSE); 580 } 581 cmd_type = POOL_INITIALIZE_CANCEL; 582 break; 583 case 's': 584 if (cmd_type != POOL_INITIALIZE_START && 585 cmd_type != POOL_INITIALIZE_SUSPEND) { 586 (void) fprintf(stderr, gettext("-s cannot be " 587 "combined with other options\n")); 588 usage(B_FALSE); 589 } 590 cmd_type = POOL_INITIALIZE_SUSPEND; 591 break; 592 case 'w': 593 wait = B_TRUE; 594 break; 595 case '?': 596 if (optopt != 0) { 597 (void) fprintf(stderr, 598 gettext("invalid option '%c'\n"), optopt); 599 } else { 600 (void) fprintf(stderr, 601 gettext("invalid option '%s'\n"), 602 argv[optind - 1]); 603 } 604 usage(B_FALSE); 605 } 606 } 607 608 argc -= optind; 609 argv += optind; 610 611 if (argc < 1) { 612 (void) fprintf(stderr, gettext("missing pool name argument\n")); 613 usage(B_FALSE); 614 return (-1); 615 } 616 617 if (wait && (cmd_type != POOL_INITIALIZE_START)) { 618 (void) fprintf(stderr, gettext("-w cannot be used with -c or " 619 "-s\n")); 620 usage(B_FALSE); 621 } 622 623 poolname = argv[0]; 624 zhp = zpool_open(g_zfs, poolname); 625 if (zhp == NULL) 626 return (-1); 627 628 vdevs = fnvlist_alloc(); 629 if (argc == 1) { 630 /* no individual leaf vdevs specified, so add them all */ 631 nvlist_t *config = zpool_get_config(zhp, NULL); 632 nvlist_t *nvroot = fnvlist_lookup_nvlist(config, 633 ZPOOL_CONFIG_VDEV_TREE); 634 zpool_collect_leaves(zhp, nvroot, vdevs); 635 } else { 636 for (int i = 1; i < argc; i++) { 637 fnvlist_add_boolean(vdevs, argv[i]); 638 } 639 } 640 641 if (wait) 642 err = zpool_initialize_wait(zhp, cmd_type, vdevs); 643 else 644 err = zpool_initialize(zhp, cmd_type, vdevs); 645 646 fnvlist_free(vdevs); 647 zpool_close(zhp); 648 649 return (err); 650 } 651 652 /* 653 * print a pool vdev config for dry runs 654 */ 655 static void 656 print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent, 657 const char *match, int name_flags) 658 { 659 nvlist_t **child; 660 uint_t c, children; 661 char *vname; 662 boolean_t printed = B_FALSE; 663 664 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 665 &child, &children) != 0) { 666 if (name != NULL) 667 (void) printf("\t%*s%s\n", indent, "", name); 668 return; 669 } 670 671 for (c = 0; c < children; c++) { 672 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 673 char *class = ""; 674 675 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 676 &is_hole); 677 678 if (is_hole == B_TRUE) { 679 continue; 680 } 681 682 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 683 &is_log); 684 if (is_log) 685 class = VDEV_ALLOC_BIAS_LOG; 686 (void) nvlist_lookup_string(child[c], 687 ZPOOL_CONFIG_ALLOCATION_BIAS, &class); 688 if (strcmp(match, class) != 0) 689 continue; 690 691 if (!printed && name != NULL) { 692 (void) printf("\t%*s%s\n", indent, "", name); 693 printed = B_TRUE; 694 } 695 
vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags); 696 print_vdev_tree(zhp, vname, child[c], indent + 2, "", 697 name_flags); 698 free(vname); 699 } 700 } 701 702 /* 703 * Print the list of l2cache devices for dry runs. 704 */ 705 static void 706 print_cache_list(nvlist_t *nv, int indent) 707 { 708 nvlist_t **child; 709 uint_t c, children; 710 711 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 712 &child, &children) == 0 && children > 0) { 713 (void) printf("\t%*s%s\n", indent, "", "cache"); 714 } else { 715 return; 716 } 717 for (c = 0; c < children; c++) { 718 char *vname; 719 720 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0); 721 (void) printf("\t%*s%s\n", indent + 2, "", vname); 722 free(vname); 723 } 724 } 725 726 /* 727 * Print the list of spares for dry runs. 728 */ 729 static void 730 print_spare_list(nvlist_t *nv, int indent) 731 { 732 nvlist_t **child; 733 uint_t c, children; 734 735 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 736 &child, &children) == 0 && children > 0) { 737 (void) printf("\t%*s%s\n", indent, "", "spares"); 738 } else { 739 return; 740 } 741 for (c = 0; c < children; c++) { 742 char *vname; 743 744 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0); 745 (void) printf("\t%*s%s\n", indent + 2, "", vname); 746 free(vname); 747 } 748 } 749 750 static boolean_t 751 prop_list_contains_feature(nvlist_t *proplist) 752 { 753 nvpair_t *nvp; 754 for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp; 755 nvp = nvlist_next_nvpair(proplist, nvp)) { 756 if (zpool_prop_feature(nvpair_name(nvp))) 757 return (B_TRUE); 758 } 759 return (B_FALSE); 760 } 761 762 /* 763 * Add a property pair (name, string-value) into a property nvlist. 764 */ 765 static int 766 add_prop_list(const char *propname, char *propval, nvlist_t **props, 767 boolean_t poolprop) 768 { 769 zpool_prop_t prop = ZPOOL_PROP_INVAL; 770 nvlist_t *proplist; 771 const char *normnm; 772 char *strval; 773 774 if (*props == NULL && 775 nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) { 776 (void) fprintf(stderr, 777 gettext("internal error: out of memory\n")); 778 return (1); 779 } 780 781 proplist = *props; 782 783 if (poolprop) { 784 const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION); 785 786 if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL && 787 !zpool_prop_feature(propname)) { 788 (void) fprintf(stderr, gettext("property '%s' is " 789 "not a valid pool property\n"), propname); 790 return (2); 791 } 792 793 /* 794 * feature@ properties and version should not be specified 795 * at the same time. 
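 *
 * For example, both of these hypothetical invocations are rejected by
 * the check below, because a pool pinned to a pre-feature-flags version
 * cannot also carry feature@ properties:
 *
 *	zpool create -o version=28 -o feature@async_destroy=enabled tank ...
 *	zpool create -o feature@async_destroy=enabled -o version=28 tank ...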
796 */ 797 if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) && 798 nvlist_exists(proplist, vname)) || 799 (prop == ZPOOL_PROP_VERSION && 800 prop_list_contains_feature(proplist))) { 801 (void) fprintf(stderr, gettext("'feature@' and " 802 "'version' properties cannot be specified " 803 "together\n")); 804 return (2); 805 } 806 807 808 if (zpool_prop_feature(propname)) 809 normnm = propname; 810 else 811 normnm = zpool_prop_to_name(prop); 812 } else { 813 zfs_prop_t fsprop = zfs_name_to_prop(propname); 814 815 if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM, 816 B_FALSE)) { 817 normnm = zfs_prop_to_name(fsprop); 818 } else if (zfs_prop_user(propname) || 819 zfs_prop_userquota(propname)) { 820 normnm = propname; 821 } else { 822 (void) fprintf(stderr, gettext("property '%s' is " 823 "not a valid filesystem property\n"), propname); 824 return (2); 825 } 826 } 827 828 if (nvlist_lookup_string(proplist, normnm, &strval) == 0 && 829 prop != ZPOOL_PROP_CACHEFILE) { 830 (void) fprintf(stderr, gettext("property '%s' " 831 "specified multiple times\n"), propname); 832 return (2); 833 } 834 835 if (nvlist_add_string(proplist, normnm, propval) != 0) { 836 (void) fprintf(stderr, gettext("internal " 837 "error: out of memory\n")); 838 return (1); 839 } 840 841 return (0); 842 } 843 844 /* 845 * Set a default property pair (name, string-value) in a property nvlist 846 */ 847 static int 848 add_prop_list_default(const char *propname, char *propval, nvlist_t **props, 849 boolean_t poolprop) 850 { 851 char *pval; 852 853 if (nvlist_lookup_string(*props, propname, &pval) == 0) 854 return (0); 855 856 return (add_prop_list(propname, propval, props, B_TRUE)); 857 } 858 859 /* 860 * zpool add [-fgLnP] [-o property=value] <pool> <vdev> ... 861 * 862 * -f Force addition of devices, even if they appear in use 863 * -g Display guid for individual vdev name. 864 * -L Follow links when resolving vdev path name. 865 * -n Do not add the devices, but display the resulting layout if 866 * they were to be added. 867 * -o Set property=value. 868 * -P Display full path for vdev name. 869 * 870 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is 871 * handled by make_root_vdev(), which constructs the nvlist needed to pass to 872 * libzfs. 
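 *
 * A condensed sketch of the non-dry-run flow implemented below (error
 * handling, ashift inheritance and the dry-run printing omitted):
 *
 *	zhp = zpool_open(g_zfs, poolname);
 *	nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
 *	    argc, argv);
 *	ret = (zpool_add(zhp, nvroot) != 0);
 *	nvlist_free(nvroot);
 *	zpool_close(zhp);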
873 */ 874 int 875 zpool_do_add(int argc, char **argv) 876 { 877 boolean_t force = B_FALSE; 878 boolean_t dryrun = B_FALSE; 879 int name_flags = 0; 880 int c; 881 nvlist_t *nvroot; 882 char *poolname; 883 int ret; 884 zpool_handle_t *zhp; 885 nvlist_t *config; 886 nvlist_t *props = NULL; 887 char *propval; 888 889 /* check options */ 890 while ((c = getopt(argc, argv, "fgLno:P")) != -1) { 891 switch (c) { 892 case 'f': 893 force = B_TRUE; 894 break; 895 case 'g': 896 name_flags |= VDEV_NAME_GUID; 897 break; 898 case 'L': 899 name_flags |= VDEV_NAME_FOLLOW_LINKS; 900 break; 901 case 'n': 902 dryrun = B_TRUE; 903 break; 904 case 'o': 905 if ((propval = strchr(optarg, '=')) == NULL) { 906 (void) fprintf(stderr, gettext("missing " 907 "'=' for -o option\n")); 908 usage(B_FALSE); 909 } 910 *propval = '\0'; 911 propval++; 912 913 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) || 914 (add_prop_list(optarg, propval, &props, B_TRUE))) 915 usage(B_FALSE); 916 break; 917 case 'P': 918 name_flags |= VDEV_NAME_PATH; 919 break; 920 case '?': 921 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 922 optopt); 923 usage(B_FALSE); 924 } 925 } 926 927 argc -= optind; 928 argv += optind; 929 930 /* get pool name and check number of arguments */ 931 if (argc < 1) { 932 (void) fprintf(stderr, gettext("missing pool name argument\n")); 933 usage(B_FALSE); 934 } 935 if (argc < 2) { 936 (void) fprintf(stderr, gettext("missing vdev specification\n")); 937 usage(B_FALSE); 938 } 939 940 poolname = argv[0]; 941 942 argc--; 943 argv++; 944 945 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 946 return (1); 947 948 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 949 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"), 950 poolname); 951 zpool_close(zhp); 952 return (1); 953 } 954 955 /* unless manually specified use "ashift" pool property (if set) */ 956 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) { 957 int intval; 958 zprop_source_t src; 959 char strval[ZPOOL_MAXPROPLEN]; 960 961 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src); 962 if (src != ZPROP_SRC_DEFAULT) { 963 (void) sprintf(strval, "%" PRId32, intval); 964 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval, 965 &props, B_TRUE) == 0); 966 } 967 } 968 969 /* pass off to make_root_vdev for processing */ 970 nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun, 971 argc, argv); 972 if (nvroot == NULL) { 973 zpool_close(zhp); 974 return (1); 975 } 976 977 if (dryrun) { 978 nvlist_t *poolnvroot; 979 nvlist_t **l2child, **sparechild; 980 uint_t l2children, sparechildren, c; 981 char *vname; 982 boolean_t hadcache = B_FALSE, hadspare = B_FALSE; 983 984 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 985 &poolnvroot) == 0); 986 987 (void) printf(gettext("would update '%s' to the following " 988 "configuration:\n\n"), zpool_get_name(zhp)); 989 990 /* print original main pool and new tree */ 991 print_vdev_tree(zhp, poolname, poolnvroot, 0, "", 992 name_flags | VDEV_NAME_TYPE_ID); 993 print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags); 994 995 /* print other classes: 'dedup', 'special', and 'log' */ 996 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) { 997 print_vdev_tree(zhp, "dedup", poolnvroot, 0, 998 VDEV_ALLOC_BIAS_DEDUP, name_flags); 999 print_vdev_tree(zhp, NULL, nvroot, 0, 1000 VDEV_ALLOC_BIAS_DEDUP, name_flags); 1001 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) { 1002 print_vdev_tree(zhp, "dedup", nvroot, 0, 1003 VDEV_ALLOC_BIAS_DEDUP, name_flags); 1004 } 1005 1006 if 
(zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
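 *
 * A condensed sketch of the common path below, removing a single device
 * and waiting for the evacuation to finish (error handling omitted;
 * 'device' is a hypothetical vdev name):
 *
 *	zhp = zpool_open(g_zfs, poolname);
 *	if (zpool_vdev_remove(zhp, device) != 0)
 *		ret = 1;
 *	if (ret == 0 && wait)
 *		ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
 *	zpool_close(zhp);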
1089 */ 1090 int 1091 zpool_do_remove(int argc, char **argv) 1092 { 1093 char *poolname; 1094 int i, ret = 0; 1095 zpool_handle_t *zhp = NULL; 1096 boolean_t stop = B_FALSE; 1097 int c; 1098 boolean_t noop = B_FALSE; 1099 boolean_t parsable = B_FALSE; 1100 boolean_t wait = B_FALSE; 1101 1102 /* check options */ 1103 while ((c = getopt(argc, argv, "npsw")) != -1) { 1104 switch (c) { 1105 case 'n': 1106 noop = B_TRUE; 1107 break; 1108 case 'p': 1109 parsable = B_TRUE; 1110 break; 1111 case 's': 1112 stop = B_TRUE; 1113 break; 1114 case 'w': 1115 wait = B_TRUE; 1116 break; 1117 case '?': 1118 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 1119 optopt); 1120 usage(B_FALSE); 1121 } 1122 } 1123 1124 argc -= optind; 1125 argv += optind; 1126 1127 /* get pool name and check number of arguments */ 1128 if (argc < 1) { 1129 (void) fprintf(stderr, gettext("missing pool name argument\n")); 1130 usage(B_FALSE); 1131 } 1132 1133 poolname = argv[0]; 1134 1135 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 1136 return (1); 1137 1138 if (stop && noop) { 1139 (void) fprintf(stderr, gettext("stop request ignored\n")); 1140 return (0); 1141 } 1142 1143 if (stop) { 1144 if (argc > 1) { 1145 (void) fprintf(stderr, gettext("too many arguments\n")); 1146 usage(B_FALSE); 1147 } 1148 if (zpool_vdev_remove_cancel(zhp) != 0) 1149 ret = 1; 1150 if (wait) { 1151 (void) fprintf(stderr, gettext("invalid option " 1152 "combination: -w cannot be used with -s\n")); 1153 usage(B_FALSE); 1154 } 1155 } else { 1156 if (argc < 2) { 1157 (void) fprintf(stderr, gettext("missing device\n")); 1158 usage(B_FALSE); 1159 } 1160 1161 for (i = 1; i < argc; i++) { 1162 if (noop) { 1163 uint64_t size; 1164 1165 if (zpool_vdev_indirect_size(zhp, argv[i], 1166 &size) != 0) { 1167 ret = 1; 1168 break; 1169 } 1170 if (parsable) { 1171 (void) printf("%s %llu\n", 1172 argv[i], (unsigned long long)size); 1173 } else { 1174 char valstr[32]; 1175 zfs_nicenum(size, valstr, 1176 sizeof (valstr)); 1177 (void) printf("Memory that will be " 1178 "used after removing %s: %s\n", 1179 argv[i], valstr); 1180 } 1181 } else { 1182 if (zpool_vdev_remove(zhp, argv[i]) != 0) 1183 ret = 1; 1184 } 1185 } 1186 1187 if (ret == 0 && wait) 1188 ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE); 1189 } 1190 zpool_close(zhp); 1191 1192 return (ret); 1193 } 1194 1195 /* 1196 * zpool labelclear [-f] <vdev> 1197 * 1198 * -f Force clearing the label for the vdevs which are members of 1199 * the exported or foreign pools. 1200 * 1201 * Verifies that the vdev is not active and zeros out the label information 1202 * on the device. 1203 */ 1204 int 1205 zpool_do_labelclear(int argc, char **argv) 1206 { 1207 char vdev[MAXPATHLEN]; 1208 char *name = NULL; 1209 struct stat st; 1210 int c, fd = -1, ret = 0; 1211 nvlist_t *config; 1212 pool_state_t state; 1213 boolean_t inuse = B_FALSE; 1214 boolean_t force = B_FALSE; 1215 1216 /* check options */ 1217 while ((c = getopt(argc, argv, "f")) != -1) { 1218 switch (c) { 1219 case 'f': 1220 force = B_TRUE; 1221 break; 1222 default: 1223 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 1224 optopt); 1225 usage(B_FALSE); 1226 } 1227 } 1228 1229 argc -= optind; 1230 argv += optind; 1231 1232 /* get vdev name */ 1233 if (argc < 1) { 1234 (void) fprintf(stderr, gettext("missing vdev name\n")); 1235 usage(B_FALSE); 1236 } 1237 if (argc > 1) { 1238 (void) fprintf(stderr, gettext("too many arguments\n")); 1239 usage(B_FALSE); 1240 } 1241 1242 /* 1243 * Check if we were given absolute path and use it as is. 
1244 * Otherwise if the provided vdev name doesn't point to a file, 1245 * try prepending expected disk paths and partition numbers. 1246 */ 1247 (void) strlcpy(vdev, argv[0], sizeof (vdev)); 1248 if (vdev[0] != '/' && stat(vdev, &st) != 0) { 1249 int error; 1250 1251 error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN); 1252 if (error == 0 && zfs_dev_is_whole_disk(vdev)) { 1253 if (zfs_append_partition(vdev, MAXPATHLEN) == -1) 1254 error = ENOENT; 1255 } 1256 1257 if (error || (stat(vdev, &st) != 0)) { 1258 (void) fprintf(stderr, gettext( 1259 "failed to find device %s, try specifying absolute " 1260 "path instead\n"), argv[0]); 1261 return (1); 1262 } 1263 } 1264 1265 if ((fd = open(vdev, O_RDWR)) < 0) { 1266 (void) fprintf(stderr, gettext("failed to open %s: %s\n"), 1267 vdev, strerror(errno)); 1268 return (1); 1269 } 1270 1271 /* 1272 * Flush all dirty pages for the block device. This should not be 1273 * fatal when the device does not support BLKFLSBUF as would be the 1274 * case for a file vdev. 1275 */ 1276 if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY)) 1277 (void) fprintf(stderr, gettext("failed to invalidate " 1278 "cache for %s: %s\n"), vdev, strerror(errno)); 1279 1280 if (zpool_read_label(fd, &config, NULL) != 0) { 1281 (void) fprintf(stderr, 1282 gettext("failed to read label from %s\n"), vdev); 1283 ret = 1; 1284 goto errout; 1285 } 1286 nvlist_free(config); 1287 1288 ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse); 1289 if (ret != 0) { 1290 (void) fprintf(stderr, 1291 gettext("failed to check state for %s\n"), vdev); 1292 ret = 1; 1293 goto errout; 1294 } 1295 1296 if (!inuse) 1297 goto wipe_label; 1298 1299 switch (state) { 1300 default: 1301 case POOL_STATE_ACTIVE: 1302 case POOL_STATE_SPARE: 1303 case POOL_STATE_L2CACHE: 1304 (void) fprintf(stderr, gettext( 1305 "%s is a member (%s) of pool \"%s\"\n"), 1306 vdev, zpool_pool_state_to_name(state), name); 1307 ret = 1; 1308 goto errout; 1309 1310 case POOL_STATE_EXPORTED: 1311 if (force) 1312 break; 1313 (void) fprintf(stderr, gettext( 1314 "use '-f' to override the following error:\n" 1315 "%s is a member of exported pool \"%s\"\n"), 1316 vdev, name); 1317 ret = 1; 1318 goto errout; 1319 1320 case POOL_STATE_POTENTIALLY_ACTIVE: 1321 if (force) 1322 break; 1323 (void) fprintf(stderr, gettext( 1324 "use '-f' to override the following error:\n" 1325 "%s is a member of potentially active pool \"%s\"\n"), 1326 vdev, name); 1327 ret = 1; 1328 goto errout; 1329 1330 case POOL_STATE_DESTROYED: 1331 /* inuse should never be set for a destroyed pool */ 1332 assert(0); 1333 break; 1334 } 1335 1336 wipe_label: 1337 ret = zpool_clear_label(fd); 1338 if (ret != 0) { 1339 (void) fprintf(stderr, 1340 gettext("failed to clear label for %s\n"), vdev); 1341 } 1342 1343 errout: 1344 free(name); 1345 (void) close(fd); 1346 1347 return (ret); 1348 } 1349 1350 /* 1351 * zpool create [-fnd] [-o property=value] ... 1352 * [-O file-system-property=value] ... 1353 * [-R root] [-m mountpoint] <pool> <dev> ... 1354 * 1355 * -f Force creation, even if devices appear in use 1356 * -n Do not create the pool, but display the resulting layout if it 1357 * were to be created. 1358 * -R Create a pool under an alternate root 1359 * -m Set default mountpoint for the root dataset. By default it's 1360 * '/<pool>' 1361 * -o Set property=value. 1362 * -o Set feature@feature=enabled|disabled. 1363 * -d Don't automatically enable all supported pool features 1364 * (individual features can be enabled with -o). 
1365 * -O Set fsproperty=value in the pool's root file system 1366 * 1367 * Creates the named pool according to the given vdev specification. The 1368 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c. 1369 * Once we get the nvlist back from make_root_vdev(), we either print out the 1370 * contents (if '-n' was specified), or pass it to libzfs to do the creation. 1371 */ 1372 int 1373 zpool_do_create(int argc, char **argv) 1374 { 1375 boolean_t force = B_FALSE; 1376 boolean_t dryrun = B_FALSE; 1377 boolean_t enable_all_pool_feat = B_TRUE; 1378 int c; 1379 nvlist_t *nvroot = NULL; 1380 char *poolname; 1381 char *tname = NULL; 1382 int ret = 1; 1383 char *altroot = NULL; 1384 char *mountpoint = NULL; 1385 nvlist_t *fsprops = NULL; 1386 nvlist_t *props = NULL; 1387 char *propval; 1388 1389 /* check options */ 1390 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) { 1391 switch (c) { 1392 case 'f': 1393 force = B_TRUE; 1394 break; 1395 case 'n': 1396 dryrun = B_TRUE; 1397 break; 1398 case 'd': 1399 enable_all_pool_feat = B_FALSE; 1400 break; 1401 case 'R': 1402 altroot = optarg; 1403 if (add_prop_list(zpool_prop_to_name( 1404 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE)) 1405 goto errout; 1406 if (add_prop_list_default(zpool_prop_to_name( 1407 ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE)) 1408 goto errout; 1409 break; 1410 case 'm': 1411 /* Equivalent to -O mountpoint=optarg */ 1412 mountpoint = optarg; 1413 break; 1414 case 'o': 1415 if ((propval = strchr(optarg, '=')) == NULL) { 1416 (void) fprintf(stderr, gettext("missing " 1417 "'=' for -o option\n")); 1418 goto errout; 1419 } 1420 *propval = '\0'; 1421 propval++; 1422 1423 if (add_prop_list(optarg, propval, &props, B_TRUE)) 1424 goto errout; 1425 1426 /* 1427 * If the user is creating a pool that doesn't support 1428 * feature flags, don't enable any features. 1429 */ 1430 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) { 1431 char *end; 1432 u_longlong_t ver; 1433 1434 ver = strtoull(propval, &end, 10); 1435 if (*end == '\0' && 1436 ver < SPA_VERSION_FEATURES) { 1437 enable_all_pool_feat = B_FALSE; 1438 } 1439 } 1440 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT) 1441 altroot = propval; 1442 break; 1443 case 'O': 1444 if ((propval = strchr(optarg, '=')) == NULL) { 1445 (void) fprintf(stderr, gettext("missing " 1446 "'=' for -O option\n")); 1447 goto errout; 1448 } 1449 *propval = '\0'; 1450 propval++; 1451 1452 /* 1453 * Mountpoints are checked and then added later. 1454 * Uniquely among properties, they can be specified 1455 * more than once, to avoid conflict with -m. 1456 */ 1457 if (0 == strcmp(optarg, 1458 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) { 1459 mountpoint = propval; 1460 } else if (add_prop_list(optarg, propval, &fsprops, 1461 B_FALSE)) { 1462 goto errout; 1463 } 1464 break; 1465 case 't': 1466 /* 1467 * Sanity check temporary pool name. 
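 *
 * For example, a hypothetical "-t tanktmp" is accepted, while
 * "-t tank/tmp" is rejected by the check below: a temporary name must
 * name a pool, not a dataset.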
1468 */ 1469 if (strchr(optarg, '/') != NULL) { 1470 (void) fprintf(stderr, gettext("cannot create " 1471 "'%s': invalid character '/' in temporary " 1472 "name\n"), optarg); 1473 (void) fprintf(stderr, gettext("use 'zfs " 1474 "create' to create a dataset\n")); 1475 goto errout; 1476 } 1477 1478 if (add_prop_list(zpool_prop_to_name( 1479 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE)) 1480 goto errout; 1481 if (add_prop_list_default(zpool_prop_to_name( 1482 ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE)) 1483 goto errout; 1484 tname = optarg; 1485 break; 1486 case ':': 1487 (void) fprintf(stderr, gettext("missing argument for " 1488 "'%c' option\n"), optopt); 1489 goto badusage; 1490 case '?': 1491 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 1492 optopt); 1493 goto badusage; 1494 } 1495 } 1496 1497 argc -= optind; 1498 argv += optind; 1499 1500 /* get pool name and check number of arguments */ 1501 if (argc < 1) { 1502 (void) fprintf(stderr, gettext("missing pool name argument\n")); 1503 goto badusage; 1504 } 1505 if (argc < 2) { 1506 (void) fprintf(stderr, gettext("missing vdev specification\n")); 1507 goto badusage; 1508 } 1509 1510 poolname = argv[0]; 1511 1512 /* 1513 * As a special case, check for use of '/' in the name, and direct the 1514 * user to use 'zfs create' instead. 1515 */ 1516 if (strchr(poolname, '/') != NULL) { 1517 (void) fprintf(stderr, gettext("cannot create '%s': invalid " 1518 "character '/' in pool name\n"), poolname); 1519 (void) fprintf(stderr, gettext("use 'zfs create' to " 1520 "create a dataset\n")); 1521 goto errout; 1522 } 1523 1524 /* pass off to make_root_vdev for bulk processing */ 1525 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun, 1526 argc - 1, argv + 1); 1527 if (nvroot == NULL) 1528 goto errout; 1529 1530 /* make_root_vdev() allows 0 toplevel children if there are spares */ 1531 if (!zfs_allocatable_devs(nvroot)) { 1532 (void) fprintf(stderr, gettext("invalid vdev " 1533 "specification: at least one toplevel vdev must be " 1534 "specified\n")); 1535 goto errout; 1536 } 1537 1538 if (altroot != NULL && altroot[0] != '/') { 1539 (void) fprintf(stderr, gettext("invalid alternate root '%s': " 1540 "must be an absolute path\n"), altroot); 1541 goto errout; 1542 } 1543 1544 /* 1545 * Check the validity of the mountpoint and direct the user to use the 1546 * '-m' mountpoint option if it looks like its in use. 
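 *
 * A worked example of the check below (paths are hypothetical): with
 * "-R /mnt -m /export/tank" the directory probed is "/mnt/export/tank";
 * with neither -m nor -R, pool "tank" is probed at "/tank".  If the
 * probed directory exists and contains anything besides "." and "..",
 * creation is refused and the user is pointed at -m.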
1547 */ 1548 if (mountpoint == NULL || 1549 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 && 1550 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) { 1551 char buf[MAXPATHLEN]; 1552 DIR *dirp; 1553 1554 if (mountpoint && mountpoint[0] != '/') { 1555 (void) fprintf(stderr, gettext("invalid mountpoint " 1556 "'%s': must be an absolute path, 'legacy', or " 1557 "'none'\n"), mountpoint); 1558 goto errout; 1559 } 1560 1561 if (mountpoint == NULL) { 1562 if (altroot != NULL) 1563 (void) snprintf(buf, sizeof (buf), "%s/%s", 1564 altroot, poolname); 1565 else 1566 (void) snprintf(buf, sizeof (buf), "/%s", 1567 poolname); 1568 } else { 1569 if (altroot != NULL) 1570 (void) snprintf(buf, sizeof (buf), "%s%s", 1571 altroot, mountpoint); 1572 else 1573 (void) snprintf(buf, sizeof (buf), "%s", 1574 mountpoint); 1575 } 1576 1577 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) { 1578 (void) fprintf(stderr, gettext("mountpoint '%s' : " 1579 "%s\n"), buf, strerror(errno)); 1580 (void) fprintf(stderr, gettext("use '-m' " 1581 "option to provide a different default\n")); 1582 goto errout; 1583 } else if (dirp) { 1584 int count = 0; 1585 1586 while (count < 3 && readdir(dirp) != NULL) 1587 count++; 1588 (void) closedir(dirp); 1589 1590 if (count > 2) { 1591 (void) fprintf(stderr, gettext("mountpoint " 1592 "'%s' exists and is not empty\n"), buf); 1593 (void) fprintf(stderr, gettext("use '-m' " 1594 "option to provide a " 1595 "different default\n")); 1596 goto errout; 1597 } 1598 } 1599 } 1600 1601 /* 1602 * Now that the mountpoint's validity has been checked, ensure that 1603 * the property is set appropriately prior to creating the pool. 1604 */ 1605 if (mountpoint != NULL) { 1606 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1607 mountpoint, &fsprops, B_FALSE); 1608 if (ret != 0) 1609 goto errout; 1610 } 1611 1612 ret = 1; 1613 if (dryrun) { 1614 /* 1615 * For a dry run invocation, print out a basic message and run 1616 * through all the vdevs in the list and print out in an 1617 * appropriate hierarchy. 1618 */ 1619 (void) printf(gettext("would create '%s' with the " 1620 "following layout:\n\n"), poolname); 1621 1622 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0); 1623 print_vdev_tree(NULL, "dedup", nvroot, 0, 1624 VDEV_ALLOC_BIAS_DEDUP, 0); 1625 print_vdev_tree(NULL, "special", nvroot, 0, 1626 VDEV_ALLOC_BIAS_SPECIAL, 0); 1627 print_vdev_tree(NULL, "logs", nvroot, 0, 1628 VDEV_ALLOC_BIAS_LOG, 0); 1629 print_cache_list(nvroot, 0); 1630 print_spare_list(nvroot, 0); 1631 1632 ret = 0; 1633 } else { 1634 /* 1635 * Hand off to libzfs. 1636 */ 1637 spa_feature_t i; 1638 for (i = 0; i < SPA_FEATURES; i++) { 1639 char propname[MAXPATHLEN]; 1640 char *propval; 1641 zfeature_info_t *feat = &spa_feature_table[i]; 1642 1643 (void) snprintf(propname, sizeof (propname), 1644 "feature@%s", feat->fi_uname); 1645 1646 /* 1647 * Only features contained in props will be enabled: 1648 * remove from the nvlist every ZFS_FEATURE_DISABLED 1649 * value and add every missing ZFS_FEATURE_ENABLED if 1650 * enable_all_pool_feat is set. 
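 *
 * For example, with a hypothetical
 *
 *	zpool create -o feature@encryption=disabled tank sda
 *
 * the disabled entry is dropped from 'props' by the lookup below (so the
 * feature stays disabled), while every feature not mentioned on the
 * command line is added as enabled, since -d was not given.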
1651 */ 1652 if (!nvlist_lookup_string(props, propname, &propval)) { 1653 if (strcmp(propval, ZFS_FEATURE_DISABLED) == 0) 1654 (void) nvlist_remove_all(props, 1655 propname); 1656 } else if (enable_all_pool_feat) { 1657 ret = add_prop_list(propname, 1658 ZFS_FEATURE_ENABLED, &props, B_TRUE); 1659 if (ret != 0) 1660 goto errout; 1661 } 1662 } 1663 1664 ret = 1; 1665 if (zpool_create(g_zfs, poolname, 1666 nvroot, props, fsprops) == 0) { 1667 zfs_handle_t *pool = zfs_open(g_zfs, 1668 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM); 1669 if (pool != NULL) { 1670 if (zfs_mount(pool, NULL, 0) == 0) { 1671 ret = zfs_shareall(pool); 1672 zfs_commit_all_shares(); 1673 } 1674 zfs_close(pool); 1675 } 1676 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) { 1677 (void) fprintf(stderr, gettext("pool name may have " 1678 "been omitted\n")); 1679 } 1680 } 1681 1682 errout: 1683 nvlist_free(nvroot); 1684 nvlist_free(fsprops); 1685 nvlist_free(props); 1686 return (ret); 1687 badusage: 1688 nvlist_free(fsprops); 1689 nvlist_free(props); 1690 usage(B_FALSE); 1691 return (2); 1692 } 1693 1694 /* 1695 * zpool destroy <pool> 1696 * 1697 * -f Forcefully unmount any datasets 1698 * 1699 * Destroy the given pool. Automatically unmounts any datasets in the pool. 1700 */ 1701 int 1702 zpool_do_destroy(int argc, char **argv) 1703 { 1704 boolean_t force = B_FALSE; 1705 int c; 1706 char *pool; 1707 zpool_handle_t *zhp; 1708 int ret; 1709 1710 /* check options */ 1711 while ((c = getopt(argc, argv, "f")) != -1) { 1712 switch (c) { 1713 case 'f': 1714 force = B_TRUE; 1715 break; 1716 case '?': 1717 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 1718 optopt); 1719 usage(B_FALSE); 1720 } 1721 } 1722 1723 argc -= optind; 1724 argv += optind; 1725 1726 /* check arguments */ 1727 if (argc < 1) { 1728 (void) fprintf(stderr, gettext("missing pool argument\n")); 1729 usage(B_FALSE); 1730 } 1731 if (argc > 1) { 1732 (void) fprintf(stderr, gettext("too many arguments\n")); 1733 usage(B_FALSE); 1734 } 1735 1736 pool = argv[0]; 1737 1738 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) { 1739 /* 1740 * As a special case, check for use of '/' in the name, and 1741 * direct the user to use 'zfs destroy' instead. 1742 */ 1743 if (strchr(pool, '/') != NULL) 1744 (void) fprintf(stderr, gettext("use 'zfs destroy' to " 1745 "destroy a dataset\n")); 1746 return (1); 1747 } 1748 1749 if (zpool_disable_datasets(zhp, force) != 0) { 1750 (void) fprintf(stderr, gettext("could not destroy '%s': " 1751 "could not unmount datasets\n"), zpool_get_name(zhp)); 1752 zpool_close(zhp); 1753 return (1); 1754 } 1755 1756 /* The history must be logged as part of the export */ 1757 log_history = B_FALSE; 1758 1759 ret = (zpool_destroy(zhp, history_str) != 0); 1760 1761 zpool_close(zhp); 1762 1763 return (ret); 1764 } 1765 1766 typedef struct export_cbdata { 1767 boolean_t force; 1768 boolean_t hardforce; 1769 } export_cbdata_t; 1770 1771 /* 1772 * Export one pool 1773 */ 1774 static int 1775 zpool_export_one(zpool_handle_t *zhp, void *data) 1776 { 1777 export_cbdata_t *cb = data; 1778 1779 if (zpool_disable_datasets(zhp, cb->force) != 0) 1780 return (1); 1781 1782 /* The history must be logged as part of the export */ 1783 log_history = B_FALSE; 1784 1785 if (cb->hardforce) { 1786 if (zpool_export_force(zhp, history_str) != 0) 1787 return (1); 1788 } else if (zpool_export(zhp, cb->force, history_str) != 0) { 1789 return (1); 1790 } 1791 1792 return (0); 1793 } 1794 1795 /* 1796 * zpool export [-f] <pool> ... 
1797 * 1798 * -a Export all pools 1799 * -f Forcefully unmount datasets 1800 * 1801 * Export the given pools. By default, the command will attempt to cleanly 1802 * unmount any active datasets within the pool. If the '-f' flag is specified, 1803 * then the datasets will be forcefully unmounted. 1804 */ 1805 int 1806 zpool_do_export(int argc, char **argv) 1807 { 1808 export_cbdata_t cb; 1809 boolean_t do_all = B_FALSE; 1810 boolean_t force = B_FALSE; 1811 boolean_t hardforce = B_FALSE; 1812 int c, ret; 1813 1814 /* check options */ 1815 while ((c = getopt(argc, argv, "afF")) != -1) { 1816 switch (c) { 1817 case 'a': 1818 do_all = B_TRUE; 1819 break; 1820 case 'f': 1821 force = B_TRUE; 1822 break; 1823 case 'F': 1824 hardforce = B_TRUE; 1825 break; 1826 case '?': 1827 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 1828 optopt); 1829 usage(B_FALSE); 1830 } 1831 } 1832 1833 cb.force = force; 1834 cb.hardforce = hardforce; 1835 argc -= optind; 1836 argv += optind; 1837 1838 if (do_all) { 1839 if (argc != 0) { 1840 (void) fprintf(stderr, gettext("too many arguments\n")); 1841 usage(B_FALSE); 1842 } 1843 1844 return (for_each_pool(argc, argv, B_TRUE, NULL, 1845 B_FALSE, zpool_export_one, &cb)); 1846 } 1847 1848 /* check arguments */ 1849 if (argc < 1) { 1850 (void) fprintf(stderr, gettext("missing pool argument\n")); 1851 usage(B_FALSE); 1852 } 1853 1854 ret = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, zpool_export_one, 1855 &cb); 1856 1857 return (ret); 1858 } 1859 1860 /* 1861 * Given a vdev configuration, determine the maximum width needed for the device 1862 * name column. 1863 */ 1864 static int 1865 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max, 1866 int name_flags) 1867 { 1868 char *name; 1869 nvlist_t **child; 1870 uint_t c, children; 1871 int ret; 1872 1873 name = zpool_vdev_name(g_zfs, zhp, nv, name_flags); 1874 if (strlen(name) + depth > max) 1875 max = strlen(name) + depth; 1876 1877 free(name); 1878 1879 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 1880 &child, &children) == 0) { 1881 for (c = 0; c < children; c++) 1882 if ((ret = max_width(zhp, child[c], depth + 2, 1883 max, name_flags)) > max) 1884 max = ret; 1885 } 1886 1887 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 1888 &child, &children) == 0) { 1889 for (c = 0; c < children; c++) 1890 if ((ret = max_width(zhp, child[c], depth + 2, 1891 max, name_flags)) > max) 1892 max = ret; 1893 } 1894 1895 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1896 &child, &children) == 0) { 1897 for (c = 0; c < children; c++) 1898 if ((ret = max_width(zhp, child[c], depth + 2, 1899 max, name_flags)) > max) 1900 max = ret; 1901 } 1902 1903 return (max); 1904 } 1905 1906 typedef struct spare_cbdata { 1907 uint64_t cb_guid; 1908 zpool_handle_t *cb_zhp; 1909 } spare_cbdata_t; 1910 1911 static boolean_t 1912 find_vdev(nvlist_t *nv, uint64_t search) 1913 { 1914 uint64_t guid; 1915 nvlist_t **child; 1916 uint_t c, children; 1917 1918 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 && 1919 search == guid) 1920 return (B_TRUE); 1921 1922 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1923 &child, &children) == 0) { 1924 for (c = 0; c < children; c++) 1925 if (find_vdev(child[c], search)) 1926 return (B_TRUE); 1927 } 1928 1929 return (B_FALSE); 1930 } 1931 1932 static int 1933 find_spare(zpool_handle_t *zhp, void *data) 1934 { 1935 spare_cbdata_t *cbp = data; 1936 nvlist_t *config, *nvroot; 1937 1938 config = zpool_get_config(zhp, NULL); 1939 
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 1940 &nvroot) == 0); 1941 1942 if (find_vdev(nvroot, cbp->cb_guid)) { 1943 cbp->cb_zhp = zhp; 1944 return (1); 1945 } 1946 1947 zpool_close(zhp); 1948 return (0); 1949 } 1950 1951 typedef struct status_cbdata { 1952 int cb_count; 1953 int cb_name_flags; 1954 int cb_namewidth; 1955 boolean_t cb_allpools; 1956 boolean_t cb_verbose; 1957 boolean_t cb_literal; 1958 boolean_t cb_explain; 1959 boolean_t cb_first; 1960 boolean_t cb_dedup_stats; 1961 boolean_t cb_print_status; 1962 boolean_t cb_print_slow_ios; 1963 boolean_t cb_print_vdev_init; 1964 boolean_t cb_print_vdev_trim; 1965 vdev_cmd_data_list_t *vcdl; 1966 } status_cbdata_t; 1967 1968 /* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */ 1969 static int 1970 is_blank_str(char *str) 1971 { 1972 while (str != NULL && *str != '\0') { 1973 if (!isblank(*str)) 1974 return (0); 1975 str++; 1976 } 1977 return (1); 1978 } 1979 1980 /* Print command output lines for specific vdev in a specific pool */ 1981 static void 1982 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path) 1983 { 1984 vdev_cmd_data_t *data; 1985 int i, j; 1986 char *val; 1987 1988 for (i = 0; i < vcdl->count; i++) { 1989 if ((strcmp(vcdl->data[i].path, path) != 0) || 1990 (strcmp(vcdl->data[i].pool, pool) != 0)) { 1991 /* Not the vdev we're looking for */ 1992 continue; 1993 } 1994 1995 data = &vcdl->data[i]; 1996 /* Print out all the output values for this vdev */ 1997 for (j = 0; j < vcdl->uniq_cols_cnt; j++) { 1998 val = NULL; 1999 /* Does this vdev have values for this column? */ 2000 for (int k = 0; k < data->cols_cnt; k++) { 2001 if (strcmp(data->cols[k], 2002 vcdl->uniq_cols[j]) == 0) { 2003 /* yes it does, record the value */ 2004 val = data->lines[k]; 2005 break; 2006 } 2007 } 2008 /* 2009 * Mark empty values with dashes to make output 2010 * awk-able. 2011 */ 2012 if (is_blank_str(val)) 2013 val = "-"; 2014 2015 printf("%*s", vcdl->uniq_cols_width[j], val); 2016 if (j < vcdl->uniq_cols_cnt - 1) 2017 printf(" "); 2018 } 2019 2020 /* Print out any values that aren't in a column at the end */ 2021 for (j = data->cols_cnt; j < data->lines_cnt; j++) { 2022 /* Did we have any columns? If so print a spacer. */ 2023 if (vcdl->uniq_cols_cnt > 0) 2024 printf(" "); 2025 2026 val = data->lines[j]; 2027 printf("%s", val ? 
val : ""); 2028 } 2029 break; 2030 } 2031 } 2032 2033 /* 2034 * Print vdev initialization status for leaves 2035 */ 2036 static void 2037 print_status_initialize(vdev_stat_t *vs, boolean_t verbose) 2038 { 2039 if (verbose) { 2040 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE || 2041 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED || 2042 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) && 2043 !vs->vs_scan_removing) { 2044 char zbuf[1024]; 2045 char tbuf[256]; 2046 struct tm zaction_ts; 2047 2048 time_t t = vs->vs_initialize_action_time; 2049 int initialize_pct = 100; 2050 if (vs->vs_initialize_state != 2051 VDEV_INITIALIZE_COMPLETE) { 2052 initialize_pct = (vs->vs_initialize_bytes_done * 2053 100 / (vs->vs_initialize_bytes_est + 1)); 2054 } 2055 2056 (void) localtime_r(&t, &zaction_ts); 2057 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts); 2058 2059 switch (vs->vs_initialize_state) { 2060 case VDEV_INITIALIZE_SUSPENDED: 2061 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2062 gettext("suspended, started at"), tbuf); 2063 break; 2064 case VDEV_INITIALIZE_ACTIVE: 2065 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2066 gettext("started at"), tbuf); 2067 break; 2068 case VDEV_INITIALIZE_COMPLETE: 2069 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2070 gettext("completed at"), tbuf); 2071 break; 2072 } 2073 2074 (void) printf(gettext(" (%d%% initialized%s)"), 2075 initialize_pct, zbuf); 2076 } else { 2077 (void) printf(gettext(" (uninitialized)")); 2078 } 2079 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) { 2080 (void) printf(gettext(" (initializing)")); 2081 } 2082 } 2083 2084 /* 2085 * Print vdev TRIM status for leaves 2086 */ 2087 static void 2088 print_status_trim(vdev_stat_t *vs, boolean_t verbose) 2089 { 2090 if (verbose) { 2091 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE || 2092 vs->vs_trim_state == VDEV_TRIM_SUSPENDED || 2093 vs->vs_trim_state == VDEV_TRIM_COMPLETE) && 2094 !vs->vs_scan_removing) { 2095 char zbuf[1024]; 2096 char tbuf[256]; 2097 struct tm zaction_ts; 2098 2099 time_t t = vs->vs_trim_action_time; 2100 int trim_pct = 100; 2101 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) { 2102 trim_pct = (vs->vs_trim_bytes_done * 2103 100 / (vs->vs_trim_bytes_est + 1)); 2104 } 2105 2106 (void) localtime_r(&t, &zaction_ts); 2107 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts); 2108 2109 switch (vs->vs_trim_state) { 2110 case VDEV_TRIM_SUSPENDED: 2111 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2112 gettext("suspended, started at"), tbuf); 2113 break; 2114 case VDEV_TRIM_ACTIVE: 2115 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2116 gettext("started at"), tbuf); 2117 break; 2118 case VDEV_TRIM_COMPLETE: 2119 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2120 gettext("completed at"), tbuf); 2121 break; 2122 } 2123 2124 (void) printf(gettext(" (%d%% trimmed%s)"), 2125 trim_pct, zbuf); 2126 } else if (vs->vs_trim_notsup) { 2127 (void) printf(gettext(" (trim unsupported)")); 2128 } else { 2129 (void) printf(gettext(" (untrimmed)")); 2130 } 2131 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) { 2132 (void) printf(gettext(" (trimming)")); 2133 } 2134 } 2135 2136 /* 2137 * Return the color associated with a health string. This includes returning 2138 * NULL for no color change. 
2139 */ 2140 static char * 2141 health_str_to_color(const char *health) 2142 { 2143 if (strcmp(health, gettext("FAULTED")) == 0 || 2144 strcmp(health, gettext("SUSPENDED")) == 0 || 2145 strcmp(health, gettext("UNAVAIL")) == 0) { 2146 return (ANSI_RED); 2147 } 2148 2149 if (strcmp(health, gettext("OFFLINE")) == 0 || 2150 strcmp(health, gettext("DEGRADED")) == 0 || 2151 strcmp(health, gettext("REMOVED")) == 0) { 2152 return (ANSI_YELLOW); 2153 } 2154 2155 return (NULL); 2156 } 2157 2158 /* 2159 * Print out configuration state as requested by status_callback. 2160 */ 2161 static void 2162 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name, 2163 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs) 2164 { 2165 nvlist_t **child, *root; 2166 uint_t c, i, vsc, children; 2167 pool_scan_stat_t *ps = NULL; 2168 vdev_stat_t *vs; 2169 char rbuf[6], wbuf[6], cbuf[6]; 2170 char *vname; 2171 uint64_t notpresent; 2172 spare_cbdata_t spare_cb; 2173 const char *state; 2174 char *type; 2175 char *path = NULL; 2176 char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL; 2177 2178 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2179 &child, &children) != 0) 2180 children = 0; 2181 2182 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2183 (uint64_t **)&vs, &vsc) == 0); 2184 2185 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 2186 2187 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) 2188 return; 2189 2190 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 2191 2192 if (isspare) { 2193 /* 2194 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for 2195 * online drives. 2196 */ 2197 if (vs->vs_aux == VDEV_AUX_SPARED) 2198 state = gettext("INUSE"); 2199 else if (vs->vs_state == VDEV_STATE_HEALTHY) 2200 state = gettext("AVAIL"); 2201 } 2202 2203 printf_color(health_str_to_color(state), 2204 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth, 2205 name, state); 2206 2207 if (!isspare) { 2208 if (vs->vs_read_errors) 2209 rcolor = ANSI_RED; 2210 2211 if (vs->vs_write_errors) 2212 wcolor = ANSI_RED; 2213 2214 if (vs->vs_checksum_errors) 2215 ccolor = ANSI_RED; 2216 2217 if (cb->cb_literal) { 2218 printf(" "); 2219 printf_color(rcolor, "%5llu", 2220 (u_longlong_t)vs->vs_read_errors); 2221 printf(" "); 2222 printf_color(wcolor, "%5llu", 2223 (u_longlong_t)vs->vs_write_errors); 2224 printf(" "); 2225 printf_color(ccolor, "%5llu", 2226 (u_longlong_t)vs->vs_checksum_errors); 2227 } else { 2228 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf)); 2229 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf)); 2230 zfs_nicenum(vs->vs_checksum_errors, cbuf, 2231 sizeof (cbuf)); 2232 printf(" "); 2233 printf_color(rcolor, "%5s", rbuf); 2234 printf(" "); 2235 printf_color(wcolor, "%5s", wbuf); 2236 printf(" "); 2237 printf_color(ccolor, "%5s", cbuf); 2238 } 2239 if (cb->cb_print_slow_ios) { 2240 if (children == 0) { 2241 /* Only leaf vdevs have slow IOs */ 2242 zfs_nicenum(vs->vs_slow_ios, rbuf, 2243 sizeof (rbuf)); 2244 } else { 2245 snprintf(rbuf, sizeof (rbuf), "-"); 2246 } 2247 2248 if (cb->cb_literal) 2249 printf(" %5llu", (u_longlong_t)vs->vs_slow_ios); 2250 else 2251 printf(" %5s", rbuf); 2252 } 2253 } 2254 2255 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 2256 &notpresent) == 0) { 2257 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); 2258 (void) printf(" %s %s", gettext("was"), path); 2259 } else if (vs->vs_aux != 0) { 2260 (void) printf(" "); 2261 color_start(ANSI_RED); 2262 switch (vs->vs_aux) { 2263 case
VDEV_AUX_OPEN_FAILED: 2264 (void) printf(gettext("cannot open")); 2265 break; 2266 2267 case VDEV_AUX_BAD_GUID_SUM: 2268 (void) printf(gettext("missing device")); 2269 break; 2270 2271 case VDEV_AUX_NO_REPLICAS: 2272 (void) printf(gettext("insufficient replicas")); 2273 break; 2274 2275 case VDEV_AUX_VERSION_NEWER: 2276 (void) printf(gettext("newer version")); 2277 break; 2278 2279 case VDEV_AUX_UNSUP_FEAT: 2280 (void) printf(gettext("unsupported feature(s)")); 2281 break; 2282 2283 case VDEV_AUX_ASHIFT_TOO_BIG: 2284 (void) printf(gettext("unsupported minimum blocksize")); 2285 break; 2286 2287 case VDEV_AUX_SPARED: 2288 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 2289 &spare_cb.cb_guid) == 0); 2290 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) { 2291 if (strcmp(zpool_get_name(spare_cb.cb_zhp), 2292 zpool_get_name(zhp)) == 0) 2293 (void) printf(gettext("currently in " 2294 "use")); 2295 else 2296 (void) printf(gettext("in use by " 2297 "pool '%s'"), 2298 zpool_get_name(spare_cb.cb_zhp)); 2299 zpool_close(spare_cb.cb_zhp); 2300 } else { 2301 (void) printf(gettext("currently in use")); 2302 } 2303 break; 2304 2305 case VDEV_AUX_ERR_EXCEEDED: 2306 (void) printf(gettext("too many errors")); 2307 break; 2308 2309 case VDEV_AUX_IO_FAILURE: 2310 (void) printf(gettext("experienced I/O failures")); 2311 break; 2312 2313 case VDEV_AUX_BAD_LOG: 2314 (void) printf(gettext("bad intent log")); 2315 break; 2316 2317 case VDEV_AUX_EXTERNAL: 2318 (void) printf(gettext("external device fault")); 2319 break; 2320 2321 case VDEV_AUX_SPLIT_POOL: 2322 (void) printf(gettext("split into new pool")); 2323 break; 2324 2325 case VDEV_AUX_ACTIVE: 2326 (void) printf(gettext("currently in use")); 2327 break; 2328 2329 case VDEV_AUX_CHILDREN_OFFLINE: 2330 (void) printf(gettext("all children offline")); 2331 break; 2332 2333 default: 2334 (void) printf(gettext("corrupted data")); 2335 break; 2336 } 2337 color_end(); 2338 } else if (children == 0 && !isspare && 2339 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL && 2340 VDEV_STAT_VALID(vs_physical_ashift, vsc) && 2341 vs->vs_configured_ashift < vs->vs_physical_ashift) { 2342 (void) printf( 2343 gettext(" block size: %dB configured, %dB native"), 2344 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift); 2345 } 2346 2347 /* The root vdev has the scrub/resilver stats */ 2348 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2349 ZPOOL_CONFIG_VDEV_TREE); 2350 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS, 2351 (uint64_t **)&ps, &c); 2352 2353 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0) { 2354 if (vs->vs_scan_processed != 0) { 2355 (void) printf(gettext(" (%s)"), 2356 (ps->pss_func == POOL_SCAN_RESILVER) ? 2357 "resilvering" : "repairing"); 2358 } else if (vs->vs_resilver_deferred) { 2359 (void) printf(gettext(" (awaiting resilver)")); 2360 } 2361 } 2362 2363 /* The top-level vdevs have the rebuild stats */ 2364 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE && 2365 children == 0) { 2366 if (vs->vs_rebuild_processed != 0) { 2367 (void) printf(gettext(" (resilvering)")); 2368 } 2369 } 2370 2371 if (cb->vcdl != NULL) { 2372 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 2373 printf(" "); 2374 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 2375 } 2376 } 2377 2378 /* Display vdev initialization and trim status for leaves. 
*/ 2379 if (children == 0) { 2380 print_status_initialize(vs, cb->cb_print_vdev_init); 2381 print_status_trim(vs, cb->cb_print_vdev_trim); 2382 } 2383 2384 (void) printf("\n"); 2385 2386 for (c = 0; c < children; c++) { 2387 uint64_t islog = B_FALSE, ishole = B_FALSE; 2388 2389 /* Don't print logs or holes here */ 2390 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2391 &islog); 2392 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2393 &ishole); 2394 if (islog || ishole) 2395 continue; 2396 /* Only print normal classes here */ 2397 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 2398 continue; 2399 2400 /* Provide vdev_rebuild_stats to children if available */ 2401 if (vrs == NULL) { 2402 (void) nvlist_lookup_uint64_array(nv, 2403 ZPOOL_CONFIG_REBUILD_STATS, 2404 (uint64_t **)&vrs, &i); 2405 } 2406 2407 vname = zpool_vdev_name(g_zfs, zhp, child[c], 2408 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2409 print_status_config(zhp, cb, vname, child[c], depth + 2, 2410 isspare, vrs); 2411 free(vname); 2412 } 2413 } 2414 2415 /* 2416 * Print the configuration of an exported pool. Iterate over all vdevs in the 2417 * pool, printing out the name and status for each one. 2418 */ 2419 static void 2420 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv, 2421 int depth) 2422 { 2423 nvlist_t **child; 2424 uint_t c, children; 2425 vdev_stat_t *vs; 2426 char *type, *vname; 2427 2428 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 2429 if (strcmp(type, VDEV_TYPE_MISSING) == 0 || 2430 strcmp(type, VDEV_TYPE_HOLE) == 0) 2431 return; 2432 2433 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2434 (uint64_t **)&vs, &c) == 0); 2435 2436 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name); 2437 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux)); 2438 2439 if (vs->vs_aux != 0) { 2440 (void) printf(" "); 2441 2442 switch (vs->vs_aux) { 2443 case VDEV_AUX_OPEN_FAILED: 2444 (void) printf(gettext("cannot open")); 2445 break; 2446 2447 case VDEV_AUX_BAD_GUID_SUM: 2448 (void) printf(gettext("missing device")); 2449 break; 2450 2451 case VDEV_AUX_NO_REPLICAS: 2452 (void) printf(gettext("insufficient replicas")); 2453 break; 2454 2455 case VDEV_AUX_VERSION_NEWER: 2456 (void) printf(gettext("newer version")); 2457 break; 2458 2459 case VDEV_AUX_UNSUP_FEAT: 2460 (void) printf(gettext("unsupported feature(s)")); 2461 break; 2462 2463 case VDEV_AUX_ERR_EXCEEDED: 2464 (void) printf(gettext("too many errors")); 2465 break; 2466 2467 case VDEV_AUX_ACTIVE: 2468 (void) printf(gettext("currently in use")); 2469 break; 2470 2471 case VDEV_AUX_CHILDREN_OFFLINE: 2472 (void) printf(gettext("all children offline")); 2473 break; 2474 2475 default: 2476 (void) printf(gettext("corrupted data")); 2477 break; 2478 } 2479 } 2480 (void) printf("\n"); 2481 2482 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2483 &child, &children) != 0) 2484 return; 2485 2486 for (c = 0; c < children; c++) { 2487 uint64_t is_log = B_FALSE; 2488 2489 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2490 &is_log); 2491 if (is_log) 2492 continue; 2493 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 2494 continue; 2495 2496 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2497 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2498 print_import_config(cb, vname, child[c], depth + 2); 2499 free(vname); 2500 } 2501 2502 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2503 &child, &children) == 0) { 2504 (void) 
printf(gettext("\tcache\n")); 2505 for (c = 0; c < children; c++) { 2506 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2507 cb->cb_name_flags); 2508 (void) printf("\t %s\n", vname); 2509 free(vname); 2510 } 2511 } 2512 2513 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2514 &child, &children) == 0) { 2515 (void) printf(gettext("\tspares\n")); 2516 for (c = 0; c < children; c++) { 2517 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2518 cb->cb_name_flags); 2519 (void) printf("\t %s\n", vname); 2520 free(vname); 2521 } 2522 } 2523 } 2524 2525 /* 2526 * Print specialized class vdevs. 2527 * 2528 * These are recorded as top level vdevs in the main pool child array 2529 * but with "is_log" set to 1 or an "alloc_bias" string. We use either 2530 * print_status_config() or print_import_config() to print the top level 2531 * class vdevs then any of their children (eg mirrored slogs) are printed 2532 * recursively - which works because only the top level vdev is marked. 2533 */ 2534 static void 2535 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 2536 const char *class) 2537 { 2538 uint_t c, children; 2539 nvlist_t **child; 2540 boolean_t printed = B_FALSE; 2541 2542 assert(zhp != NULL || !cb->cb_verbose); 2543 2544 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, 2545 &children) != 0) 2546 return; 2547 2548 for (c = 0; c < children; c++) { 2549 uint64_t is_log = B_FALSE; 2550 char *bias = NULL; 2551 char *type = NULL; 2552 2553 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2554 &is_log); 2555 2556 if (is_log) { 2557 bias = VDEV_ALLOC_CLASS_LOGS; 2558 } else { 2559 (void) nvlist_lookup_string(child[c], 2560 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 2561 (void) nvlist_lookup_string(child[c], 2562 ZPOOL_CONFIG_TYPE, &type); 2563 } 2564 2565 if (bias == NULL || strcmp(bias, class) != 0) 2566 continue; 2567 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 2568 continue; 2569 2570 if (!printed) { 2571 (void) printf("\t%s\t\n", gettext(class)); 2572 printed = B_TRUE; 2573 } 2574 2575 char *name = zpool_vdev_name(g_zfs, zhp, child[c], 2576 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2577 if (cb->cb_print_status) 2578 print_status_config(zhp, cb, name, child[c], 2, 2579 B_FALSE, NULL); 2580 else 2581 print_import_config(cb, name, child[c], 2); 2582 free(name); 2583 } 2584 } 2585 2586 /* 2587 * Display the status for the given pool. 
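 *
 * The output below is roughly shaped like this abridged, illustrative
 * example (pool name, id, device names, and message text are placeholders):
 *
 *	 pool: tank
 *	   id: 1234567890123456789
 *	state: ONLINE
 *	status: Some supported features are not enabled on the pool.
 *	action: The pool can be imported using its name or numeric identifier, ...
 *	config:
 *
 *		tank        ONLINE
 *		  mirror-0  ONLINE
 *		    sda     ONLINE
 *		    sdb     ONLINE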
2588 */ 2589 static void 2590 show_import(nvlist_t *config) 2591 { 2592 uint64_t pool_state; 2593 vdev_stat_t *vs; 2594 char *name; 2595 uint64_t guid; 2596 uint64_t hostid = 0; 2597 char *msgid; 2598 char *hostname = "unknown"; 2599 nvlist_t *nvroot, *nvinfo; 2600 zpool_status_t reason; 2601 zpool_errata_t errata; 2602 const char *health; 2603 uint_t vsc; 2604 char *comment; 2605 status_cbdata_t cb = { 0 }; 2606 2607 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 2608 &name) == 0); 2609 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 2610 &guid) == 0); 2611 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2612 &pool_state) == 0); 2613 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2614 &nvroot) == 0); 2615 2616 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 2617 (uint64_t **)&vs, &vsc) == 0); 2618 health = zpool_state_to_name(vs->vs_state, vs->vs_aux); 2619 2620 reason = zpool_import_status(config, &msgid, &errata); 2621 2622 (void) printf(gettext(" pool: %s\n"), name); 2623 (void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid); 2624 (void) printf(gettext(" state: %s"), health); 2625 if (pool_state == POOL_STATE_DESTROYED) 2626 (void) printf(gettext(" (DESTROYED)")); 2627 (void) printf("\n"); 2628 2629 switch (reason) { 2630 case ZPOOL_STATUS_MISSING_DEV_R: 2631 case ZPOOL_STATUS_MISSING_DEV_NR: 2632 case ZPOOL_STATUS_BAD_GUID_SUM: 2633 printf_color(ANSI_BOLD, gettext("status: ")); 2634 printf_color(ANSI_YELLOW, gettext("One or more devices are " 2635 "missing from the system.\n")); 2636 break; 2637 2638 case ZPOOL_STATUS_CORRUPT_LABEL_R: 2639 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 2640 printf_color(ANSI_BOLD, gettext("status: ")); 2641 printf_color(ANSI_YELLOW, gettext("One or more devices contains" 2642 " corrupted data.\n")); 2643 break; 2644 2645 case ZPOOL_STATUS_CORRUPT_DATA: 2646 (void) printf( 2647 gettext(" status: The pool data is corrupted.\n")); 2648 break; 2649 2650 case ZPOOL_STATUS_OFFLINE_DEV: 2651 printf_color(ANSI_BOLD, gettext("status: ")); 2652 printf_color(ANSI_YELLOW, gettext("One or more devices " 2653 "are offlined.\n")); 2654 break; 2655 2656 case ZPOOL_STATUS_CORRUPT_POOL: 2657 printf_color(ANSI_BOLD, gettext("status: ")); 2658 printf_color(ANSI_YELLOW, gettext("The pool metadata is " 2659 "corrupted.\n")); 2660 break; 2661 2662 case ZPOOL_STATUS_VERSION_OLDER: 2663 printf_color(ANSI_BOLD, gettext("status: ")); 2664 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 2665 "a legacy on-disk version.\n")); 2666 break; 2667 2668 case ZPOOL_STATUS_VERSION_NEWER: 2669 printf_color(ANSI_BOLD, gettext("status: ")); 2670 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 2671 "an incompatible version.\n")); 2672 break; 2673 2674 case ZPOOL_STATUS_FEAT_DISABLED: 2675 printf_color(ANSI_BOLD, gettext("status: ")); 2676 printf_color(ANSI_YELLOW, gettext("Some supported features are " 2677 "not enabled on the pool.\n")); 2678 break; 2679 2680 case ZPOOL_STATUS_UNSUP_FEAT_READ: 2681 printf_color(ANSI_BOLD, gettext("status: ")); 2682 printf_color(ANSI_YELLOW, gettext("The pool uses the following " 2683 "feature(s) not supported on this system:\n")); 2684 color_start(ANSI_YELLOW); 2685 zpool_print_unsup_feat(config); 2686 color_end(); 2687 break; 2688 2689 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 2690 printf_color(ANSI_BOLD, gettext("status: ")); 2691 printf_color(ANSI_YELLOW, gettext("The pool can only be " 2692 "accessed in read-only mode on this system. 
It\n\tcannot be" 2693 " accessed in read-write mode because it uses the " 2694 "following\n\tfeature(s) not supported on this system:\n")); 2695 color_start(ANSI_YELLOW); 2696 zpool_print_unsup_feat(config); 2697 color_end(); 2698 break; 2699 2700 case ZPOOL_STATUS_HOSTID_ACTIVE: 2701 printf_color(ANSI_BOLD, gettext("status: ")); 2702 printf_color(ANSI_YELLOW, gettext("The pool is currently " 2703 "imported by another system.\n")); 2704 break; 2705 2706 case ZPOOL_STATUS_HOSTID_REQUIRED: 2707 printf_color(ANSI_BOLD, gettext("status: ")); 2708 printf_color(ANSI_YELLOW, gettext("The pool has the " 2709 "multihost property on. It cannot\n\tbe safely imported " 2710 "when the system hostid is not set.\n")); 2711 break; 2712 2713 case ZPOOL_STATUS_HOSTID_MISMATCH: 2714 printf_color(ANSI_BOLD, gettext("status: ")); 2715 printf_color(ANSI_YELLOW, gettext("The pool was last accessed " 2716 "by another system.\n")); 2717 break; 2718 2719 case ZPOOL_STATUS_FAULTED_DEV_R: 2720 case ZPOOL_STATUS_FAULTED_DEV_NR: 2721 printf_color(ANSI_BOLD, gettext("status: ")); 2722 printf_color(ANSI_YELLOW, gettext("One or more devices are " 2723 "faulted.\n")); 2724 break; 2725 2726 case ZPOOL_STATUS_BAD_LOG: 2727 printf_color(ANSI_BOLD, gettext("status: ")); 2728 printf_color(ANSI_YELLOW, gettext("An intent log record cannot " 2729 "be read.\n")); 2730 break; 2731 2732 case ZPOOL_STATUS_RESILVERING: 2733 case ZPOOL_STATUS_REBUILDING: 2734 printf_color(ANSI_BOLD, gettext("status: ")); 2735 printf_color(ANSI_YELLOW, gettext("One or more devices were " 2736 "being resilvered.\n")); 2737 break; 2738 2739 case ZPOOL_STATUS_ERRATA: 2740 printf_color(ANSI_BOLD, gettext("status: ")); 2741 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"), 2742 errata); 2743 break; 2744 2745 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 2746 printf_color(ANSI_BOLD, gettext("status: ")); 2747 printf_color(ANSI_YELLOW, gettext("One or more devices are " 2748 "configured to use a non-native block size.\n" 2749 "\tExpect reduced performance.\n")); 2750 break; 2751 2752 default: 2753 /* 2754 * No other status can be seen when importing pools. 2755 */ 2756 assert(reason == ZPOOL_STATUS_OK); 2757 } 2758 2759 /* 2760 * Print out an action according to the overall state of the pool. 2761 */ 2762 if (vs->vs_state == VDEV_STATE_HEALTHY) { 2763 if (reason == ZPOOL_STATUS_VERSION_OLDER || 2764 reason == ZPOOL_STATUS_FEAT_DISABLED) { 2765 (void) printf(gettext(" action: The pool can be " 2766 "imported using its name or numeric identifier, " 2767 "though\n\tsome features will not be available " 2768 "without an explicit 'zpool upgrade'.\n")); 2769 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) { 2770 (void) printf(gettext(" action: The pool can be " 2771 "imported using its name or numeric " 2772 "identifier and\n\tthe '-f' flag.\n")); 2773 } else if (reason == ZPOOL_STATUS_ERRATA) { 2774 switch (errata) { 2775 case ZPOOL_ERRATA_NONE: 2776 break; 2777 2778 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 2779 (void) printf(gettext(" action: The pool can " 2780 "be imported using its name or numeric " 2781 "identifier,\n\thowever there is a compat" 2782 "ibility issue which should be corrected" 2783 "\n\tby running 'zpool scrub'\n")); 2784 break; 2785 2786 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY: 2787 (void) printf(gettext(" action: The pool can" 2788 "not be imported with this version of ZFS " 2789 "due to\n\tan active asynchronous destroy. 
" 2790 "Revert to an earlier version\n\tand " 2791 "allow the destroy to complete before " 2792 "updating.\n")); 2793 break; 2794 2795 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 2796 (void) printf(gettext(" action: Existing " 2797 "encrypted datasets contain an on-disk " 2798 "incompatibility, which\n\tneeds to be " 2799 "corrected. Backup these datasets to new " 2800 "encrypted datasets\n\tand destroy the " 2801 "old ones.\n")); 2802 break; 2803 2804 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 2805 (void) printf(gettext(" action: Existing " 2806 "encrypted snapshots and bookmarks contain " 2807 "an on-disk\n\tincompatibility. This may " 2808 "cause on-disk corruption if they are used" 2809 "\n\twith 'zfs recv'. To correct the " 2810 "issue, enable the bookmark_v2 feature.\n\t" 2811 "No additional action is needed if there " 2812 "are no encrypted snapshots or\n\t" 2813 "bookmarks. If preserving the encrypted " 2814 "snapshots and bookmarks is\n\trequired, " 2815 "use a non-raw send to backup and restore " 2816 "them. Alternately,\n\tthey may be removed" 2817 " to resolve the incompatibility.\n")); 2818 break; 2819 default: 2820 /* 2821 * All errata must contain an action message. 2822 */ 2823 assert(0); 2824 } 2825 } else { 2826 (void) printf(gettext(" action: The pool can be " 2827 "imported using its name or numeric " 2828 "identifier.\n")); 2829 } 2830 } else if (vs->vs_state == VDEV_STATE_DEGRADED) { 2831 (void) printf(gettext(" action: The pool can be imported " 2832 "despite missing or damaged devices. The\n\tfault " 2833 "tolerance of the pool may be compromised if imported.\n")); 2834 } else { 2835 switch (reason) { 2836 case ZPOOL_STATUS_VERSION_NEWER: 2837 (void) printf(gettext(" action: The pool cannot be " 2838 "imported. Access the pool on a system running " 2839 "newer\n\tsoftware, or recreate the pool from " 2840 "backup.\n")); 2841 break; 2842 case ZPOOL_STATUS_UNSUP_FEAT_READ: 2843 printf_color(ANSI_BOLD, gettext("action: ")); 2844 printf_color(ANSI_YELLOW, gettext("The pool cannot be " 2845 "imported. Access the pool on a system that " 2846 "supports\n\tthe required feature(s), or recreate " 2847 "the pool from backup.\n")); 2848 break; 2849 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 2850 printf_color(ANSI_BOLD, gettext("action: ")); 2851 printf_color(ANSI_YELLOW, gettext("The pool cannot be " 2852 "imported in read-write mode. Import the pool " 2853 "with\n" 2854 "\t\"-o readonly=on\", access the pool on a system " 2855 "that supports the\n\trequired feature(s), or " 2856 "recreate the pool from backup.\n")); 2857 break; 2858 case ZPOOL_STATUS_MISSING_DEV_R: 2859 case ZPOOL_STATUS_MISSING_DEV_NR: 2860 case ZPOOL_STATUS_BAD_GUID_SUM: 2861 (void) printf(gettext(" action: The pool cannot be " 2862 "imported. 
Attach the missing\n\tdevices and try " 2863 "again.\n")); 2864 break; 2865 case ZPOOL_STATUS_HOSTID_ACTIVE: 2866 VERIFY0(nvlist_lookup_nvlist(config, 2867 ZPOOL_CONFIG_LOAD_INFO, &nvinfo)); 2868 2869 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 2870 hostname = fnvlist_lookup_string(nvinfo, 2871 ZPOOL_CONFIG_MMP_HOSTNAME); 2872 2873 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 2874 hostid = fnvlist_lookup_uint64(nvinfo, 2875 ZPOOL_CONFIG_MMP_HOSTID); 2876 2877 (void) printf(gettext(" action: The pool must be " 2878 "exported from %s (hostid=%lx)\n\tbefore it " 2879 "can be safely imported.\n"), hostname, 2880 (unsigned long) hostid); 2881 break; 2882 case ZPOOL_STATUS_HOSTID_REQUIRED: 2883 (void) printf(gettext(" action: Set a unique system " 2884 "hostid with the zgenhostid(8) command.\n")); 2885 break; 2886 default: 2887 (void) printf(gettext(" action: The pool cannot be " 2888 "imported due to damaged devices or data.\n")); 2889 } 2890 } 2891 2892 /* Print the comment attached to the pool. */ 2893 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 2894 (void) printf(gettext("comment: %s\n"), comment); 2895 2896 /* 2897 * If the state is "closed" or "can't open", and the aux state 2898 * is "corrupt data": 2899 */ 2900 if (((vs->vs_state == VDEV_STATE_CLOSED) || 2901 (vs->vs_state == VDEV_STATE_CANT_OPEN)) && 2902 (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) { 2903 if (pool_state == POOL_STATE_DESTROYED) 2904 (void) printf(gettext("\tThe pool was destroyed, " 2905 "but can be imported using the '-Df' flags.\n")); 2906 else if (pool_state != POOL_STATE_EXPORTED) 2907 (void) printf(gettext("\tThe pool may be active on " 2908 "another system, but can be imported using\n\t" 2909 "the '-f' flag.\n")); 2910 } 2911 2912 if (msgid != NULL) { 2913 (void) printf(gettext( 2914 " see: https://openzfs.github.io/openzfs-docs/msg/%s\n"), 2915 msgid); 2916 } 2917 2918 (void) printf(gettext(" config:\n\n")); 2919 2920 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name), 2921 VDEV_NAME_TYPE_ID); 2922 if (cb.cb_namewidth < 10) 2923 cb.cb_namewidth = 10; 2924 2925 print_import_config(&cb, name, nvroot, 0); 2926 2927 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP); 2928 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 2929 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS); 2930 2931 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) { 2932 (void) printf(gettext("\n\tAdditional devices are known to " 2933 "be part of this pool, though their\n\texact " 2934 "configuration cannot be determined.\n")); 2935 } 2936 } 2937 2938 static boolean_t 2939 zfs_force_import_required(nvlist_t *config) 2940 { 2941 uint64_t state; 2942 uint64_t hostid = 0; 2943 nvlist_t *nvinfo; 2944 2945 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE); 2946 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid); 2947 2948 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid()) 2949 return (B_TRUE); 2950 2951 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 2952 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) { 2953 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo, 2954 ZPOOL_CONFIG_MMP_STATE); 2955 2956 if (mmp_state != MMP_STATE_INACTIVE) 2957 return (B_TRUE); 2958 } 2959 2960 return (B_FALSE); 2961 } 2962 2963 /* 2964 * Perform the import for the given configuration. This passes the heavy 2965 * lifting off to zpool_import_props(), and then mounts the datasets contained 2966 * within the pool. 
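 *
 * A rough sketch of the flow implemented below (not a contract):
 *
 *	zfs_force_import_required()	refuse without -f (ZFS_IMPORT_ANY_HOST)
 *	zpool_import_props()		perform the actual import
 *	zpool_open_canfail()		open the newly imported pool
 *	zfs_crypto_attempt_load_keys()	only with ZFS_IMPORT_LOAD_KEYS (-l)
 *	zpool_enable_datasets()		mount datasets unless ZFS_IMPORT_ONLY (-N)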
2967 */ 2968 static int 2969 do_import(nvlist_t *config, const char *newname, const char *mntopts, 2970 nvlist_t *props, int flags) 2971 { 2972 int ret = 0; 2973 zpool_handle_t *zhp; 2974 char *name; 2975 uint64_t version; 2976 2977 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME); 2978 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION); 2979 2980 if (!SPA_VERSION_IS_SUPPORTED(version)) { 2981 (void) fprintf(stderr, gettext("cannot import '%s': pool " 2982 "is formatted using an unsupported ZFS version\n"), name); 2983 return (1); 2984 } else if (zfs_force_import_required(config) && 2985 !(flags & ZFS_IMPORT_ANY_HOST)) { 2986 mmp_state_t mmp_state = MMP_STATE_INACTIVE; 2987 nvlist_t *nvinfo; 2988 2989 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 2990 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) 2991 mmp_state = fnvlist_lookup_uint64(nvinfo, 2992 ZPOOL_CONFIG_MMP_STATE); 2993 2994 if (mmp_state == MMP_STATE_ACTIVE) { 2995 char *hostname = "<unknown>"; 2996 uint64_t hostid = 0; 2997 2998 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 2999 hostname = fnvlist_lookup_string(nvinfo, 3000 ZPOOL_CONFIG_MMP_HOSTNAME); 3001 3002 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3003 hostid = fnvlist_lookup_uint64(nvinfo, 3004 ZPOOL_CONFIG_MMP_HOSTID); 3005 3006 (void) fprintf(stderr, gettext("cannot import '%s': " 3007 "pool is imported on %s (hostid: " 3008 "0x%lx)\nExport the pool on the other system, " 3009 "then run 'zpool import'.\n"), 3010 name, hostname, (unsigned long) hostid); 3011 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 3012 (void) fprintf(stderr, gettext("Cannot import '%s': " 3013 "pool has the multihost property on and the\n" 3014 "system's hostid is not set. Set a unique hostid " 3015 "with the zgenhostid(8) command.\n"), name); 3016 } else { 3017 char *hostname = "<unknown>"; 3018 uint64_t timestamp = 0; 3019 uint64_t hostid = 0; 3020 3021 if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME)) 3022 hostname = fnvlist_lookup_string(config, 3023 ZPOOL_CONFIG_HOSTNAME); 3024 3025 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP)) 3026 timestamp = fnvlist_lookup_uint64(config, 3027 ZPOOL_CONFIG_TIMESTAMP); 3028 3029 if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID)) 3030 hostid = fnvlist_lookup_uint64(config, 3031 ZPOOL_CONFIG_HOSTID); 3032 3033 (void) fprintf(stderr, gettext("cannot import '%s': " 3034 "pool was previously in use from another system.\n" 3035 "Last accessed by %s (hostid=%lx) at %s" 3036 "The pool can be imported, use 'zpool import -f' " 3037 "to import the pool.\n"), name, hostname, 3038 (unsigned long)hostid, ctime((time_t *)&timestamp)); 3039 } 3040 3041 return (1); 3042 } 3043 3044 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0) 3045 return (1); 3046 3047 if (newname != NULL) 3048 name = (char *)newname; 3049 3050 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL) 3051 return (1); 3052 3053 /* 3054 * Loading keys is best effort. We don't want to return immediately 3055 * if it fails but we do want to give the error to the caller.
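 * (ZFS_IMPORT_LOAD_KEYS is set by 'zpool import -l'; see zpool_do_import().)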
3056 */ 3057 if (flags & ZFS_IMPORT_LOAD_KEYS) { 3058 ret = zfs_crypto_attempt_load_keys(g_zfs, name); 3059 if (ret != 0) 3060 ret = 1; 3061 } 3062 3063 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL && 3064 !(flags & ZFS_IMPORT_ONLY) && 3065 zpool_enable_datasets(zhp, mntopts, 0) != 0) { 3066 zpool_close(zhp); 3067 return (1); 3068 } 3069 3070 zpool_close(zhp); 3071 return (ret); 3072 } 3073 3074 typedef struct target_exists_args { 3075 const char *poolname; 3076 uint64_t poolguid; 3077 } target_exists_args_t; 3078 3079 static int 3080 name_or_guid_exists(zpool_handle_t *zhp, void *data) 3081 { 3082 target_exists_args_t *args = data; 3083 nvlist_t *config = zpool_get_config(zhp, NULL); 3084 int found = 0; 3085 3086 if (config == NULL) 3087 return (0); 3088 3089 if (args->poolname != NULL) { 3090 char *pool_name; 3091 3092 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3093 &pool_name) == 0); 3094 if (strcmp(pool_name, args->poolname) == 0) 3095 found = 1; 3096 } else { 3097 uint64_t pool_guid; 3098 3099 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3100 &pool_guid) == 0); 3101 if (pool_guid == args->poolguid) 3102 found = 1; 3103 } 3104 zpool_close(zhp); 3105 3106 return (found); 3107 } 3108 /* 3109 * zpool checkpoint <pool> 3110 * checkpoint --discard <pool> 3111 * 3112 * -d Discard the checkpoint from a checkpointed 3113 * --discard pool. 3114 * 3115 * -w Wait for discarding a checkpoint to complete. 3116 * --wait 3117 * 3118 * Checkpoints the specified pool, by taking a "snapshot" of its 3119 * current state. A pool can only have one checkpoint at a time. 3120 */ 3121 int 3122 zpool_do_checkpoint(int argc, char **argv) 3123 { 3124 boolean_t discard, wait; 3125 char *pool; 3126 zpool_handle_t *zhp; 3127 int c, err; 3128 3129 struct option long_options[] = { 3130 {"discard", no_argument, NULL, 'd'}, 3131 {"wait", no_argument, NULL, 'w'}, 3132 {0, 0, 0, 0} 3133 }; 3134 3135 discard = B_FALSE; 3136 wait = B_FALSE; 3137 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) { 3138 switch (c) { 3139 case 'd': 3140 discard = B_TRUE; 3141 break; 3142 case 'w': 3143 wait = B_TRUE; 3144 break; 3145 case '?': 3146 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 3147 optopt); 3148 usage(B_FALSE); 3149 } 3150 } 3151 3152 if (wait && !discard) { 3153 (void) fprintf(stderr, gettext("--wait only valid when " 3154 "--discard also specified\n")); 3155 usage(B_FALSE); 3156 } 3157 3158 argc -= optind; 3159 argv += optind; 3160 3161 if (argc < 1) { 3162 (void) fprintf(stderr, gettext("missing pool argument\n")); 3163 usage(B_FALSE); 3164 } 3165 3166 if (argc > 1) { 3167 (void) fprintf(stderr, gettext("too many arguments\n")); 3168 usage(B_FALSE); 3169 } 3170 3171 pool = argv[0]; 3172 3173 if ((zhp = zpool_open(g_zfs, pool)) == NULL) { 3174 /* As a special case, check for use of '/' in the name */ 3175 if (strchr(pool, '/') != NULL) 3176 (void) fprintf(stderr, gettext("'zpool checkpoint' " 3177 "doesn't work on datasets. To save the state " 3178 "of a dataset from a specific point in time " 3179 "please use 'zfs snapshot'\n")); 3180 return (1); 3181 } 3182 3183 if (discard) { 3184 err = (zpool_discard_checkpoint(zhp) != 0); 3185 if (err == 0 && wait) 3186 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD); 3187 } else { 3188 err = (zpool_checkpoint(zhp) != 0); 3189 } 3190 3191 zpool_close(zhp); 3192 3193 return (err); 3194 } 3195 3196 #define CHECKPOINT_OPT 1024 3197 3198 /* 3199 * zpool import [-d dir] [-D] 3200 * import [-o mntopts] [-o prop=value] ... 
[-R root] [-D] [-l] 3201 * [-d dir | -c cachefile] [-f] -a 3202 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l] 3203 * [-d dir | -c cachefile] [-f] [-n] [-F] <pool | id> [newpool] 3204 * 3205 * -c Read pool information from a cachefile instead of searching 3206 * devices. 3207 * 3208 * -d Scan in a specific directory, other than /dev/. More than 3209 * one directory can be specified using multiple '-d' options. 3210 * 3211 * -D Scan for previously destroyed pools, or import all or only the 3212 * specified destroyed pools. 3213 * 3214 * -R Temporarily import the pool, with all mountpoints relative to 3215 * the given root. The pool will remain exported when the machine 3216 * is rebooted. 3217 * 3218 * -V Import even in the presence of faulted vdevs. This is an 3219 * intentionally undocumented option for testing purposes, and 3220 * treats the pool configuration as complete, leaving any bad 3221 * vdevs in the FAULTED state. In other words, it does verbatim 3222 * import. 3223 * 3224 * -f Force import, even if it appears that the pool is active. 3225 * 3226 * -F Attempt rewind if necessary. 3227 * 3228 * -n See if rewind would work, but don't actually rewind. 3229 * 3230 * -N Import the pool but don't mount datasets. 3231 * 3232 * -T Specify a starting txg to use for import. This is an 3233 * intentionally undocumented option for testing purposes. 3234 * 3235 * -a Import all pools found. 3236 * 3237 * -l Load encryption keys while importing. 3238 * 3239 * -o Set property=value and/or temporary mount options (without '='). 3240 * 3241 * -s Scan using the default search path; the libblkid cache will 3242 * not be consulted. 3243 * 3244 * --rewind-to-checkpoint 3245 * Import the pool and revert to the checkpoint. 3246 * 3247 * The import command scans for pools to import, and imports pools based on pool 3248 * name and GUID. The pool can also be renamed as part of the import process.
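 *
 * Illustrative invocations (pool names, GUIDs, and directories below are
 * placeholders only):
 *
 *	# zpool import				list pools available for import
 *	# zpool import -a			import all pools found
 *	# zpool import -d /dev/disk/by-id tank	scan a directory, import 'tank'
 *	# zpool import tank newtank		import 'tank' under the name 'newtank'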
3249 */ 3250 int 3251 zpool_do_import(int argc, char **argv) 3252 { 3253 char **searchdirs = NULL; 3254 char *env, *envdup = NULL; 3255 int nsearch = 0; 3256 int c; 3257 int err = 0; 3258 nvlist_t *pools = NULL; 3259 boolean_t do_all = B_FALSE; 3260 boolean_t do_destroyed = B_FALSE; 3261 char *mntopts = NULL; 3262 nvpair_t *elem; 3263 nvlist_t *config; 3264 uint64_t searchguid = 0; 3265 char *searchname = NULL; 3266 char *propval; 3267 nvlist_t *found_config; 3268 nvlist_t *policy = NULL; 3269 nvlist_t *props = NULL; 3270 boolean_t first; 3271 int flags = ZFS_IMPORT_NORMAL; 3272 uint32_t rewind_policy = ZPOOL_NO_REWIND; 3273 boolean_t dryrun = B_FALSE; 3274 boolean_t do_rewind = B_FALSE; 3275 boolean_t xtreme_rewind = B_FALSE; 3276 boolean_t do_scan = B_FALSE; 3277 boolean_t pool_exists = B_FALSE; 3278 uint64_t pool_state, txg = -1ULL; 3279 char *cachefile = NULL; 3280 importargs_t idata = { 0 }; 3281 char *endptr; 3282 3283 struct option long_options[] = { 3284 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT}, 3285 {0, 0, 0, 0} 3286 }; 3287 3288 /* check options */ 3289 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX", 3290 long_options, NULL)) != -1) { 3291 switch (c) { 3292 case 'a': 3293 do_all = B_TRUE; 3294 break; 3295 case 'c': 3296 cachefile = optarg; 3297 break; 3298 case 'd': 3299 if (searchdirs == NULL) { 3300 searchdirs = safe_malloc(sizeof (char *)); 3301 } else { 3302 char **tmp = safe_malloc((nsearch + 1) * 3303 sizeof (char *)); 3304 bcopy(searchdirs, tmp, nsearch * 3305 sizeof (char *)); 3306 free(searchdirs); 3307 searchdirs = tmp; 3308 } 3309 searchdirs[nsearch++] = optarg; 3310 break; 3311 case 'D': 3312 do_destroyed = B_TRUE; 3313 break; 3314 case 'f': 3315 flags |= ZFS_IMPORT_ANY_HOST; 3316 break; 3317 case 'F': 3318 do_rewind = B_TRUE; 3319 break; 3320 case 'l': 3321 flags |= ZFS_IMPORT_LOAD_KEYS; 3322 break; 3323 case 'm': 3324 flags |= ZFS_IMPORT_MISSING_LOG; 3325 break; 3326 case 'n': 3327 dryrun = B_TRUE; 3328 break; 3329 case 'N': 3330 flags |= ZFS_IMPORT_ONLY; 3331 break; 3332 case 'o': 3333 if ((propval = strchr(optarg, '=')) != NULL) { 3334 *propval = '\0'; 3335 propval++; 3336 if (add_prop_list(optarg, propval, 3337 &props, B_TRUE)) 3338 goto error; 3339 } else { 3340 mntopts = optarg; 3341 } 3342 break; 3343 case 'R': 3344 if (add_prop_list(zpool_prop_to_name( 3345 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE)) 3346 goto error; 3347 if (add_prop_list_default(zpool_prop_to_name( 3348 ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE)) 3349 goto error; 3350 break; 3351 case 's': 3352 do_scan = B_TRUE; 3353 break; 3354 case 't': 3355 flags |= ZFS_IMPORT_TEMP_NAME; 3356 if (add_prop_list_default(zpool_prop_to_name( 3357 ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE)) 3358 goto error; 3359 break; 3360 3361 case 'T': 3362 errno = 0; 3363 txg = strtoull(optarg, &endptr, 0); 3364 if (errno != 0 || *endptr != '\0') { 3365 (void) fprintf(stderr, 3366 gettext("invalid txg value\n")); 3367 usage(B_FALSE); 3368 } 3369 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND; 3370 break; 3371 case 'V': 3372 flags |= ZFS_IMPORT_VERBATIM; 3373 break; 3374 case 'X': 3375 xtreme_rewind = B_TRUE; 3376 break; 3377 case CHECKPOINT_OPT: 3378 flags |= ZFS_IMPORT_CHECKPOINT; 3379 break; 3380 case ':': 3381 (void) fprintf(stderr, gettext("missing argument for " 3382 "'%c' option\n"), optopt); 3383 usage(B_FALSE); 3384 break; 3385 case '?': 3386 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 3387 optopt); 3388 usage(B_FALSE); 3389 } 3390 } 3391 3392 argc 
-= optind; 3393 argv += optind; 3394 3395 if (cachefile && nsearch != 0) { 3396 (void) fprintf(stderr, gettext("-c is incompatible with -d\n")); 3397 usage(B_FALSE); 3398 } 3399 3400 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) { 3401 (void) fprintf(stderr, gettext("-l is incompatible with -N\n")); 3402 usage(B_FALSE); 3403 } 3404 3405 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) { 3406 (void) fprintf(stderr, gettext("-l is only meaningful during " 3407 "an import\n")); 3408 usage(B_FALSE); 3409 } 3410 3411 if ((dryrun || xtreme_rewind) && !do_rewind) { 3412 (void) fprintf(stderr, 3413 gettext("-n or -X only meaningful with -F\n")); 3414 usage(B_FALSE); 3415 } 3416 if (dryrun) 3417 rewind_policy = ZPOOL_TRY_REWIND; 3418 else if (do_rewind) 3419 rewind_policy = ZPOOL_DO_REWIND; 3420 if (xtreme_rewind) 3421 rewind_policy |= ZPOOL_EXTREME_REWIND; 3422 3423 /* In the future, we can capture further policy and include it here */ 3424 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 3425 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 || 3426 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, 3427 rewind_policy) != 0) 3428 goto error; 3429 3430 /* check argument count */ 3431 if (do_all) { 3432 if (argc != 0) { 3433 (void) fprintf(stderr, gettext("too many arguments\n")); 3434 usage(B_FALSE); 3435 } 3436 } else { 3437 if (argc > 2) { 3438 (void) fprintf(stderr, gettext("too many arguments\n")); 3439 usage(B_FALSE); 3440 } 3441 } 3442 3443 /* 3444 * Check for the effective uid. We do this explicitly here because 3445 * otherwise any attempt to discover pools will silently fail. 3446 */ 3447 if (argc == 0 && geteuid() != 0) { 3448 (void) fprintf(stderr, gettext("cannot " 3449 "discover pools: permission denied\n")); 3450 if (searchdirs != NULL) 3451 free(searchdirs); 3452 3453 nvlist_free(props); 3454 nvlist_free(policy); 3455 return (1); 3456 } 3457 3458 /* 3459 * Depending on the arguments given, we do one of the following: 3460 * 3461 * <none> Iterate through all pools and display information about 3462 * each one. 3463 * 3464 * -a Iterate through all pools and try to import each one. 3465 * 3466 * <id> Find the pool that corresponds to the given GUID/pool 3467 * name and import that one. 3468 * 3469 * -D The above options apply only to destroyed pools. 3470 */ 3471 if (argc != 0) { 3472 char *endptr; 3473 3474 errno = 0; 3475 searchguid = strtoull(argv[0], &endptr, 10); 3476 if (errno != 0 || *endptr != '\0') { 3477 searchname = argv[0]; 3478 searchguid = 0; 3479 } 3480 found_config = NULL; 3481 3482 /* 3483 * User specified a name or guid. Ensure it's unique. 3484 */ 3485 target_exists_args_t search = {searchname, searchguid}; 3486 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search); 3487 } 3488 3489 /* 3490 * Check the environment for the preferred search path.
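 * For example, ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:/dev/disk/by-id" (an
 * illustrative value) is split on ':' below, and each component is added
 * to the search directory list just as repeated -d options would be.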
3491 */ 3492 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) { 3493 char *dir; 3494 3495 envdup = strdup(env); 3496 3497 dir = strtok(envdup, ":"); 3498 while (dir != NULL) { 3499 if (searchdirs == NULL) { 3500 searchdirs = safe_malloc(sizeof (char *)); 3501 } else { 3502 char **tmp = safe_malloc((nsearch + 1) * 3503 sizeof (char *)); 3504 bcopy(searchdirs, tmp, nsearch * 3505 sizeof (char *)); 3506 free(searchdirs); 3507 searchdirs = tmp; 3508 } 3509 searchdirs[nsearch++] = dir; 3510 dir = strtok(NULL, ":"); 3511 } 3512 } 3513 3514 idata.path = searchdirs; 3515 idata.paths = nsearch; 3516 idata.poolname = searchname; 3517 idata.guid = searchguid; 3518 idata.cachefile = cachefile; 3519 idata.scan = do_scan; 3520 idata.policy = policy; 3521 3522 pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops); 3523 3524 if (pools != NULL && pool_exists && 3525 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) { 3526 (void) fprintf(stderr, gettext("cannot import '%s': " 3527 "a pool with that name already exists\n"), 3528 argv[0]); 3529 (void) fprintf(stderr, gettext("use the form '%s " 3530 "<pool | id> <newpool>' to give it a new name\n"), 3531 "zpool import"); 3532 err = 1; 3533 } else if (pools == NULL && pool_exists) { 3534 (void) fprintf(stderr, gettext("cannot import '%s': " 3535 "a pool with that name is already created/imported,\n"), 3536 argv[0]); 3537 (void) fprintf(stderr, gettext("and no additional pools " 3538 "with that name were found\n")); 3539 err = 1; 3540 } else if (pools == NULL) { 3541 if (argc != 0) { 3542 (void) fprintf(stderr, gettext("cannot import '%s': " 3543 "no such pool available\n"), argv[0]); 3544 } 3545 err = 1; 3546 } 3547 3548 if (err == 1) { 3549 if (searchdirs != NULL) 3550 free(searchdirs); 3551 if (envdup != NULL) 3552 free(envdup); 3553 nvlist_free(policy); 3554 nvlist_free(pools); 3555 nvlist_free(props); 3556 return (1); 3557 } 3558 3559 /* 3560 * At this point we have a list of import candidate configs. Even if 3561 * we were searching by pool name or guid, we still need to 3562 * post-process the list to deal with pool state and possible 3563 * duplicate names. 3564 */ 3565 err = 0; 3566 elem = NULL; 3567 first = B_TRUE; 3568 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { 3569 3570 verify(nvpair_value_nvlist(elem, &config) == 0); 3571 3572 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 3573 &pool_state) == 0); 3574 if (!do_destroyed && pool_state == POOL_STATE_DESTROYED) 3575 continue; 3576 if (do_destroyed && pool_state != POOL_STATE_DESTROYED) 3577 continue; 3578 3579 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY, 3580 policy) == 0); 3581 3582 if (argc == 0) { 3583 if (first) 3584 first = B_FALSE; 3585 else if (!do_all) 3586 (void) printf("\n"); 3587 3588 if (do_all) { 3589 err |= do_import(config, NULL, mntopts, 3590 props, flags); 3591 } else { 3592 show_import(config); 3593 } 3594 } else if (searchname != NULL) { 3595 char *name; 3596 3597 /* 3598 * We are searching for a pool based on name. 3599 */ 3600 verify(nvlist_lookup_string(config, 3601 ZPOOL_CONFIG_POOL_NAME, &name) == 0); 3602 3603 if (strcmp(name, searchname) == 0) { 3604 if (found_config != NULL) { 3605 (void) fprintf(stderr, gettext( 3606 "cannot import '%s': more than " 3607 "one matching pool\n"), searchname); 3608 (void) fprintf(stderr, gettext( 3609 "import by numeric ID instead\n")); 3610 err = B_TRUE; 3611 } 3612 found_config = config; 3613 } 3614 } else { 3615 uint64_t guid; 3616 3617 /* 3618 * Search for a pool by guid. 
3619 */ 3620 verify(nvlist_lookup_uint64(config, 3621 ZPOOL_CONFIG_POOL_GUID, &guid) == 0); 3622 3623 if (guid == searchguid) 3624 found_config = config; 3625 } 3626 } 3627 3628 /* 3629 * If we were searching for a specific pool, verify that we found a 3630 * pool, and then do the import. 3631 */ 3632 if (argc != 0 && err == 0) { 3633 if (found_config == NULL) { 3634 (void) fprintf(stderr, gettext("cannot import '%s': " 3635 "no such pool available\n"), argv[0]); 3636 err = B_TRUE; 3637 } else { 3638 err |= do_import(found_config, argc == 1 ? NULL : 3639 argv[1], mntopts, props, flags); 3640 } 3641 } 3642 3643 /* 3644 * If we were just looking for pools, report an error if none were 3645 * found. 3646 */ 3647 if (argc == 0 && first) 3648 (void) fprintf(stderr, 3649 gettext("no pools available to import\n")); 3650 3651 error: 3652 nvlist_free(props); 3653 nvlist_free(pools); 3654 nvlist_free(policy); 3655 if (searchdirs != NULL) 3656 free(searchdirs); 3657 if (envdup != NULL) 3658 free(envdup); 3659 3660 return (err ? 1 : 0); 3661 } 3662 3663 /* 3664 * zpool sync [-f] [pool] ... 3665 * 3666 * -f (undocumented) force uberblock (and config including zpool cache file) 3667 * update. 3668 * 3669 * Sync the specified pool(s). 3670 * Without arguments "zpool sync" will sync all pools. 3671 * This command initiates TXG sync(s) and will return after the TXG(s) commit. 3672 * 3673 */ 3674 static int 3675 zpool_do_sync(int argc, char **argv) 3676 { 3677 int ret; 3678 boolean_t force = B_FALSE; 3679 3680 /* check options */ 3681 while ((ret = getopt(argc, argv, "f")) != -1) { 3682 switch (ret) { 3683 case 'f': 3684 force = B_TRUE; 3685 break; 3686 case '?': 3687 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 3688 optopt); 3689 usage(B_FALSE); 3690 } 3691 } 3692 3693 argc -= optind; 3694 argv += optind; 3695 3696 /* if argc == 0 we will execute zpool_sync_one on all pools */ 3697 ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, zpool_sync_one, 3698 &force); 3699 3700 return (ret); 3701 } 3702 3703 typedef struct iostat_cbdata { 3704 uint64_t cb_flags; 3705 int cb_name_flags; 3706 int cb_namewidth; 3707 int cb_iteration; 3708 char **cb_vdev_names; /* Only show these vdevs */ 3709 unsigned int cb_vdev_names_count; 3710 boolean_t cb_verbose; 3711 boolean_t cb_literal; 3712 boolean_t cb_scripted; 3713 zpool_list_t *cb_list; 3714 vdev_cmd_data_list_t *vcdl; 3715 } iostat_cbdata_t; 3716 3717 /* iostat labels */ 3718 typedef struct name_and_columns { 3719 const char *name; /* Column name */ 3720 unsigned int columns; /* Center name to this number of columns */ 3721 } name_and_columns_t; 3722 3723 #define IOSTAT_MAX_LABELS 13 /* Max number of labels on one line */ 3724 3725 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] = 3726 { 3727 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2}, 3728 {NULL}}, 3729 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 3730 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {NULL}}, 3731 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2}, 3732 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2}, 3733 {"trimq_write", 2}, {NULL}}, 3734 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 3735 {"asyncq_wait", 2}, {NULL}}, 3736 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2}, 3737 {"async_read", 2}, {"async_write", 2}, {"scrub", 2}, 3738 {"trim", 2}, {NULL}}, 3739 }; 3740 3741 /* Shorthand - if "columns" field not set, default to 1 column */ 3742 static const 
name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] = 3743 { 3744 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"}, 3745 {"write"}, {NULL}}, 3746 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 3747 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {NULL}}, 3748 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"}, 3749 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"}, 3750 {"pend"}, {"activ"}, {NULL}}, 3751 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 3752 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {NULL}}, 3753 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 3754 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, {NULL}}, 3755 }; 3756 3757 static const char *histo_to_title[] = { 3758 [IOS_L_HISTO] = "latency", 3759 [IOS_RQ_HISTO] = "req_size", 3760 }; 3761 3762 /* 3763 * Return the number of labels in a null-terminated name_and_columns_t 3764 * array. 3765 * 3766 */ 3767 static unsigned int 3768 label_array_len(const name_and_columns_t *labels) 3769 { 3770 int i = 0; 3771 3772 while (labels[i].name) 3773 i++; 3774 3775 return (i); 3776 } 3777 3778 /* 3779 * Return the number of strings in a null-terminated string array. 3780 * For example: 3781 * 3782 * const char foo[] = {"bar", "baz", NULL} 3783 * 3784 * returns 2 3785 */ 3786 static uint64_t 3787 str_array_len(const char *array[]) 3788 { 3789 uint64_t i = 0; 3790 while (array[i]) 3791 i++; 3792 3793 return (i); 3794 } 3795 3796 3797 /* 3798 * Return a default column width for default/latency/queue columns. This does 3799 * not include histograms, which have their columns autosized. 3800 */ 3801 static unsigned int 3802 default_column_width(iostat_cbdata_t *cb, enum iostat_type type) 3803 { 3804 unsigned long column_width = 5; /* Normal niceprint */ 3805 static unsigned long widths[] = { 3806 /* 3807 * Choose some sane default column sizes for printing the 3808 * raw numbers. 3809 */ 3810 [IOS_DEFAULT] = 15, /* 1PB capacity */ 3811 [IOS_LATENCY] = 10, /* 1B ns = 10sec */ 3812 [IOS_QUEUES] = 6, /* 1M queue entries */ 3813 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */ 3814 [IOS_RQ_HISTO] = 6, /* 1M queue entries */ 3815 }; 3816 3817 if (cb->cb_literal) 3818 column_width = widths[type]; 3819 3820 return (column_width); 3821 } 3822 3823 /* 3824 * Print the column labels, i.e: 3825 * 3826 * capacity operations bandwidth 3827 * alloc free read write read write ... 3828 * 3829 * If force_column_width is set, use it for the column width. If not set, use 3830 * the default column width. 3831 */ 3832 static void 3833 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width, 3834 const name_and_columns_t labels[][IOSTAT_MAX_LABELS]) 3835 { 3836 int i, idx, s; 3837 int text_start, rw_column_width, spaces_to_end; 3838 uint64_t flags = cb->cb_flags; 3839 uint64_t f; 3840 unsigned int column_width = force_column_width; 3841 3842 /* For each bit set in flags */ 3843 for (f = flags; f; f &= ~(1ULL << idx)) { 3844 idx = lowbit64(f) - 1; 3845 if (!force_column_width) 3846 column_width = default_column_width(cb, idx); 3847 /* Print our top labels centered over "read write" label. */ 3848 for (i = 0; i < label_array_len(labels[idx]); i++) { 3849 const char *name = labels[idx][i].name; 3850 /* 3851 * We treat labels[][].columns == 0 as shorthand 3852 * for one column. It makes writing out the label 3853 * tables more concise. 
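 * For instance, in the label tables above, {"scrub", 1} and {"scrub"}
 * describe the same single-column label.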
3854 */ 3855 unsigned int columns = MAX(1, labels[idx][i].columns); 3856 unsigned int slen = strlen(name); 3857 3858 rw_column_width = (column_width * columns) + 3859 (2 * (columns - 1)); 3860 3861 text_start = (int)((rw_column_width) / columns - 3862 slen / columns); 3863 if (text_start < 0) 3864 text_start = 0; 3865 3866 printf(" "); /* Two spaces between columns */ 3867 3868 /* Space from beginning of column to label */ 3869 for (s = 0; s < text_start; s++) 3870 printf(" "); 3871 3872 printf("%s", name); 3873 3874 /* Print space after label to end of column */ 3875 spaces_to_end = rw_column_width - text_start - slen; 3876 if (spaces_to_end < 0) 3877 spaces_to_end = 0; 3878 3879 for (s = 0; s < spaces_to_end; s++) 3880 printf(" "); 3881 } 3882 } 3883 } 3884 3885 3886 /* 3887 * print_cmd_columns - Print custom column titles from -c 3888 * 3889 * If the user specified the "zpool status|iostat -c" then print their custom 3890 * column titles in the header. For example, print_cmd_columns() would print 3891 * the " col1 col2" part of this: 3892 * 3893 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2' 3894 * ... 3895 * capacity operations bandwidth 3896 * pool alloc free read write read write col1 col2 3897 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 3898 * mypool 269K 1008M 0 0 107 946 3899 * mirror 269K 1008M 0 0 107 946 3900 * sdb - - 0 0 102 473 val1 val2 3901 * sdc - - 0 0 5 473 val1 val2 3902 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 3903 */ 3904 static void 3905 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes) 3906 { 3907 int i, j; 3908 vdev_cmd_data_t *data = &vcdl->data[0]; 3909 3910 if (vcdl->count == 0 || data == NULL) 3911 return; 3912 3913 /* 3914 * Each vdev cmd should have the same column names unless the user did 3915 * something weird with their cmd. Just take the column names from the 3916 * first vdev and assume it works for all of them. 3917 */ 3918 for (i = 0; i < vcdl->uniq_cols_cnt; i++) { 3919 printf(" "); 3920 if (use_dashes) { 3921 for (j = 0; j < vcdl->uniq_cols_width[i]; j++) 3922 printf("-"); 3923 } else { 3924 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i], 3925 vcdl->uniq_cols[i]); 3926 } 3927 } 3928 } 3929 3930 3931 /* 3932 * Utility function to print out a line of dashes like: 3933 * 3934 * -------------------------------- ----- ----- ----- ----- ----- 3935 * 3936 * ...or a dashed named-row line like: 3937 * 3938 * logs - - - - - 3939 * 3940 * @cb: iostat data 3941 * 3942 * @force_column_width If non-zero, use the value as the column width. 3943 * Otherwise use the default column widths. 3944 * 3945 * @name: Print a dashed named-row line starting 3946 * with @name. Otherwise, print a regular 3947 * dashed line. 3948 */ 3949 static void 3950 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width, 3951 const char *name) 3952 { 3953 int i; 3954 unsigned int namewidth; 3955 uint64_t flags = cb->cb_flags; 3956 uint64_t f; 3957 int idx; 3958 const name_and_columns_t *labels; 3959 const char *title; 3960 3961 3962 if (cb->cb_flags & IOS_ANYHISTO_M) { 3963 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 3964 } else if (cb->cb_vdev_names_count) { 3965 title = "vdev"; 3966 } else { 3967 title = "pool"; 3968 } 3969 3970 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 3971 name ? 
strlen(name) : 0); 3972 3973 3974 if (name) { 3975 printf("%-*s", namewidth, name); 3976 } else { 3977 for (i = 0; i < namewidth; i++) 3978 (void) printf("-"); 3979 } 3980 3981 /* For each bit in flags */ 3982 for (f = flags; f; f &= ~(1ULL << idx)) { 3983 unsigned int column_width; 3984 idx = lowbit64(f) - 1; 3985 if (force_column_width) 3986 column_width = force_column_width; 3987 else 3988 column_width = default_column_width(cb, idx); 3989 3990 labels = iostat_bottom_labels[idx]; 3991 for (i = 0; i < label_array_len(labels); i++) { 3992 if (name) 3993 printf(" %*s-", column_width - 1, " "); 3994 else 3995 printf(" %.*s", column_width, 3996 "--------------------"); 3997 } 3998 } 3999 } 4000 4001 4002 static void 4003 print_iostat_separator_impl(iostat_cbdata_t *cb, 4004 unsigned int force_column_width) 4005 { 4006 print_iostat_dashes(cb, force_column_width, NULL); 4007 } 4008 4009 static void 4010 print_iostat_separator(iostat_cbdata_t *cb) 4011 { 4012 print_iostat_separator_impl(cb, 0); 4013 } 4014 4015 static void 4016 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width, 4017 const char *histo_vdev_name) 4018 { 4019 unsigned int namewidth; 4020 const char *title; 4021 4022 if (cb->cb_flags & IOS_ANYHISTO_M) { 4023 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 4024 } else if (cb->cb_vdev_names_count) { 4025 title = "vdev"; 4026 } else { 4027 title = "pool"; 4028 } 4029 4030 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 4031 histo_vdev_name ? strlen(histo_vdev_name) : 0); 4032 4033 if (histo_vdev_name) 4034 printf("%-*s", namewidth, histo_vdev_name); 4035 else 4036 printf("%*s", namewidth, ""); 4037 4038 4039 print_iostat_labels(cb, force_column_width, iostat_top_labels); 4040 printf("\n"); 4041 4042 printf("%-*s", namewidth, title); 4043 4044 print_iostat_labels(cb, force_column_width, iostat_bottom_labels); 4045 if (cb->vcdl != NULL) 4046 print_cmd_columns(cb->vcdl, 0); 4047 4048 printf("\n"); 4049 4050 print_iostat_separator_impl(cb, force_column_width); 4051 4052 if (cb->vcdl != NULL) 4053 print_cmd_columns(cb->vcdl, 1); 4054 4055 printf("\n"); 4056 } 4057 4058 static void 4059 print_iostat_header(iostat_cbdata_t *cb) 4060 { 4061 print_iostat_header_impl(cb, 0, NULL); 4062 } 4063 4064 4065 /* 4066 * Display a single statistic. 4067 */ 4068 static void 4069 print_one_stat(uint64_t value, enum zfs_nicenum_format format, 4070 unsigned int column_size, boolean_t scripted) 4071 { 4072 char buf[64]; 4073 4074 zfs_nicenum_format(value, buf, sizeof (buf), format); 4075 4076 if (scripted) 4077 printf("\t%s", buf); 4078 else 4079 printf(" %*s", column_size, buf); 4080 } 4081 4082 /* 4083 * Calculate the default vdev stats 4084 * 4085 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting 4086 * stats into calcvs. 4087 */ 4088 static void 4089 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs, 4090 vdev_stat_t *calcvs) 4091 { 4092 int i; 4093 4094 memcpy(calcvs, newvs, sizeof (*calcvs)); 4095 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++) 4096 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]); 4097 4098 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++) 4099 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]); 4100 } 4101 4102 /* 4103 * Internal representation of the extended iostats data. 4104 * 4105 * The extended iostat stats are exported in nvlists as either uint64_t arrays 4106 * or single uint64_t's. We make both look like arrays to make them easier 4107 * to process. 
In order to make single uint64_t's look like arrays, we set 4108 * __data to the stat data, and then set data = &__data with count = 1. Then, 4109 * we can just use *data and count. 4110 */ 4111 struct stat_array { 4112 uint64_t *data; 4113 uint_t count; /* Number of entries in data[] */ 4114 uint64_t __data; /* Only used when data is a single uint64_t */ 4115 }; 4116 4117 static uint64_t 4118 stat_histo_max(struct stat_array *nva, unsigned int len) 4119 { 4120 uint64_t max = 0; 4121 int i; 4122 for (i = 0; i < len; i++) 4123 max = MAX(max, array64_max(nva[i].data, nva[i].count)); 4124 4125 return (max); 4126 } 4127 4128 /* 4129 * Helper function to lookup a uint64_t array or uint64_t value and store its 4130 * data as a stat_array. If the nvpair is a single uint64_t value, then we make 4131 * it look like a one element array to make it easier to process. 4132 */ 4133 static int 4134 nvpair64_to_stat_array(nvlist_t *nvl, const char *name, 4135 struct stat_array *nva) 4136 { 4137 nvpair_t *tmp; 4138 int ret; 4139 4140 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0); 4141 switch (nvpair_type(tmp)) { 4142 case DATA_TYPE_UINT64_ARRAY: 4143 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count); 4144 break; 4145 case DATA_TYPE_UINT64: 4146 ret = nvpair_value_uint64(tmp, &nva->__data); 4147 nva->data = &nva->__data; 4148 nva->count = 1; 4149 break; 4150 default: 4151 /* Not a uint64_t */ 4152 ret = EINVAL; 4153 break; 4154 } 4155 4156 return (ret); 4157 } 4158 4159 /* 4160 * Given a list of nvlist names, look up the extended stats in newnv and oldnv, 4161 * subtract them, and return the results in a newly allocated stat_array. 4162 * You must free the returned array when you are done with it, using 4163 * free_calc_stats(). 4164 * 4165 * Additionally, you can set "oldnv" to NULL if you simply want the newnv 4166 * values.
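 *
 * As a rough usage sketch (this mirrors how the latency and queue
 * printers below consume the helper; it is not a separate API):
 *
 *	const char *names[] = { ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO };
 *	struct stat_array *nva;
 *
 *	nva = calc_and_alloc_stats_ex(names, 1, oldnv, newnv);
 *	... read nva[0].data[0 .. nva[0].count - 1] (the interval deltas) ...
 *	free_calc_stats(nva, 1);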
4167 */ 4168 static struct stat_array * 4169 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv, 4170 nvlist_t *newnv) 4171 { 4172 nvlist_t *oldnvx = NULL, *newnvx; 4173 struct stat_array *oldnva, *newnva, *calcnva; 4174 int i, j; 4175 unsigned int alloc_size = (sizeof (struct stat_array)) * len; 4176 4177 /* Extract our extended stats nvlist from the main list */ 4178 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX, 4179 &newnvx) == 0); 4180 if (oldnv) { 4181 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX, 4182 &oldnvx) == 0); 4183 } 4184 4185 newnva = safe_malloc(alloc_size); 4186 oldnva = safe_malloc(alloc_size); 4187 calcnva = safe_malloc(alloc_size); 4188 4189 for (j = 0; j < len; j++) { 4190 verify(nvpair64_to_stat_array(newnvx, names[j], 4191 &newnva[j]) == 0); 4192 calcnva[j].count = newnva[j].count; 4193 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]); 4194 calcnva[j].data = safe_malloc(alloc_size); 4195 memcpy(calcnva[j].data, newnva[j].data, alloc_size); 4196 4197 if (oldnvx) { 4198 verify(nvpair64_to_stat_array(oldnvx, names[j], 4199 &oldnva[j]) == 0); 4200 for (i = 0; i < oldnva[j].count; i++) 4201 calcnva[j].data[i] -= oldnva[j].data[i]; 4202 } 4203 } 4204 free(newnva); 4205 free(oldnva); 4206 return (calcnva); 4207 } 4208 4209 static void 4210 free_calc_stats(struct stat_array *nva, unsigned int len) 4211 { 4212 int i; 4213 for (i = 0; i < len; i++) 4214 free(nva[i].data); 4215 4216 free(nva); 4217 } 4218 4219 static void 4220 print_iostat_histo(struct stat_array *nva, unsigned int len, 4221 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth, 4222 double scale) 4223 { 4224 int i, j; 4225 char buf[6]; 4226 uint64_t val; 4227 enum zfs_nicenum_format format; 4228 unsigned int buckets; 4229 unsigned int start_bucket; 4230 4231 if (cb->cb_literal) 4232 format = ZFS_NICENUM_RAW; 4233 else 4234 format = ZFS_NICENUM_1024; 4235 4236 /* All these histos are the same size, so just use nva[0].count */ 4237 buckets = nva[0].count; 4238 4239 if (cb->cb_flags & IOS_RQ_HISTO_M) { 4240 /* Start at 512 - req size should never be lower than this */ 4241 start_bucket = 9; 4242 } else { 4243 start_bucket = 0; 4244 } 4245 4246 for (j = start_bucket; j < buckets; j++) { 4247 /* Print histogram bucket label */ 4248 if (cb->cb_flags & IOS_L_HISTO_M) { 4249 /* Ending range of this bucket */ 4250 val = (1UL << (j + 1)) - 1; 4251 zfs_nicetime(val, buf, sizeof (buf)); 4252 } else { 4253 /* Request size (starting range of bucket) */ 4254 val = (1UL << j); 4255 zfs_nicenum(val, buf, sizeof (buf)); 4256 } 4257 4258 if (cb->cb_scripted) 4259 printf("%llu", (u_longlong_t)val); 4260 else 4261 printf("%-*s", namewidth, buf); 4262 4263 /* Print the values on the line */ 4264 for (i = 0; i < len; i++) { 4265 print_one_stat(nva[i].data[j] * scale, format, 4266 column_width, cb->cb_scripted); 4267 } 4268 printf("\n"); 4269 } 4270 } 4271 4272 static void 4273 print_solid_separator(unsigned int length) 4274 { 4275 while (length--) 4276 printf("-"); 4277 printf("\n"); 4278 } 4279 4280 static void 4281 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv, 4282 nvlist_t *newnv, double scale, const char *name) 4283 { 4284 unsigned int column_width; 4285 unsigned int namewidth; 4286 unsigned int entire_width; 4287 enum iostat_type type; 4288 struct stat_array *nva; 4289 const char **names; 4290 unsigned int names_len; 4291 4292 /* What type of histo are we? 
*/ 4293 type = IOS_HISTO_IDX(cb->cb_flags); 4294 4295 /* Get NULL-terminated array of nvlist names for our histo */ 4296 names = vsx_type_to_nvlist[type]; 4297 names_len = str_array_len(names); /* num of names */ 4298 4299 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv); 4300 4301 if (cb->cb_literal) { 4302 column_width = MAX(5, 4303 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1); 4304 } else { 4305 column_width = 5; 4306 } 4307 4308 namewidth = MAX(cb->cb_namewidth, 4309 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)])); 4310 4311 /* 4312 * Calculate the entire line width of what we're printing. The 4313 * +2 is for the two spaces between columns: 4314 */ 4315 /* read write */ 4316 /* ----- ----- */ 4317 /* |___| <---------- column_width */ 4318 /* */ 4319 /* |__________| <--- entire_width */ 4320 /* */ 4321 entire_width = namewidth + (column_width + 2) * 4322 label_array_len(iostat_bottom_labels[type]); 4323 4324 if (cb->cb_scripted) 4325 printf("%s\n", name); 4326 else 4327 print_iostat_header_impl(cb, column_width, name); 4328 4329 print_iostat_histo(nva, names_len, cb, column_width, 4330 namewidth, scale); 4331 4332 free_calc_stats(nva, names_len); 4333 if (!cb->cb_scripted) 4334 print_solid_separator(entire_width); 4335 } 4336 4337 /* 4338 * Calculate the average latency of a power-of-two latency histogram 4339 */ 4340 static uint64_t 4341 single_histo_average(uint64_t *histo, unsigned int buckets) 4342 { 4343 int i; 4344 uint64_t count = 0, total = 0; 4345 4346 for (i = 0; i < buckets; i++) { 4347 /* 4348 * Our buckets are power-of-two latency ranges. Use the 4349 * midpoint latency of each bucket to calculate the average. 4350 * For example: 4351 * 4352 * Bucket Midpoint 4353 * 8ns-15ns: 12ns 4354 * 16ns-31ns: 24ns 4355 * ... 4356 */ 4357 if (histo[i] != 0) { 4358 total += histo[i] * (((1UL << i) + ((1UL << i)/2))); 4359 count += histo[i]; 4360 } 4361 } 4362 4363 /* Prevent divide by zero */ 4364 return (count == 0 ? 
0 : total / count); 4365 } 4366 4367 static void 4368 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *oldnv, 4369 nvlist_t *newnv) 4370 { 4371 int i; 4372 uint64_t val; 4373 const char *names[] = { 4374 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, 4375 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, 4376 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, 4377 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, 4378 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, 4379 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, 4380 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, 4381 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, 4382 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, 4383 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, 4384 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE, 4385 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE, 4386 }; 4387 4388 struct stat_array *nva; 4389 4390 unsigned int column_width = default_column_width(cb, IOS_QUEUES); 4391 enum zfs_nicenum_format format; 4392 4393 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv); 4394 4395 if (cb->cb_literal) 4396 format = ZFS_NICENUM_RAW; 4397 else 4398 format = ZFS_NICENUM_1024; 4399 4400 for (i = 0; i < ARRAY_SIZE(names); i++) { 4401 val = nva[i].data[0]; 4402 print_one_stat(val, format, column_width, cb->cb_scripted); 4403 } 4404 4405 free_calc_stats(nva, ARRAY_SIZE(names)); 4406 } 4407 4408 static void 4409 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv, 4410 nvlist_t *newnv) 4411 { 4412 int i; 4413 uint64_t val; 4414 const char *names[] = { 4415 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, 4416 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, 4417 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, 4418 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, 4419 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO, 4420 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO, 4421 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO, 4422 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO, 4423 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO, 4424 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, 4425 }; 4426 struct stat_array *nva; 4427 4428 unsigned int column_width = default_column_width(cb, IOS_LATENCY); 4429 enum zfs_nicenum_format format; 4430 4431 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv); 4432 4433 if (cb->cb_literal) 4434 format = ZFS_NICENUM_RAWTIME; 4435 else 4436 format = ZFS_NICENUM_TIME; 4437 4438 /* Print our avg latencies on the line */ 4439 for (i = 0; i < ARRAY_SIZE(names); i++) { 4440 /* Compute average latency for a latency histo */ 4441 val = single_histo_average(nva[i].data, nva[i].count); 4442 print_one_stat(val, format, column_width, cb->cb_scripted); 4443 } 4444 free_calc_stats(nva, ARRAY_SIZE(names)); 4445 } 4446 4447 /* 4448 * Print default statistics (capacity/operations/bandwidth) 4449 */ 4450 static void 4451 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale) 4452 { 4453 unsigned int column_width = default_column_width(cb, IOS_DEFAULT); 4454 enum zfs_nicenum_format format; 4455 char na; /* char to print for "not applicable" values */ 4456 4457 if (cb->cb_literal) { 4458 format = ZFS_NICENUM_RAW; 4459 na = '0'; 4460 } else { 4461 format = ZFS_NICENUM_1024; 4462 na = '-'; 4463 } 4464 4465 /* only toplevel vdevs have capacity stats */ 4466 if (vs->vs_space == 0) { 4467 if (cb->cb_scripted) 4468 printf("\t%c\t%c", na, na); 4469 else 4470 printf(" %*c %*c", column_width, na, column_width, 4471 na); 4472 } else { 4473 print_one_stat(vs->vs_alloc, format, column_width, 4474 cb->cb_scripted); 4475 print_one_stat(vs->vs_space - vs->vs_alloc, format, 4476 column_width, cb->cb_scripted); 4477 } 4478 4479 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale), 4480 format, column_width, 
cb->cb_scripted); 4481 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale), 4482 format, column_width, cb->cb_scripted); 4483 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale), 4484 format, column_width, cb->cb_scripted); 4485 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale), 4486 format, column_width, cb->cb_scripted); 4487 } 4488 4489 static const char *class_name[] = { 4490 VDEV_ALLOC_BIAS_DEDUP, 4491 VDEV_ALLOC_BIAS_SPECIAL, 4492 VDEV_ALLOC_CLASS_LOGS 4493 }; 4494 4495 /* 4496 * Print out all the statistics for the given vdev. This can either be the 4497 * toplevel configuration, or called recursively. If 'name' is NULL, then this 4498 * is a verbose output, and we don't want to display the toplevel pool stats. 4499 * 4500 * Returns the number of stat lines printed. 4501 */ 4502 static unsigned int 4503 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, 4504 nvlist_t *newnv, iostat_cbdata_t *cb, int depth) 4505 { 4506 nvlist_t **oldchild, **newchild; 4507 uint_t c, children, oldchildren; 4508 vdev_stat_t *oldvs, *newvs, *calcvs; 4509 vdev_stat_t zerovs = { 0 }; 4510 char *vname; 4511 int i; 4512 int ret = 0; 4513 uint64_t tdelta; 4514 double scale; 4515 4516 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) 4517 return (ret); 4518 4519 calcvs = safe_malloc(sizeof (*calcvs)); 4520 4521 if (oldnv != NULL) { 4522 verify(nvlist_lookup_uint64_array(oldnv, 4523 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0); 4524 } else { 4525 oldvs = &zerovs; 4526 } 4527 4528 /* Do we only want to see a specific vdev? */ 4529 for (i = 0; i < cb->cb_vdev_names_count; i++) { 4530 /* Yes we do. Is this the vdev? */ 4531 if (strcmp(name, cb->cb_vdev_names[i]) == 0) { 4532 /* 4533 * This is our vdev. Since it is the only vdev we 4534 * will be displaying, make depth = 0 so that it 4535 * doesn't get indented. 4536 */ 4537 depth = 0; 4538 break; 4539 } 4540 } 4541 4542 if (cb->cb_vdev_names_count && (i == cb->cb_vdev_names_count)) { 4543 /* Couldn't match the name */ 4544 goto children; 4545 } 4546 4547 4548 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS, 4549 (uint64_t **)&newvs, &c) == 0); 4550 4551 /* 4552 * Print the vdev name unless it's a histogram. Histograms 4553 * display the vdev name in the header itself. 4554 */ 4555 if (!(cb->cb_flags & IOS_ANYHISTO_M)) { 4556 if (cb->cb_scripted) { 4557 printf("%s", name); 4558 } else { 4559 if (strlen(name) + depth > cb->cb_namewidth) 4560 (void) printf("%*s%s", depth, "", name); 4561 else 4562 (void) printf("%*s%s%*s", depth, "", name, 4563 (int)(cb->cb_namewidth - strlen(name) - 4564 depth), ""); 4565 } 4566 } 4567 4568 /* Calculate our scaling factor */ 4569 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp; 4570 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) { 4571 /* 4572 * If we specify printing histograms with no time interval, then 4573 * print the histogram numbers over the entire lifetime of the 4574 * vdev.
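 * Otherwise scale converts the raw interval deltas into per-second
 * rates: with a 2-second interval, for example, tdelta is roughly
 * 2 * NANOSEC, scale works out to about 0.5, and a delta of 100 ops
 * over the interval is reported as 50 ops/sec.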
4575 */ 4576 scale = 1; 4577 } else { 4578 if (tdelta == 0) 4579 scale = 1.0; 4580 else 4581 scale = (double)NANOSEC / tdelta; 4582 } 4583 4584 if (cb->cb_flags & IOS_DEFAULT_M) { 4585 calc_default_iostats(oldvs, newvs, calcvs); 4586 print_iostat_default(calcvs, cb, scale); 4587 } 4588 if (cb->cb_flags & IOS_LATENCY_M) 4589 print_iostat_latency(cb, oldnv, newnv); 4590 if (cb->cb_flags & IOS_QUEUES_M) 4591 print_iostat_queues(cb, oldnv, newnv); 4592 if (cb->cb_flags & IOS_ANYHISTO_M) { 4593 printf("\n"); 4594 print_iostat_histos(cb, oldnv, newnv, scale, name); 4595 } 4596 4597 if (cb->vcdl != NULL) { 4598 char *path; 4599 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH, 4600 &path) == 0) { 4601 printf(" "); 4602 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 4603 } 4604 } 4605 4606 if (!(cb->cb_flags & IOS_ANYHISTO_M)) 4607 printf("\n"); 4608 4609 ret++; 4610 4611 children: 4612 4613 free(calcvs); 4614 4615 if (!cb->cb_verbose) 4616 return (ret); 4617 4618 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN, 4619 &newchild, &children) != 0) 4620 return (ret); 4621 4622 if (oldnv) { 4623 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN, 4624 &oldchild, &oldchildren) != 0) 4625 return (ret); 4626 4627 children = MIN(oldchildren, children); 4628 } 4629 4630 /* 4631 * print normal top-level devices 4632 */ 4633 for (c = 0; c < children; c++) { 4634 uint64_t ishole = B_FALSE, islog = B_FALSE; 4635 4636 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE, 4637 &ishole); 4638 4639 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG, 4640 &islog); 4641 4642 if (ishole || islog) 4643 continue; 4644 4645 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 4646 continue; 4647 4648 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 4649 cb->cb_name_flags); 4650 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, 4651 newchild[c], cb, depth + 2); 4652 free(vname); 4653 } 4654 4655 /* 4656 * print all other top-level devices 4657 */ 4658 for (uint_t n = 0; n < 3; n++) { 4659 boolean_t printed = B_FALSE; 4660 4661 for (c = 0; c < children; c++) { 4662 uint64_t islog = B_FALSE; 4663 char *bias = NULL; 4664 char *type = NULL; 4665 4666 (void) nvlist_lookup_uint64(newchild[c], 4667 ZPOOL_CONFIG_IS_LOG, &islog); 4668 if (islog) { 4669 bias = VDEV_ALLOC_CLASS_LOGS; 4670 } else { 4671 (void) nvlist_lookup_string(newchild[c], 4672 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 4673 (void) nvlist_lookup_string(newchild[c], 4674 ZPOOL_CONFIG_TYPE, &type); 4675 } 4676 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 4677 continue; 4678 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 4679 continue; 4680 4681 if (!printed) { 4682 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && 4683 !cb->cb_scripted && !cb->cb_vdev_names) { 4684 print_iostat_dashes(cb, 0, 4685 class_name[n]); 4686 } 4687 printf("\n"); 4688 printed = B_TRUE; 4689 } 4690 4691 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 4692 cb->cb_name_flags); 4693 ret += print_vdev_stats(zhp, vname, oldnv ? 
4694 oldchild[c] : NULL, newchild[c], cb, depth + 2); 4695 free(vname); 4696 } 4697 } 4698 4699 /* 4700 * Include level 2 ARC devices in iostat output 4701 */ 4702 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE, 4703 &newchild, &children) != 0) 4704 return (ret); 4705 4706 if (oldnv) { 4707 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE, 4708 &oldchild, &oldchildren) != 0) 4709 return (ret); 4710 4711 children = MIN(oldchildren, children); 4712 } 4713 4714 if (children > 0) { 4715 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted && 4716 !cb->cb_vdev_names) { 4717 print_iostat_dashes(cb, 0, "cache"); 4718 } 4719 printf("\n"); 4720 4721 for (c = 0; c < children; c++) { 4722 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 4723 cb->cb_name_flags); 4724 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] 4725 : NULL, newchild[c], cb, depth + 2); 4726 free(vname); 4727 } 4728 } 4729 4730 return (ret); 4731 } 4732 4733 static int 4734 refresh_iostat(zpool_handle_t *zhp, void *data) 4735 { 4736 iostat_cbdata_t *cb = data; 4737 boolean_t missing; 4738 4739 /* 4740 * If the pool has disappeared, remove it from the list and continue. 4741 */ 4742 if (zpool_refresh_stats(zhp, &missing) != 0) 4743 return (-1); 4744 4745 if (missing) 4746 pool_list_remove(cb->cb_list, zhp); 4747 4748 return (0); 4749 } 4750 4751 /* 4752 * Callback to print out the iostats for the given pool. 4753 */ 4754 static int 4755 print_iostat(zpool_handle_t *zhp, void *data) 4756 { 4757 iostat_cbdata_t *cb = data; 4758 nvlist_t *oldconfig, *newconfig; 4759 nvlist_t *oldnvroot, *newnvroot; 4760 int ret; 4761 4762 newconfig = zpool_get_config(zhp, &oldconfig); 4763 4764 if (cb->cb_iteration == 1) 4765 oldconfig = NULL; 4766 4767 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE, 4768 &newnvroot) == 0); 4769 4770 if (oldconfig == NULL) 4771 oldnvroot = NULL; 4772 else 4773 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE, 4774 &oldnvroot) == 0); 4775 4776 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, 4777 cb, 0); 4778 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) && 4779 !cb->cb_scripted && cb->cb_verbose && !cb->cb_vdev_names_count) { 4780 print_iostat_separator(cb); 4781 if (cb->vcdl != NULL) { 4782 print_cmd_columns(cb->vcdl, 1); 4783 } 4784 printf("\n"); 4785 } 4786 4787 return (ret); 4788 } 4789 4790 static int 4791 get_columns(void) 4792 { 4793 struct winsize ws; 4794 int columns = 80; 4795 int error; 4796 4797 if (isatty(STDOUT_FILENO)) { 4798 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws); 4799 if (error == 0) 4800 columns = ws.ws_col; 4801 } else { 4802 columns = 999; 4803 } 4804 4805 return (columns); 4806 } 4807 4808 /* 4809 * Return the required length of the pool/vdev name column. The minimum 4810 * allowed width and output formatting flags must be provided. 
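 * For example, with a pool named "tank" ("tank" is only an illustration)
 * a verbose caller gets back at least strlen("tank"), widened further if
 * max_width() finds a longer indented vdev name; a non-verbose caller
 * simply gets MAX(strlen("tank"), min_width).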
4811 */ 4812 static int 4813 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose) 4814 { 4815 nvlist_t *config, *nvroot; 4816 int width = min_width; 4817 4818 if ((config = zpool_get_config(zhp, NULL)) != NULL) { 4819 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4820 &nvroot) == 0); 4821 unsigned int poolname_len = strlen(zpool_get_name(zhp)); 4822 if (verbose == B_FALSE) { 4823 width = MAX(poolname_len, min_width); 4824 } else { 4825 width = MAX(poolname_len, 4826 max_width(zhp, nvroot, 0, min_width, flags)); 4827 } 4828 } 4829 4830 return (width); 4831 } 4832 4833 /* 4834 * Parse the input string, get the 'interval' and 'count' value if there is one. 4835 */ 4836 static void 4837 get_interval_count(int *argcp, char **argv, float *iv, 4838 unsigned long *cnt) 4839 { 4840 float interval = 0; 4841 unsigned long count = 0; 4842 int argc = *argcp; 4843 4844 /* 4845 * Determine if the last argument is an integer or a pool name 4846 */ 4847 if (argc > 0 && zfs_isnumber(argv[argc - 1])) { 4848 char *end; 4849 4850 errno = 0; 4851 interval = strtof(argv[argc - 1], &end); 4852 4853 if (*end == '\0' && errno == 0) { 4854 if (interval == 0) { 4855 (void) fprintf(stderr, gettext("interval " 4856 "cannot be zero\n")); 4857 usage(B_FALSE); 4858 } 4859 /* 4860 * Ignore the last parameter 4861 */ 4862 argc--; 4863 } else { 4864 /* 4865 * If this is not a valid number, just plow on. The 4866 * user will get a more informative error message later 4867 * on. 4868 */ 4869 interval = 0; 4870 } 4871 } 4872 4873 /* 4874 * If the last argument is also an integer, then we have both a count 4875 * and an interval. 4876 */ 4877 if (argc > 0 && zfs_isnumber(argv[argc - 1])) { 4878 char *end; 4879 4880 errno = 0; 4881 count = interval; 4882 interval = strtof(argv[argc - 1], &end); 4883 4884 if (*end == '\0' && errno == 0) { 4885 if (interval == 0) { 4886 (void) fprintf(stderr, gettext("interval " 4887 "cannot be zero\n")); 4888 usage(B_FALSE); 4889 } 4890 4891 /* 4892 * Ignore the last parameter 4893 */ 4894 argc--; 4895 } else { 4896 interval = 0; 4897 } 4898 } 4899 4900 *iv = interval; 4901 *cnt = count; 4902 *argcp = argc; 4903 } 4904 4905 static void 4906 get_timestamp_arg(char c) 4907 { 4908 if (c == 'u') 4909 timestamp_fmt = UDATE; 4910 else if (c == 'd') 4911 timestamp_fmt = DDATE; 4912 else 4913 usage(B_FALSE); 4914 } 4915 4916 /* 4917 * Return stat flags that are supported by all pools by both the module and 4918 * zpool iostat. "*data" should be initialized to all 0xFFs before running. 4919 * It will get ANDed down until only the flags that are supported on all pools 4920 * remain. 4921 */ 4922 static int 4923 get_stat_flags_cb(zpool_handle_t *zhp, void *data) 4924 { 4925 uint64_t *mask = data; 4926 nvlist_t *config, *nvroot, *nvx; 4927 uint64_t flags = 0; 4928 int i, j; 4929 4930 config = zpool_get_config(zhp, NULL); 4931 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4932 &nvroot) == 0); 4933 4934 /* Default stats are always supported, but for completeness.. */ 4935 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS)) 4936 flags |= IOS_DEFAULT_M; 4937 4938 /* Get our extended stats nvlist from the main list */ 4939 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX, 4940 &nvx) != 0) { 4941 /* 4942 * No extended stats; they're probably running an older 4943 * module. No big deal, we support that too. 
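 * Only the default stats will survive the AND against the mask below in
 * that case. As a worked example of the masking in general: if one
 * imported pool exports both the latency and queue nvpairs but another
 * exports only latency, IOS_QUEUES_M is cleared when the second pool's
 * callback runs, leaving IOS_DEFAULT_M | IOS_LATENCY_M in the final mask.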
*/ 4945 goto end; 4946 } 4947 4948 /* For each extended stat, make sure all its nvpairs are supported */ 4949 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) { 4950 if (!vsx_type_to_nvlist[j][0]) 4951 continue; 4952 4953 /* Start off by assuming the flag is supported, then check */ 4954 flags |= (1ULL << j); 4955 for (i = 0; vsx_type_to_nvlist[j][i]; i++) { 4956 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) { 4957 /* flag isn't supported */ 4958 flags = flags & ~(1ULL << j); 4959 break; 4960 } 4961 } 4962 } 4963 end: 4964 *mask = *mask & flags; 4965 return (0); 4966 } 4967 4968 /* 4969 * Return a bitmask of stats that are supported on all pools by both the module 4970 * and zpool iostat. 4971 */ 4972 static uint64_t 4973 get_stat_flags(zpool_list_t *list) 4974 { 4975 uint64_t mask = -1; 4976 4977 /* 4978 * get_stat_flags_cb() will lop off bits from "mask" until only the 4979 * flags that are supported on all pools remain. 4980 */ 4981 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask); 4982 return (mask); 4983 } 4984 4985 /* 4986 * Return 1 if cb_data->cb_vdev_names[0] is this vdev's name, 0 otherwise. 4987 */ 4988 static int 4989 is_vdev_cb(zpool_handle_t *zhp, nvlist_t *nv, void *cb_data) 4990 { 4991 iostat_cbdata_t *cb = cb_data; 4992 char *name = NULL; 4993 int ret = 0; 4994 4995 name = zpool_vdev_name(g_zfs, zhp, nv, cb->cb_name_flags); 4996 4997 if (strcmp(name, cb->cb_vdev_names[0]) == 0) 4998 ret = 1; /* match */ 4999 free(name); 5000 5001 return (ret); 5002 } 5003 5004 /* 5005 * Returns 1 if cb_data->cb_vdev_names[0] is a vdev name, 0 otherwise. 5006 */ 5007 static int 5008 is_vdev(zpool_handle_t *zhp, void *cb_data) 5009 { 5010 return (for_each_vdev(zhp, is_vdev_cb, cb_data)); 5011 } 5012 5013 /* 5014 * Check if vdevs are in a pool 5015 * 5016 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise 5017 * return 0. If pool_name is NULL, then search all pools. 5018 */ 5019 static int 5020 are_vdevs_in_pool(int argc, char **argv, char *pool_name, 5021 iostat_cbdata_t *cb) 5022 { 5023 char **tmp_name; 5024 int ret = 0; 5025 int i; 5026 int pool_count = 0; 5027 5028 if ((argc == 0) || !*argv) 5029 return (0); 5030 5031 if (pool_name) 5032 pool_count = 1; 5033 5034 /* Temporarily hijack cb_vdev_names for a second... */ 5035 tmp_name = cb->cb_vdev_names; 5036 5037 /* Go through our list of prospective vdev names */ 5038 for (i = 0; i < argc; i++) { 5039 cb->cb_vdev_names = argv + i; 5040 5041 /* Is this name a vdev in our pools? */ 5042 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL, 5043 B_FALSE, is_vdev, cb); 5044 if (!ret) { 5045 /* No match */ 5046 break; 5047 } 5048 } 5049 5050 cb->cb_vdev_names = tmp_name; 5051 5052 return (ret); 5053 } 5054 5055 static int 5056 is_pool_cb(zpool_handle_t *zhp, void *data) 5057 { 5058 char *name = data; 5059 if (strcmp(name, zpool_get_name(zhp)) == 0) 5060 return (1); 5061 5062 return (0); 5063 } 5064 5065 /* 5066 * Do we have a pool named *name? If so, return 1, otherwise 0. 5067 */ 5068 static int 5069 is_pool(char *name) 5070 { 5071 return (for_each_pool(0, NULL, B_TRUE, NULL, B_FALSE, is_pool_cb, 5072 name)); 5073 } 5074 5075 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise.
*/ 5076 static int 5077 are_all_pools(int argc, char **argv) 5078 { 5079 if ((argc == 0) || !*argv) 5080 return (0); 5081 5082 while (--argc >= 0) 5083 if (!is_pool(argv[argc])) 5084 return (0); 5085 5086 return (1); 5087 } 5088 5089 /* 5090 * Helper function to print out vdev/pool names we can't resolve. Used for an 5091 * error message. 5092 */ 5093 static void 5094 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name, 5095 iostat_cbdata_t *cb) 5096 { 5097 int i; 5098 char *name; 5099 char *str; 5100 for (i = 0; i < argc; i++) { 5101 name = argv[i]; 5102 5103 if (is_pool(name)) 5104 str = gettext("pool"); 5105 else if (are_vdevs_in_pool(1, &name, pool_name, cb)) 5106 str = gettext("vdev in this pool"); 5107 else if (are_vdevs_in_pool(1, &name, NULL, cb)) 5108 str = gettext("vdev in another pool"); 5109 else 5110 str = gettext("unknown"); 5111 5112 fprintf(stderr, "\t%s (%s)\n", name, str); 5113 } 5114 } 5115 5116 /* 5117 * Same as get_interval_count(), but with additional checks to not misinterpret 5118 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in 5119 * cb.cb_name_flags. 5120 */ 5121 static void 5122 get_interval_count_filter_guids(int *argc, char **argv, float *interval, 5123 unsigned long *count, iostat_cbdata_t *cb) 5124 { 5125 char **tmpargv = argv; 5126 int argc_for_interval = 0; 5127 5128 /* Is the last arg an interval value? Or a guid? */ 5129 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL, cb)) { 5130 /* 5131 * The last arg is not a guid, so it's probably an 5132 * interval value. 5133 */ 5134 argc_for_interval++; 5135 5136 if (*argc >= 2 && 5137 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL, cb)) { 5138 /* 5139 * The 2nd to last arg is not a guid, so it's probably 5140 * an interval value. 5141 */ 5142 argc_for_interval++; 5143 } 5144 } 5145 5146 /* Point to our list of possible intervals */ 5147 tmpargv = &argv[*argc - argc_for_interval]; 5148 5149 *argc = *argc - argc_for_interval; 5150 get_interval_count(&argc_for_interval, tmpargv, 5151 interval, count); 5152 } 5153 5154 /* 5155 * Floating point sleep(). Allows you to pass in a floating point value for 5156 * seconds. 5157 */ 5158 static void 5159 fsleep(float sec) 5160 { 5161 struct timespec req; 5162 req.tv_sec = floor(sec); 5163 req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC; 5164 nanosleep(&req, NULL); 5165 } 5166 5167 /* 5168 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or 5169 * if we were unable to determine its size. 5170 */ 5171 static int 5172 terminal_height(void) 5173 { 5174 struct winsize win; 5175 5176 if (isatty(STDOUT_FILENO) == 0) 5177 return (-1); 5178 5179 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0) 5180 return (win.ws_row); 5181 5182 return (-1); 5183 } 5184 5185 /* 5186 * Run one of the zpool status/iostat -c scripts with the help (-h) option and 5187 * print the result. 5188 * 5189 * name: Short name of the script ('iostat'). 
5190 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat'). 5191 */ 5192 static void 5193 print_zpool_script_help(char *name, char *path) 5194 { 5195 char *argv[] = {path, "-h", NULL}; 5196 char **lines = NULL; 5197 int lines_cnt = 0; 5198 int rc; 5199 5200 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines, 5201 &lines_cnt); 5202 if (rc != 0 || lines == NULL || lines_cnt <= 0) { 5203 if (lines != NULL) 5204 libzfs_free_str_array(lines, lines_cnt); 5205 return; 5206 } 5207 5208 for (int i = 0; i < lines_cnt; i++) 5209 if (!is_blank_str(lines[i])) 5210 printf(" %-14s %s\n", name, lines[i]); 5211 5212 libzfs_free_str_array(lines, lines_cnt); 5213 } 5214 5215 /* 5216 * Go through the zpool status/iostat -c scripts in the user's path, run their 5217 * help option (-h), and print out the results. 5218 */ 5219 static void 5220 print_zpool_dir_scripts(char *dirpath) 5221 { 5222 DIR *dir; 5223 struct dirent *ent; 5224 char fullpath[MAXPATHLEN]; 5225 struct stat dir_stat; 5226 5227 if ((dir = opendir(dirpath)) != NULL) { 5228 /* print all the files and directories within directory */ 5229 while ((ent = readdir(dir)) != NULL) { 5230 snprintf(fullpath, sizeof (fullpath), "%s/%s", dirpath, ent->d_name); 5231 5232 /* Print the scripts */ 5233 if (stat(fullpath, &dir_stat) == 0) 5234 if (dir_stat.st_mode & S_IXUSR && 5235 S_ISREG(dir_stat.st_mode)) 5236 print_zpool_script_help(ent->d_name, 5237 fullpath); 5238 } 5239 closedir(dir); 5240 } 5241 } 5242 5243 /* 5244 * Print out help text for all zpool status/iostat -c scripts. 5245 */ 5246 static void 5247 print_zpool_script_list(char *subcommand) 5248 { 5249 char *dir, *sp; 5250 5251 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand); 5252 5253 sp = zpool_get_cmd_search_path(); 5254 if (sp == NULL) 5255 return; 5256 5257 dir = strtok(sp, ":"); 5258 while (dir != NULL) { 5259 print_zpool_dir_scripts(dir); 5260 dir = strtok(NULL, ":"); 5261 } 5262 5263 free(sp); 5264 } 5265 5266 /* 5267 * Set the minimum pool/vdev name column width. The width must be at least 10, 5268 * but may be as large as the terminal width - 42 so that it still fits on one line. 5269 * NOTE: 42 is the width of the default capacity/operations/bandwidth output 5270 */ 5271 static int 5272 get_namewidth_iostat(zpool_handle_t *zhp, void *data) 5273 { 5274 iostat_cbdata_t *cb = data; 5275 int width, available_width; 5276 5277 /* 5278 * get_namewidth() returns the maximum width of any name in that column 5279 * for any pool/vdev/device line that will be output. 5280 */ 5281 width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_name_flags, 5282 cb->cb_verbose); 5283 5284 /* 5285 * The width we are calculating is the width of the header and also the 5286 * padding width for names that are less than maximum width. The stats 5287 * take up 42 characters, so the width available for names is: 5288 */ 5289 available_width = get_columns() - 42; 5290 5291 /* 5292 * If the maximum width fits on a screen, then great! Make everything 5293 * line up by justifying all lines to the same width. If that max 5294 * width is larger than what's available, the name plus stats won't fit 5295 * on one line, and justifying to that width would cause every line to 5296 * wrap on the screen. We only want lines with long names to wrap. 5297 * Limit the padding to what won't wrap.
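 * For example, on a 120-column terminal available_width is 78: if the
 * widest vdev name is 90 characters we pad to 78 instead, so rows with
 * short names keep fitting on one line and only the long-named rows wrap.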
5298 */ 5299 if (width > available_width) 5300 width = available_width; 5301 5302 /* 5303 * And regardless of whatever the screen width is (get_columns can 5304 * return 0 if the width is not known or less than 42 for a narrow 5305 * terminal) have the width be a minimum of 10. 5306 */ 5307 if (width < 10) 5308 width = 10; 5309 5310 /* Save the calculated width */ 5311 cb->cb_namewidth = width; 5312 5313 return (0); 5314 } 5315 5316 /* 5317 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name] 5318 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]] 5319 * [interval [count]] 5320 * 5321 * -c CMD For each vdev, run command CMD 5322 * -g Display guid for individual vdev name. 5323 * -L Follow links when resolving vdev path name. 5324 * -P Display full path for vdev name. 5325 * -v Display statistics for individual vdevs 5326 * -h Display help 5327 * -p Display values in parsable (exact) format. 5328 * -H Scripted mode. Don't display headers, and separate properties 5329 * by a single tab. 5330 * -l Display average latency 5331 * -q Display queue depths 5332 * -w Display latency histograms 5333 * -r Display request size histogram 5334 * -T Display a timestamp in date(1) or Unix format 5335 * -n Only print headers once 5336 * 5337 * This command can be tricky because we want to be able to deal with pool 5338 * creation/destruction as well as vdev configuration changes. The bulk of this 5339 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely 5340 * on pool_list_update() to detect the addition of new pools. Configuration 5341 * changes are all handled within libzfs. 5342 */ 5343 int 5344 zpool_do_iostat(int argc, char **argv) 5345 { 5346 int c; 5347 int ret; 5348 int npools; 5349 float interval = 0; 5350 unsigned long count = 0; 5351 int winheight = 24; 5352 zpool_list_t *list; 5353 boolean_t verbose = B_FALSE; 5354 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE; 5355 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE; 5356 boolean_t omit_since_boot = B_FALSE; 5357 boolean_t guid = B_FALSE; 5358 boolean_t follow_links = B_FALSE; 5359 boolean_t full_name = B_FALSE; 5360 boolean_t headers_once = B_FALSE; 5361 iostat_cbdata_t cb = { 0 }; 5362 char *cmd = NULL; 5363 5364 /* Used for printing error message */ 5365 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q', 5366 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'}; 5367 5368 uint64_t unsupported_flags; 5369 5370 /* check options */ 5371 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) { 5372 switch (c) { 5373 case 'c': 5374 if (cmd != NULL) { 5375 fprintf(stderr, 5376 gettext("Can't set -c flag twice\n")); 5377 exit(1); 5378 } 5379 5380 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 5381 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 5382 fprintf(stderr, gettext( 5383 "Can't run -c, disabled by " 5384 "ZPOOL_SCRIPTS_ENABLED.\n")); 5385 exit(1); 5386 } 5387 5388 if ((getuid() <= 0 || geteuid() <= 0) && 5389 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 5390 fprintf(stderr, gettext( 5391 "Can't run -c with root privileges " 5392 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 5393 exit(1); 5394 } 5395 cmd = optarg; 5396 verbose = B_TRUE; 5397 break; 5398 case 'g': 5399 guid = B_TRUE; 5400 break; 5401 case 'L': 5402 follow_links = B_TRUE; 5403 break; 5404 case 'P': 5405 full_name = B_TRUE; 5406 break; 5407 case 'T': 5408 get_timestamp_arg(*optarg); 5409 break; 5410 case 'v': 5411 verbose = B_TRUE; 5412 break; 5413 case 'p': 5414 parsable = 
B_TRUE; 5415 break; 5416 case 'l': 5417 latency = B_TRUE; 5418 break; 5419 case 'q': 5420 queues = B_TRUE; 5421 break; 5422 case 'H': 5423 scripted = B_TRUE; 5424 break; 5425 case 'w': 5426 l_histo = B_TRUE; 5427 break; 5428 case 'r': 5429 rq_histo = B_TRUE; 5430 break; 5431 case 'y': 5432 omit_since_boot = B_TRUE; 5433 break; 5434 case 'n': 5435 headers_once = B_TRUE; 5436 break; 5437 case 'h': 5438 usage(B_FALSE); 5439 break; 5440 case '?': 5441 if (optopt == 'c') { 5442 print_zpool_script_list("iostat"); 5443 exit(0); 5444 } else { 5445 fprintf(stderr, 5446 gettext("invalid option '%c'\n"), optopt); 5447 } 5448 usage(B_FALSE); 5449 } 5450 } 5451 5452 argc -= optind; 5453 argv += optind; 5454 5455 cb.cb_literal = parsable; 5456 cb.cb_scripted = scripted; 5457 5458 if (guid) 5459 cb.cb_name_flags |= VDEV_NAME_GUID; 5460 if (follow_links) 5461 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 5462 if (full_name) 5463 cb.cb_name_flags |= VDEV_NAME_PATH; 5464 cb.cb_iteration = 0; 5465 cb.cb_namewidth = 0; 5466 cb.cb_verbose = verbose; 5467 5468 /* Get our interval and count values (if any) */ 5469 if (guid) { 5470 get_interval_count_filter_guids(&argc, argv, &interval, 5471 &count, &cb); 5472 } else { 5473 get_interval_count(&argc, argv, &interval, &count); 5474 } 5475 5476 if (argc == 0) { 5477 /* No args, so just print the defaults. */ 5478 } else if (are_all_pools(argc, argv)) { 5479 /* All the args are pool names */ 5480 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb)) { 5481 /* All the args are vdevs */ 5482 cb.cb_vdev_names = argv; 5483 cb.cb_vdev_names_count = argc; 5484 argc = 0; /* No pools to process */ 5485 } else if (are_all_pools(1, argv)) { 5486 /* The first arg is a pool name */ 5487 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], &cb)) { 5488 /* ...and the rest are vdev names */ 5489 cb.cb_vdev_names = argv + 1; 5490 cb.cb_vdev_names_count = argc - 1; 5491 argc = 1; /* One pool to process */ 5492 } else { 5493 fprintf(stderr, gettext("Expected either a list of ")); 5494 fprintf(stderr, gettext("pools, or list of vdevs in")); 5495 fprintf(stderr, " \"%s\", ", argv[0]); 5496 fprintf(stderr, gettext("but got:\n")); 5497 error_list_unresolved_vdevs(argc - 1, argv + 1, 5498 argv[0], &cb); 5499 fprintf(stderr, "\n"); 5500 usage(B_FALSE); 5501 return (1); 5502 } 5503 } else { 5504 /* 5505 * The args don't make sense. The first arg isn't a pool name, 5506 * nor are all the args vdevs. 5507 */ 5508 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n")); 5509 fprintf(stderr, "\n"); 5510 return (1); 5511 } 5512 5513 if (cb.cb_vdev_names_count != 0) { 5514 /* 5515 * If user specified vdevs, it implies verbose. 5516 */ 5517 cb.cb_verbose = B_TRUE; 5518 } 5519 5520 /* 5521 * Construct the list of all interesting pools. 
5522 */ 5523 ret = 0; 5524 if ((list = pool_list_get(argc, argv, NULL, parsable, &ret)) == NULL) 5525 return (1); 5526 5527 if (pool_list_count(list) == 0 && argc != 0) { 5528 pool_list_free(list); 5529 return (1); 5530 } 5531 5532 if (pool_list_count(list) == 0 && interval == 0) { 5533 pool_list_free(list); 5534 (void) fprintf(stderr, gettext("no pools available\n")); 5535 return (1); 5536 } 5537 5538 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) { 5539 pool_list_free(list); 5540 (void) fprintf(stderr, 5541 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n")); 5542 usage(B_FALSE); 5543 return (1); 5544 } 5545 5546 if (l_histo && rq_histo) { 5547 pool_list_free(list); 5548 (void) fprintf(stderr, 5549 gettext("Only one of [-r|-w] can be passed at a time\n")); 5550 usage(B_FALSE); 5551 return (1); 5552 } 5553 5554 /* 5555 * Enter the main iostat loop. 5556 */ 5557 cb.cb_list = list; 5558 5559 if (l_histo) { 5560 /* 5561 * Histogram tables look out of place when you try to display 5562 * them with the other stats, so make a rule that you can only 5563 * print histograms by themselves. 5564 */ 5565 cb.cb_flags = IOS_L_HISTO_M; 5566 } else if (rq_histo) { 5567 cb.cb_flags = IOS_RQ_HISTO_M; 5568 } else { 5569 cb.cb_flags = IOS_DEFAULT_M; 5570 if (latency) 5571 cb.cb_flags |= IOS_LATENCY_M; 5572 if (queues) 5573 cb.cb_flags |= IOS_QUEUES_M; 5574 } 5575 5576 /* 5577 * See if the module supports all the stats we want to display. 5578 */ 5579 unsupported_flags = cb.cb_flags & ~get_stat_flags(list); 5580 if (unsupported_flags) { 5581 uint64_t f; 5582 int idx; 5583 fprintf(stderr, 5584 gettext("The loaded zfs module doesn't support:")); 5585 5586 /* for each bit set in unsupported_flags */ 5587 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) { 5588 idx = lowbit64(f) - 1; 5589 fprintf(stderr, " -%c", flag_to_arg[idx]); 5590 } 5591 5592 fprintf(stderr, ". Try running a newer module.\n"); 5593 pool_list_free(list); 5594 5595 return (1); 5596 } 5597 5598 for (;;) { 5599 if ((npools = pool_list_count(list)) == 0) 5600 (void) fprintf(stderr, gettext("no pools available\n")); 5601 else { 5602 /* 5603 * If this is the first iteration and -y was supplied, 5604 * we skip any printing. 5605 */ 5606 boolean_t skip = (omit_since_boot && 5607 cb.cb_iteration == 0); 5608 5609 /* 5610 * Refresh all statistics. This is done as an 5611 * explicit step before calculating the maximum name 5612 * width, so that any configuration changes are 5613 * properly accounted for. 5614 */ 5615 (void) pool_list_iter(list, B_FALSE, refresh_iostat, 5616 &cb); 5617 5618 /* 5619 * Iterate over all pools to determine the maximum width 5620 * for the pool / device name column across all pools. 5621 */ 5622 cb.cb_namewidth = 0; 5623 (void) pool_list_iter(list, B_FALSE, 5624 get_namewidth_iostat, &cb); 5625 5626 if (timestamp_fmt != NODATE) 5627 print_timestamp(timestamp_fmt); 5628 5629 if (cmd != NULL && cb.cb_verbose && 5630 !(cb.cb_flags & IOS_ANYHISTO_M)) { 5631 cb.vcdl = all_pools_for_each_vdev_run(argc, 5632 argv, cmd, g_zfs, cb.cb_vdev_names, 5633 cb.cb_vdev_names_count, cb.cb_name_flags); 5634 } else { 5635 cb.vcdl = NULL; 5636 } 5637 5638 5639 /* 5640 * Check terminal size so we can print headers 5641 * even when the terminal window height has 5642 * changed. 5643 */ 5644 winheight = terminal_height(); 5645 /* 5646 * Are we connected to a TTY? If not, headers_once 5647 * should be true, to avoid breaking scripts.
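 * When we are on a TTY the header is reprinted whenever cb_iteration
 * is a multiple of winheight (see the check below), e.g. every 40
 * intervals on a 40-row terminal, so a header stays visible as old
 * output scrolls away unless -n was given.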
5648 */ 5649 if (winheight < 0) 5650 headers_once = B_TRUE; 5651 5652 /* 5653 * If it's the first time and we're not skipping it, 5654 * or either skip or verbose mode, print the header. 5655 * 5656 * The histogram code explicitly prints its header on 5657 * every vdev, so skip this for histograms. 5658 */ 5659 if (((++cb.cb_iteration == 1 && !skip) || 5660 (skip != verbose) || 5661 (!headers_once && 5662 (cb.cb_iteration % winheight) == 0)) && 5663 (!(cb.cb_flags & IOS_ANYHISTO_M)) && 5664 !cb.cb_scripted) 5665 print_iostat_header(&cb); 5666 5667 if (skip) { 5668 (void) fsleep(interval); 5669 continue; 5670 } 5671 5672 pool_list_iter(list, B_FALSE, print_iostat, &cb); 5673 5674 /* 5675 * If there's more than one pool, and we're not in 5676 * verbose mode (which prints a separator for us), 5677 * then print a separator. 5678 * 5679 * In addition, if we're printing specific vdevs then 5680 * we also want an ending separator. 5681 */ 5682 if (((npools > 1 && !verbose && 5683 !(cb.cb_flags & IOS_ANYHISTO_M)) || 5684 (!(cb.cb_flags & IOS_ANYHISTO_M) && 5685 cb.cb_vdev_names_count)) && 5686 !cb.cb_scripted) { 5687 print_iostat_separator(&cb); 5688 if (cb.vcdl != NULL) 5689 print_cmd_columns(cb.vcdl, 1); 5690 printf("\n"); 5691 } 5692 5693 if (cb.vcdl != NULL) 5694 free_vdev_cmd_data_list(cb.vcdl); 5695 5696 } 5697 5698 /* 5699 * Flush the output so that redirection to a file isn't buffered 5700 * indefinitely. 5701 */ 5702 (void) fflush(stdout); 5703 5704 if (interval == 0) 5705 break; 5706 5707 if (count != 0 && --count == 0) 5708 break; 5709 5710 (void) fsleep(interval); 5711 } 5712 5713 pool_list_free(list); 5714 5715 return (ret); 5716 } 5717 5718 typedef struct list_cbdata { 5719 boolean_t cb_verbose; 5720 int cb_name_flags; 5721 int cb_namewidth; 5722 boolean_t cb_scripted; 5723 zprop_list_t *cb_proplist; 5724 boolean_t cb_literal; 5725 } list_cbdata_t; 5726 5727 5728 /* 5729 * Given a list of columns to display, output appropriate headers for each one. 5730 */ 5731 static void 5732 print_header(list_cbdata_t *cb) 5733 { 5734 zprop_list_t *pl = cb->cb_proplist; 5735 char headerbuf[ZPOOL_MAXPROPLEN]; 5736 const char *header; 5737 boolean_t first = B_TRUE; 5738 boolean_t right_justify; 5739 size_t width = 0; 5740 5741 for (; pl != NULL; pl = pl->pl_next) { 5742 width = pl->pl_width; 5743 if (first && cb->cb_verbose) { 5744 /* 5745 * Reset the width to accommodate the verbose listing 5746 * of devices. 5747 */ 5748 width = cb->cb_namewidth; 5749 } 5750 5751 if (!first) 5752 (void) printf(" "); 5753 else 5754 first = B_FALSE; 5755 5756 right_justify = B_FALSE; 5757 if (pl->pl_prop != ZPROP_INVAL) { 5758 header = zpool_prop_column_name(pl->pl_prop); 5759 right_justify = zpool_prop_align_right(pl->pl_prop); 5760 } else { 5761 int i; 5762 5763 for (i = 0; pl->pl_user_prop[i] != '\0'; i++) 5764 headerbuf[i] = toupper(pl->pl_user_prop[i]); 5765 headerbuf[i] = '\0'; 5766 header = headerbuf; 5767 } 5768 5769 if (pl->pl_next == NULL && !right_justify) 5770 (void) printf("%s", header); 5771 else if (right_justify) 5772 (void) printf("%*s", (int)width, header); 5773 else 5774 (void) printf("%-*s", (int)width, header); 5775 } 5776 5777 (void) printf("\n"); 5778 } 5779 5780 /* 5781 * Given a pool and a list of properties, print out all the properties according 5782 * to the described layout. Used by zpool_do_list(). 
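 *
 * With the default property list the output is shaped roughly as follows
 * (the values shown here are purely illustrative):
 *
 *	NAME   SIZE  ALLOC   FREE  ...  FRAG    CAP  DEDUP  HEALTH  ALTROOT
 *	tank  1.98G   132K  1.98G  ...    0%     0%  1.00x  ONLINE  -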
5783 */ 5784 static void 5785 print_pool(zpool_handle_t *zhp, list_cbdata_t *cb) 5786 { 5787 zprop_list_t *pl = cb->cb_proplist; 5788 boolean_t first = B_TRUE; 5789 char property[ZPOOL_MAXPROPLEN]; 5790 char *propstr; 5791 boolean_t right_justify; 5792 size_t width; 5793 5794 for (; pl != NULL; pl = pl->pl_next) { 5795 5796 width = pl->pl_width; 5797 if (first && cb->cb_verbose) { 5798 /* 5799 * Reset the width to accommodate the verbose listing 5800 * of devices. 5801 */ 5802 width = cb->cb_namewidth; 5803 } 5804 5805 if (!first) { 5806 if (cb->cb_scripted) 5807 (void) printf("\t"); 5808 else 5809 (void) printf(" "); 5810 } else { 5811 first = B_FALSE; 5812 } 5813 5814 right_justify = B_FALSE; 5815 if (pl->pl_prop != ZPROP_INVAL) { 5816 if (zpool_get_prop(zhp, pl->pl_prop, property, 5817 sizeof (property), NULL, cb->cb_literal) != 0) 5818 propstr = "-"; 5819 else 5820 propstr = property; 5821 5822 right_justify = zpool_prop_align_right(pl->pl_prop); 5823 } else if ((zpool_prop_feature(pl->pl_user_prop) || 5824 zpool_prop_unsupported(pl->pl_user_prop)) && 5825 zpool_prop_get_feature(zhp, pl->pl_user_prop, property, 5826 sizeof (property)) == 0) { 5827 propstr = property; 5828 } else { 5829 propstr = "-"; 5830 } 5831 5832 5833 /* 5834 * If this is being called in scripted mode, or if this is the 5835 * last column and it is left-justified, don't include a width 5836 * format specifier. 5837 */ 5838 if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify)) 5839 (void) printf("%s", propstr); 5840 else if (right_justify) 5841 (void) printf("%*s", (int)width, propstr); 5842 else 5843 (void) printf("%-*s", (int)width, propstr); 5844 } 5845 5846 (void) printf("\n"); 5847 } 5848 5849 static void 5850 print_one_column(zpool_prop_t prop, uint64_t value, const char *str, 5851 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format) 5852 { 5853 char propval[64]; 5854 boolean_t fixed; 5855 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL); 5856 5857 switch (prop) { 5858 case ZPOOL_PROP_EXPANDSZ: 5859 case ZPOOL_PROP_CHECKPOINT: 5860 case ZPOOL_PROP_DEDUPRATIO: 5861 if (value == 0) 5862 (void) strlcpy(propval, "-", sizeof (propval)); 5863 else 5864 zfs_nicenum_format(value, propval, sizeof (propval), 5865 format); 5866 break; 5867 case ZPOOL_PROP_FRAGMENTATION: 5868 if (value == ZFS_FRAG_INVALID) { 5869 (void) strlcpy(propval, "-", sizeof (propval)); 5870 } else if (format == ZFS_NICENUM_RAW) { 5871 (void) snprintf(propval, sizeof (propval), "%llu", 5872 (unsigned long long)value); 5873 } else { 5874 (void) snprintf(propval, sizeof (propval), "%llu%%", 5875 (unsigned long long)value); 5876 } 5877 break; 5878 case ZPOOL_PROP_CAPACITY: 5879 /* capacity value is in parts-per-10,000 (aka permyriad) */ 5880 if (format == ZFS_NICENUM_RAW) 5881 (void) snprintf(propval, sizeof (propval), "%llu", 5882 (unsigned long long)value / 100); 5883 else 5884 (void) snprintf(propval, sizeof (propval), 5885 value < 1000 ? "%1.2f%%" : value < 10000 ? 
5886 "%2.1f%%" : "%3.0f%%", value / 100.0); 5887 break; 5888 case ZPOOL_PROP_HEALTH: 5889 width = 8; 5890 snprintf(propval, sizeof (propval), "%-*s", (int)width, str); 5891 break; 5892 default: 5893 zfs_nicenum_format(value, propval, sizeof (propval), format); 5894 } 5895 5896 if (!valid) 5897 (void) strlcpy(propval, "-", sizeof (propval)); 5898 5899 if (scripted) 5900 (void) printf("\t%s", propval); 5901 else 5902 (void) printf(" %*s", (int)width, propval); 5903 } 5904 5905 /* 5906 * print static default line per vdev 5907 * not compatible with '-o' <proplist> option 5908 */ 5909 static void 5910 print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, 5911 list_cbdata_t *cb, int depth, boolean_t isspare) 5912 { 5913 nvlist_t **child; 5914 vdev_stat_t *vs; 5915 uint_t c, children; 5916 char *vname; 5917 boolean_t scripted = cb->cb_scripted; 5918 uint64_t islog = B_FALSE; 5919 char *dashes = "%-*s - - - - " 5920 "- - - - -\n"; 5921 5922 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 5923 (uint64_t **)&vs, &c) == 0); 5924 5925 if (name != NULL) { 5926 boolean_t toplevel = (vs->vs_space != 0); 5927 uint64_t cap; 5928 enum zfs_nicenum_format format; 5929 const char *state; 5930 5931 if (cb->cb_literal) 5932 format = ZFS_NICENUM_RAW; 5933 else 5934 format = ZFS_NICENUM_1024; 5935 5936 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) 5937 return; 5938 5939 if (scripted) 5940 (void) printf("\t%s", name); 5941 else if (strlen(name) + depth > cb->cb_namewidth) 5942 (void) printf("%*s%s", depth, "", name); 5943 else 5944 (void) printf("%*s%s%*s", depth, "", name, 5945 (int)(cb->cb_namewidth - strlen(name) - depth), ""); 5946 5947 /* 5948 * Print the properties for the individual vdevs. Some 5949 * properties are only applicable to toplevel vdevs. The 5950 * 'toplevel' boolean value is passed to the print_one_column() 5951 * to indicate that the value is valid. 5952 */ 5953 print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL, scripted, 5954 toplevel, format); 5955 print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL, 5956 scripted, toplevel, format); 5957 print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc, 5958 NULL, scripted, toplevel, format); 5959 print_one_column(ZPOOL_PROP_CHECKPOINT, 5960 vs->vs_checkpoint_space, NULL, scripted, toplevel, format); 5961 print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL, 5962 scripted, B_TRUE, format); 5963 print_one_column(ZPOOL_PROP_FRAGMENTATION, 5964 vs->vs_fragmentation, NULL, scripted, 5965 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel), 5966 format); 5967 cap = (vs->vs_space == 0) ? 
0 : 5968 (vs->vs_alloc * 10000 / vs->vs_space); 5969 print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL, 5970 scripted, toplevel, format); 5971 print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL, 5972 scripted, toplevel, format); 5973 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 5974 if (isspare) { 5975 if (vs->vs_aux == VDEV_AUX_SPARED) 5976 state = "INUSE"; 5977 else if (vs->vs_state == VDEV_STATE_HEALTHY) 5978 state = "AVAIL"; 5979 } 5980 print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted, 5981 B_TRUE, format); 5982 (void) printf("\n"); 5983 } 5984 5985 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 5986 &child, &children) != 0) 5987 return; 5988 5989 /* list the normal vdevs first */ 5990 for (c = 0; c < children; c++) { 5991 uint64_t ishole = B_FALSE; 5992 5993 if (nvlist_lookup_uint64(child[c], 5994 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole) 5995 continue; 5996 5997 if (nvlist_lookup_uint64(child[c], 5998 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog) 5999 continue; 6000 6001 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 6002 continue; 6003 6004 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6005 cb->cb_name_flags); 6006 print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE); 6007 free(vname); 6008 } 6009 6010 /* list the classes: 'logs', 'dedup', and 'special' */ 6011 for (uint_t n = 0; n < 3; n++) { 6012 boolean_t printed = B_FALSE; 6013 6014 for (c = 0; c < children; c++) { 6015 char *bias = NULL; 6016 char *type = NULL; 6017 6018 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 6019 &islog) == 0 && islog) { 6020 bias = VDEV_ALLOC_CLASS_LOGS; 6021 } else { 6022 (void) nvlist_lookup_string(child[c], 6023 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 6024 (void) nvlist_lookup_string(child[c], 6025 ZPOOL_CONFIG_TYPE, &type); 6026 } 6027 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 6028 continue; 6029 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 6030 continue; 6031 6032 if (!printed) { 6033 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6034 (void) printf(dashes, cb->cb_namewidth, 6035 class_name[n]); 6036 printed = B_TRUE; 6037 } 6038 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6039 cb->cb_name_flags); 6040 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6041 B_FALSE); 6042 free(vname); 6043 } 6044 } 6045 6046 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 6047 &child, &children) == 0 && children > 0) { 6048 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6049 (void) printf(dashes, cb->cb_namewidth, "cache"); 6050 for (c = 0; c < children; c++) { 6051 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6052 cb->cb_name_flags); 6053 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6054 B_FALSE); 6055 free(vname); 6056 } 6057 } 6058 6059 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child, 6060 &children) == 0 && children > 0) { 6061 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6062 (void) printf(dashes, cb->cb_namewidth, "spare"); 6063 for (c = 0; c < children; c++) { 6064 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6065 cb->cb_name_flags); 6066 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6067 B_TRUE); 6068 free(vname); 6069 } 6070 } 6071 } 6072 6073 /* 6074 * Generic callback function to list a pool. 
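 * Invoked once per pool by pool_list_iter(); prints the pool's property
 * line via print_pool() and, in verbose (-v) mode, the per-vdev
 * breakdown underneath via print_list_stats().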
6075 */ 6076 static int 6077 list_callback(zpool_handle_t *zhp, void *data) 6078 { 6079 list_cbdata_t *cbp = data; 6080 6081 print_pool(zhp, cbp); 6082 6083 if (cbp->cb_verbose) { 6084 nvlist_t *config, *nvroot; 6085 6086 config = zpool_get_config(zhp, NULL); 6087 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 6088 &nvroot) == 0); 6089 print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE); 6090 } 6091 6092 return (0); 6093 } 6094 6095 /* 6096 * Set the minimum pool/vdev name column width. The width must be at least 9, 6097 * but may be as large as needed. 6098 */ 6099 static int 6100 get_namewidth_list(zpool_handle_t *zhp, void *data) 6101 { 6102 list_cbdata_t *cb = data; 6103 int width; 6104 6105 width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_name_flags, 6106 cb->cb_verbose); 6107 6108 if (width < 9) 6109 width = 9; 6110 6111 cb->cb_namewidth = width; 6112 6113 return (0); 6114 } 6115 6116 /* 6117 * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]] 6118 * 6119 * -g Display guid for individual vdev name. 6120 * -H Scripted mode. Don't display headers, and separate properties 6121 * by a single tab. 6122 * -L Follow links when resolving vdev path name. 6123 * -o List of properties to display. Defaults to 6124 * "name,size,allocated,free,expandsize,fragmentation,capacity," 6125 * "dedupratio,health,altroot" 6126 * -p Display values in parsable (exact) format. 6127 * -P Display full path for vdev name. 6128 * -T Display a timestamp in date(1) or Unix format 6129 * 6130 * List all pools in the system, whether or not they're healthy. Output space 6131 * statistics for each one, as well as health status summary. 6132 */ 6133 int 6134 zpool_do_list(int argc, char **argv) 6135 { 6136 int c; 6137 int ret = 0; 6138 list_cbdata_t cb = { 0 }; 6139 static char default_props[] = 6140 "name,size,allocated,free,checkpoint,expandsize,fragmentation," 6141 "capacity,dedupratio,health,altroot"; 6142 char *props = default_props; 6143 float interval = 0; 6144 unsigned long count = 0; 6145 zpool_list_t *list; 6146 boolean_t first = B_TRUE; 6147 6148 /* check options */ 6149 while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) { 6150 switch (c) { 6151 case 'g': 6152 cb.cb_name_flags |= VDEV_NAME_GUID; 6153 break; 6154 case 'H': 6155 cb.cb_scripted = B_TRUE; 6156 break; 6157 case 'L': 6158 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 6159 break; 6160 case 'o': 6161 props = optarg; 6162 break; 6163 case 'P': 6164 cb.cb_name_flags |= VDEV_NAME_PATH; 6165 break; 6166 case 'p': 6167 cb.cb_literal = B_TRUE; 6168 break; 6169 case 'T': 6170 get_timestamp_arg(*optarg); 6171 break; 6172 case 'v': 6173 cb.cb_verbose = B_TRUE; 6174 cb.cb_namewidth = 8; /* 8 until precalc is avail */ 6175 break; 6176 case ':': 6177 (void) fprintf(stderr, gettext("missing argument for " 6178 "'%c' option\n"), optopt); 6179 usage(B_FALSE); 6180 break; 6181 case '?': 6182 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6183 optopt); 6184 usage(B_FALSE); 6185 } 6186 } 6187 6188 argc -= optind; 6189 argv += optind; 6190 6191 get_interval_count(&argc, argv, &interval, &count); 6192 6193 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0) 6194 usage(B_FALSE); 6195 6196 for (;;) { 6197 if ((list = pool_list_get(argc, argv, &cb.cb_proplist, 6198 cb.cb_literal, &ret)) == NULL) 6199 return (1); 6200 6201 if (pool_list_count(list) == 0) 6202 break; 6203 6204 cb.cb_namewidth = 0; 6205 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb); 6206 6207 if (timestamp_fmt != NODATE) 
6208 print_timestamp(timestamp_fmt); 6209 6210 if (!cb.cb_scripted && (first || cb.cb_verbose)) { 6211 print_header(&cb); 6212 first = B_FALSE; 6213 } 6214 ret = pool_list_iter(list, B_TRUE, list_callback, &cb); 6215 6216 if (interval == 0) 6217 break; 6218 6219 if (count != 0 && --count == 0) 6220 break; 6221 6222 pool_list_free(list); 6223 (void) fsleep(interval); 6224 } 6225 6226 if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) { 6227 (void) printf(gettext("no pools available\n")); 6228 ret = 0; 6229 } 6230 6231 pool_list_free(list); 6232 zprop_free_list(cb.cb_proplist); 6233 return (ret); 6234 } 6235 6236 static int 6237 zpool_do_attach_or_replace(int argc, char **argv, int replacing) 6238 { 6239 boolean_t force = B_FALSE; 6240 boolean_t rebuild = B_FALSE; 6241 boolean_t wait = B_FALSE; 6242 int c; 6243 nvlist_t *nvroot; 6244 char *poolname, *old_disk, *new_disk; 6245 zpool_handle_t *zhp; 6246 nvlist_t *props = NULL; 6247 char *propval; 6248 int ret; 6249 6250 /* check options */ 6251 while ((c = getopt(argc, argv, "fo:sw")) != -1) { 6252 switch (c) { 6253 case 'f': 6254 force = B_TRUE; 6255 break; 6256 case 'o': 6257 if ((propval = strchr(optarg, '=')) == NULL) { 6258 (void) fprintf(stderr, gettext("missing " 6259 "'=' for -o option\n")); 6260 usage(B_FALSE); 6261 } 6262 *propval = '\0'; 6263 propval++; 6264 6265 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) || 6266 (add_prop_list(optarg, propval, &props, B_TRUE))) 6267 usage(B_FALSE); 6268 break; 6269 case 's': 6270 rebuild = B_TRUE; 6271 break; 6272 case 'w': 6273 wait = B_TRUE; 6274 break; 6275 case '?': 6276 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6277 optopt); 6278 usage(B_FALSE); 6279 } 6280 } 6281 6282 argc -= optind; 6283 argv += optind; 6284 6285 /* get pool name and check number of arguments */ 6286 if (argc < 1) { 6287 (void) fprintf(stderr, gettext("missing pool name argument\n")); 6288 usage(B_FALSE); 6289 } 6290 6291 poolname = argv[0]; 6292 6293 if (argc < 2) { 6294 (void) fprintf(stderr, 6295 gettext("missing <device> specification\n")); 6296 usage(B_FALSE); 6297 } 6298 6299 old_disk = argv[1]; 6300 6301 if (argc < 3) { 6302 if (!replacing) { 6303 (void) fprintf(stderr, 6304 gettext("missing <new_device> specification\n")); 6305 usage(B_FALSE); 6306 } 6307 new_disk = old_disk; 6308 argc -= 1; 6309 argv += 1; 6310 } else { 6311 new_disk = argv[2]; 6312 argc -= 2; 6313 argv += 2; 6314 } 6315 6316 if (argc > 1) { 6317 (void) fprintf(stderr, gettext("too many arguments\n")); 6318 usage(B_FALSE); 6319 } 6320 6321 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) { 6322 nvlist_free(props); 6323 return (1); 6324 } 6325 6326 if (zpool_get_config(zhp, NULL) == NULL) { 6327 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"), 6328 poolname); 6329 zpool_close(zhp); 6330 nvlist_free(props); 6331 return (1); 6332 } 6333 6334 /* unless manually specified use "ashift" pool property (if set) */ 6335 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) { 6336 int intval; 6337 zprop_source_t src; 6338 char strval[ZPOOL_MAXPROPLEN]; 6339 6340 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src); 6341 if (src != ZPROP_SRC_DEFAULT) { 6342 (void) sprintf(strval, "%" PRId32, intval); 6343 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval, 6344 &props, B_TRUE) == 0); 6345 } 6346 } 6347 6348 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE, 6349 argc, argv); 6350 if (nvroot == NULL) { 6351 zpool_close(zhp); 6352 nvlist_free(props); 6353 return (1); 6354 } 6355 6356 ret = 
zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing, 6357 rebuild); 6358 6359 if (ret == 0 && wait) 6360 ret = zpool_wait(zhp, 6361 replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER); 6362 6363 nvlist_free(props); 6364 nvlist_free(nvroot); 6365 zpool_close(zhp); 6366 6367 return (ret); 6368 } 6369 6370 /* 6371 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device> 6372 * 6373 * -f Force attach, even if <new_device> appears to be in use. 6374 * -s Use sequential instead of healing reconstruction for resilver. 6375 * -o Set property=value. 6376 * -w Wait for replacing to complete before returning 6377 * 6378 * Replace <device> with <new_device>. 6379 */ 6380 /* ARGSUSED */ 6381 int 6382 zpool_do_replace(int argc, char **argv) 6383 { 6384 return (zpool_do_attach_or_replace(argc, argv, B_TRUE)); 6385 } 6386 6387 /* 6388 * zpool attach [-fsw] [-o property=value] <pool> <device> <new_device> 6389 * 6390 * -f Force attach, even if <new_device> appears to be in use. 6391 * -s Use sequential instead of healing reconstruction for resilver. 6392 * -o Set property=value. 6393 * -w Wait for resilvering to complete before returning 6394 * 6395 * Attach <new_device> to the mirror containing <device>. If <device> is not 6396 * part of a mirror, then <device> will be transformed into a mirror of 6397 * <device> and <new_device>. In either case, <new_device> will begin life 6398 * with a DTL of [0, now], and will immediately begin to resilver itself. 6399 */ 6400 int 6401 zpool_do_attach(int argc, char **argv) 6402 { 6403 return (zpool_do_attach_or_replace(argc, argv, B_FALSE)); 6404 } 6405 6406 /* 6407 * zpool detach [-f] <pool> <device> 6408 * 6409 * -f Force detach of <device>, even if DTLs argue against it 6410 * (not supported yet) 6411 * 6412 * Detach a device from a mirror. The operation will be refused if <device> 6413 * is the last device in the mirror, or if the DTLs indicate that this device 6414 * has the only valid copy of some data. 6415 */ 6416 /* ARGSUSED */ 6417 int 6418 zpool_do_detach(int argc, char **argv) 6419 { 6420 int c; 6421 char *poolname, *path; 6422 zpool_handle_t *zhp; 6423 int ret; 6424 6425 /* check options */ 6426 while ((c = getopt(argc, argv, "")) != -1) { 6427 switch (c) { 6428 case '?': 6429 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6430 optopt); 6431 usage(B_FALSE); 6432 } 6433 } 6434 6435 argc -= optind; 6436 argv += optind; 6437 6438 /* get pool name and check number of arguments */ 6439 if (argc < 1) { 6440 (void) fprintf(stderr, gettext("missing pool name argument\n")); 6441 usage(B_FALSE); 6442 } 6443 6444 if (argc < 2) { 6445 (void) fprintf(stderr, 6446 gettext("missing <device> specification\n")); 6447 usage(B_FALSE); 6448 } 6449 6450 poolname = argv[0]; 6451 path = argv[1]; 6452 6453 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 6454 return (1); 6455 6456 ret = zpool_vdev_detach(zhp, path); 6457 6458 zpool_close(zhp); 6459 6460 return (ret); 6461 } 6462 6463 /* 6464 * zpool split [-gLnP] [-o prop=val] ... 6465 * [-o mntopt] ... 6466 * [-R altroot] <pool> <newpool> [<device> ...] 6467 * 6468 * -g Display guid for individual vdev name. 6469 * -L Follow links when resolving vdev path name. 6470 * -n Do not split the pool, but display the resulting layout if 6471 * it were to be split. 6472 * -o Set property=value, or set mount options. 6473 * -P Display full path for vdev name. 6474 * -R Mount the split-off pool under an alternate root. 6475 * -l Load encryption keys while importing. 
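 *
 * Illustrative example (hypothetical names): "zpool split tank tank2"
 * detaches one device from each top-level mirror of 'tank' and builds a
 * new, exported pool 'tank2' from them, as described below.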
 *
 * Splits the named pool and gives it the new pool name.  Devices to be split
 * off may be listed, provided that no more than one device is specified
 * per top-level vdev mirror.  The newly split pool is left in an exported
 * state unless -R is specified.
 *
 * Restrictions: the top-level of the pool must only be made up of
 * mirrors; all devices in the pool must be healthy; no device may be
 * undergoing a resilvering operation.
 */
int
zpool_do_split(int argc, char **argv)
{
	char *srcpool, *newpool, *propval;
	char *mntopts = NULL;
	splitflags_t flags;
	int c, ret = 0;
	boolean_t loadkeys = B_FALSE;
	zpool_handle_t *zhp;
	nvlist_t *config, *props = NULL;

	flags.dryrun = B_FALSE;
	flags.import = B_FALSE;
	flags.name_flags = 0;

	/* check options */
	while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
		switch (c) {
		case 'g':
			flags.name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'R':
			flags.import = B_TRUE;
			if (add_prop_list(
			    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
			    &props, B_TRUE) != 0) {
				nvlist_free(props);
				usage(B_FALSE);
			}
			break;
		case 'l':
			loadkeys = B_TRUE;
			break;
		case 'n':
			flags.dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) != NULL) {
				*propval = '\0';
				propval++;
				if (add_prop_list(optarg, propval,
				    &props, B_TRUE) != 0) {
					nvlist_free(props);
					usage(B_FALSE);
				}
			} else {
				mntopts = optarg;
			}
			break;
		case 'P':
			flags.name_flags |= VDEV_NAME_PATH;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
			break;
		}
	}

	if (!flags.import && mntopts != NULL) {
		(void) fprintf(stderr, gettext("setting mntopts is only "
		    "valid when importing the pool\n"));
		usage(B_FALSE);
	}

	if (!flags.import && loadkeys) {
		(void) fprintf(stderr, gettext("loading keys is only "
		    "valid when importing the pool\n"));
		usage(B_FALSE);
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("Missing pool name\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("Missing new pool name\n"));
		usage(B_FALSE);
	}

	srcpool = argv[0];
	newpool = argv[1];

	argc -= 2;
	argv += 2;

	if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
		nvlist_free(props);
		return (1);
	}

	config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
	if (config == NULL) {
		ret = 1;
	} else {
		if (flags.dryrun) {
			(void) printf(gettext("would create '%s' with the "
			    "following layout:\n\n"), newpool);
			print_vdev_tree(NULL, newpool, config, 0, "",
			    flags.name_flags);
			print_vdev_tree(NULL, "dedup", config, 0,
			    VDEV_ALLOC_BIAS_DEDUP, 0);
			print_vdev_tree(NULL, "special", config, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, 0);
		}
	}

	zpool_close(zhp);

	if (ret != 0 || flags.dryrun || !flags.import) {
		nvlist_free(config);
		nvlist_free(props);
		return (ret);
	}
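
	/*
	 * At this point 'config' and 'props' are still allocated; note that
	 * every return path below frees both.
	 */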
6612 6613 /* 6614 * The split was successful. Now we need to open the new 6615 * pool and import it. 6616 */ 6617 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) { 6618 nvlist_free(config); 6619 nvlist_free(props); 6620 return (1); 6621 } 6622 6623 if (loadkeys) { 6624 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool); 6625 if (ret != 0) 6626 ret = 1; 6627 } 6628 6629 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL && 6630 zpool_enable_datasets(zhp, mntopts, 0) != 0) { 6631 ret = 1; 6632 (void) fprintf(stderr, gettext("Split was successful, but " 6633 "the datasets could not all be mounted\n")); 6634 (void) fprintf(stderr, gettext("Try doing '%s' with a " 6635 "different altroot\n"), "zpool import"); 6636 } 6637 zpool_close(zhp); 6638 nvlist_free(config); 6639 nvlist_free(props); 6640 6641 return (ret); 6642 } 6643 6644 6645 6646 /* 6647 * zpool online <pool> <device> ... 6648 */ 6649 int 6650 zpool_do_online(int argc, char **argv) 6651 { 6652 int c, i; 6653 char *poolname; 6654 zpool_handle_t *zhp; 6655 int ret = 0; 6656 vdev_state_t newstate; 6657 int flags = 0; 6658 6659 /* check options */ 6660 while ((c = getopt(argc, argv, "e")) != -1) { 6661 switch (c) { 6662 case 'e': 6663 flags |= ZFS_ONLINE_EXPAND; 6664 break; 6665 case '?': 6666 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6667 optopt); 6668 usage(B_FALSE); 6669 } 6670 } 6671 6672 argc -= optind; 6673 argv += optind; 6674 6675 /* get pool name and check number of arguments */ 6676 if (argc < 1) { 6677 (void) fprintf(stderr, gettext("missing pool name\n")); 6678 usage(B_FALSE); 6679 } 6680 if (argc < 2) { 6681 (void) fprintf(stderr, gettext("missing device name\n")); 6682 usage(B_FALSE); 6683 } 6684 6685 poolname = argv[0]; 6686 6687 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 6688 return (1); 6689 6690 for (i = 1; i < argc; i++) { 6691 if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) { 6692 if (newstate != VDEV_STATE_HEALTHY) { 6693 (void) printf(gettext("warning: device '%s' " 6694 "onlined, but remains in faulted state\n"), 6695 argv[i]); 6696 if (newstate == VDEV_STATE_FAULTED) 6697 (void) printf(gettext("use 'zpool " 6698 "clear' to restore a faulted " 6699 "device\n")); 6700 else 6701 (void) printf(gettext("use 'zpool " 6702 "replace' to replace devices " 6703 "that are no longer present\n")); 6704 } 6705 } else { 6706 ret = 1; 6707 } 6708 } 6709 6710 zpool_close(zhp); 6711 6712 return (ret); 6713 } 6714 6715 /* 6716 * zpool offline [-ft] <pool> <device> ... 6717 * 6718 * -f Force the device into a faulted state. 6719 * 6720 * -t Only take the device off-line temporarily. The offline/faulted 6721 * state will not be persistent across reboots. 
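 *
 * Illustrative example (hypothetical pool and device names):
 * "zpool offline -t tank sdb" takes 'sdb' offline only until the next
 * reboot or an explicit "zpool online".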
6722 */ 6723 /* ARGSUSED */ 6724 int 6725 zpool_do_offline(int argc, char **argv) 6726 { 6727 int c, i; 6728 char *poolname; 6729 zpool_handle_t *zhp; 6730 int ret = 0; 6731 boolean_t istmp = B_FALSE; 6732 boolean_t fault = B_FALSE; 6733 6734 /* check options */ 6735 while ((c = getopt(argc, argv, "ft")) != -1) { 6736 switch (c) { 6737 case 'f': 6738 fault = B_TRUE; 6739 break; 6740 case 't': 6741 istmp = B_TRUE; 6742 break; 6743 case '?': 6744 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6745 optopt); 6746 usage(B_FALSE); 6747 } 6748 } 6749 6750 argc -= optind; 6751 argv += optind; 6752 6753 /* get pool name and check number of arguments */ 6754 if (argc < 1) { 6755 (void) fprintf(stderr, gettext("missing pool name\n")); 6756 usage(B_FALSE); 6757 } 6758 if (argc < 2) { 6759 (void) fprintf(stderr, gettext("missing device name\n")); 6760 usage(B_FALSE); 6761 } 6762 6763 poolname = argv[0]; 6764 6765 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 6766 return (1); 6767 6768 for (i = 1; i < argc; i++) { 6769 if (fault) { 6770 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]); 6771 vdev_aux_t aux; 6772 if (istmp == B_FALSE) { 6773 /* Force the fault to persist across imports */ 6774 aux = VDEV_AUX_EXTERNAL_PERSIST; 6775 } else { 6776 aux = VDEV_AUX_EXTERNAL; 6777 } 6778 6779 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0) 6780 ret = 1; 6781 } else { 6782 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0) 6783 ret = 1; 6784 } 6785 } 6786 6787 zpool_close(zhp); 6788 6789 return (ret); 6790 } 6791 6792 /* 6793 * zpool clear <pool> [device] 6794 * 6795 * Clear all errors associated with a pool or a particular device. 6796 */ 6797 int 6798 zpool_do_clear(int argc, char **argv) 6799 { 6800 int c; 6801 int ret = 0; 6802 boolean_t dryrun = B_FALSE; 6803 boolean_t do_rewind = B_FALSE; 6804 boolean_t xtreme_rewind = B_FALSE; 6805 uint32_t rewind_policy = ZPOOL_NO_REWIND; 6806 nvlist_t *policy = NULL; 6807 zpool_handle_t *zhp; 6808 char *pool, *device; 6809 6810 /* check options */ 6811 while ((c = getopt(argc, argv, "FnX")) != -1) { 6812 switch (c) { 6813 case 'F': 6814 do_rewind = B_TRUE; 6815 break; 6816 case 'n': 6817 dryrun = B_TRUE; 6818 break; 6819 case 'X': 6820 xtreme_rewind = B_TRUE; 6821 break; 6822 case '?': 6823 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6824 optopt); 6825 usage(B_FALSE); 6826 } 6827 } 6828 6829 argc -= optind; 6830 argv += optind; 6831 6832 if (argc < 1) { 6833 (void) fprintf(stderr, gettext("missing pool name\n")); 6834 usage(B_FALSE); 6835 } 6836 6837 if (argc > 2) { 6838 (void) fprintf(stderr, gettext("too many arguments\n")); 6839 usage(B_FALSE); 6840 } 6841 6842 if ((dryrun || xtreme_rewind) && !do_rewind) { 6843 (void) fprintf(stderr, 6844 gettext("-n or -X only meaningful with -F\n")); 6845 usage(B_FALSE); 6846 } 6847 if (dryrun) 6848 rewind_policy = ZPOOL_TRY_REWIND; 6849 else if (do_rewind) 6850 rewind_policy = ZPOOL_DO_REWIND; 6851 if (xtreme_rewind) 6852 rewind_policy |= ZPOOL_EXTREME_REWIND; 6853 6854 /* In future, further rewind policy choices can be passed along here */ 6855 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 6856 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, 6857 rewind_policy) != 0) { 6858 return (1); 6859 } 6860 6861 pool = argv[0]; 6862 device = argc == 2 ? 
argv[1] : NULL; 6863 6864 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) { 6865 nvlist_free(policy); 6866 return (1); 6867 } 6868 6869 if (zpool_clear(zhp, device, policy) != 0) 6870 ret = 1; 6871 6872 zpool_close(zhp); 6873 6874 nvlist_free(policy); 6875 6876 return (ret); 6877 } 6878 6879 /* 6880 * zpool reguid <pool> 6881 */ 6882 int 6883 zpool_do_reguid(int argc, char **argv) 6884 { 6885 int c; 6886 char *poolname; 6887 zpool_handle_t *zhp; 6888 int ret = 0; 6889 6890 /* check options */ 6891 while ((c = getopt(argc, argv, "")) != -1) { 6892 switch (c) { 6893 case '?': 6894 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6895 optopt); 6896 usage(B_FALSE); 6897 } 6898 } 6899 6900 argc -= optind; 6901 argv += optind; 6902 6903 /* get pool name and check number of arguments */ 6904 if (argc < 1) { 6905 (void) fprintf(stderr, gettext("missing pool name\n")); 6906 usage(B_FALSE); 6907 } 6908 6909 if (argc > 1) { 6910 (void) fprintf(stderr, gettext("too many arguments\n")); 6911 usage(B_FALSE); 6912 } 6913 6914 poolname = argv[0]; 6915 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 6916 return (1); 6917 6918 ret = zpool_reguid(zhp); 6919 6920 zpool_close(zhp); 6921 return (ret); 6922 } 6923 6924 6925 /* 6926 * zpool reopen <pool> 6927 * 6928 * Reopen the pool so that the kernel can update the sizes of all vdevs. 6929 */ 6930 int 6931 zpool_do_reopen(int argc, char **argv) 6932 { 6933 int c; 6934 int ret = 0; 6935 boolean_t scrub_restart = B_TRUE; 6936 6937 /* check options */ 6938 while ((c = getopt(argc, argv, "n")) != -1) { 6939 switch (c) { 6940 case 'n': 6941 scrub_restart = B_FALSE; 6942 break; 6943 case '?': 6944 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6945 optopt); 6946 usage(B_FALSE); 6947 } 6948 } 6949 6950 argc -= optind; 6951 argv += optind; 6952 6953 /* if argc == 0 we will execute zpool_reopen_one on all pools */ 6954 ret = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, zpool_reopen_one, 6955 &scrub_restart); 6956 6957 return (ret); 6958 } 6959 6960 typedef struct scrub_cbdata { 6961 int cb_type; 6962 pool_scrub_cmd_t cb_scrub_cmd; 6963 } scrub_cbdata_t; 6964 6965 static boolean_t 6966 zpool_has_checkpoint(zpool_handle_t *zhp) 6967 { 6968 nvlist_t *config, *nvroot; 6969 6970 config = zpool_get_config(zhp, NULL); 6971 6972 if (config != NULL) { 6973 pool_checkpoint_stat_t *pcs = NULL; 6974 uint_t c; 6975 6976 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 6977 (void) nvlist_lookup_uint64_array(nvroot, 6978 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 6979 6980 if (pcs == NULL || pcs->pcs_state == CS_NONE) 6981 return (B_FALSE); 6982 6983 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS || 6984 pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 6985 return (B_TRUE); 6986 } 6987 6988 return (B_FALSE); 6989 } 6990 6991 static int 6992 scrub_callback(zpool_handle_t *zhp, void *data) 6993 { 6994 scrub_cbdata_t *cb = data; 6995 int err; 6996 6997 /* 6998 * Ignore faulted pools. 
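	 * A pool in the UNAVAIL state cannot be scanned; print an error and
	 * count it as a failure rather than silently skipping it.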
6999 */ 7000 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 7001 (void) fprintf(stderr, gettext("cannot scan '%s': pool is " 7002 "currently unavailable\n"), zpool_get_name(zhp)); 7003 return (1); 7004 } 7005 7006 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd); 7007 7008 if (err == 0 && zpool_has_checkpoint(zhp) && 7009 cb->cb_type == POOL_SCAN_SCRUB) { 7010 (void) printf(gettext("warning: will not scrub state that " 7011 "belongs to the checkpoint of pool '%s'\n"), 7012 zpool_get_name(zhp)); 7013 } 7014 7015 return (err != 0); 7016 } 7017 7018 static int 7019 wait_callback(zpool_handle_t *zhp, void *data) 7020 { 7021 zpool_wait_activity_t *act = data; 7022 return (zpool_wait(zhp, *act)); 7023 } 7024 7025 /* 7026 * zpool scrub [-s | -p] [-w] <pool> ... 7027 * 7028 * -s Stop. Stops any in-progress scrub. 7029 * -p Pause. Pause in-progress scrub. 7030 * -w Wait. Blocks until scrub has completed. 7031 */ 7032 int 7033 zpool_do_scrub(int argc, char **argv) 7034 { 7035 int c; 7036 scrub_cbdata_t cb; 7037 boolean_t wait = B_FALSE; 7038 int error; 7039 7040 cb.cb_type = POOL_SCAN_SCRUB; 7041 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 7042 7043 /* check options */ 7044 while ((c = getopt(argc, argv, "spw")) != -1) { 7045 switch (c) { 7046 case 's': 7047 cb.cb_type = POOL_SCAN_NONE; 7048 break; 7049 case 'p': 7050 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE; 7051 break; 7052 case 'w': 7053 wait = B_TRUE; 7054 break; 7055 case '?': 7056 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7057 optopt); 7058 usage(B_FALSE); 7059 } 7060 } 7061 7062 if (cb.cb_type == POOL_SCAN_NONE && 7063 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE) { 7064 (void) fprintf(stderr, gettext("invalid option combination: " 7065 "-s and -p are mutually exclusive\n")); 7066 usage(B_FALSE); 7067 } 7068 7069 if (wait && (cb.cb_type == POOL_SCAN_NONE || 7070 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) { 7071 (void) fprintf(stderr, gettext("invalid option combination: " 7072 "-w cannot be used with -p or -s\n")); 7073 usage(B_FALSE); 7074 } 7075 7076 argc -= optind; 7077 argv += optind; 7078 7079 if (argc < 1) { 7080 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7081 usage(B_FALSE); 7082 } 7083 7084 error = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, 7085 scrub_callback, &cb); 7086 7087 if (wait && !error) { 7088 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB; 7089 error = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, 7090 wait_callback, &act); 7091 } 7092 7093 return (error); 7094 } 7095 7096 /* 7097 * zpool resilver <pool> ... 7098 * 7099 * Restarts any in-progress resilver 7100 */ 7101 int 7102 zpool_do_resilver(int argc, char **argv) 7103 { 7104 int c; 7105 scrub_cbdata_t cb; 7106 7107 cb.cb_type = POOL_SCAN_RESILVER; 7108 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 7109 7110 /* check options */ 7111 while ((c = getopt(argc, argv, "")) != -1) { 7112 switch (c) { 7113 case '?': 7114 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7115 optopt); 7116 usage(B_FALSE); 7117 } 7118 } 7119 7120 argc -= optind; 7121 argv += optind; 7122 7123 if (argc < 1) { 7124 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7125 usage(B_FALSE); 7126 } 7127 7128 return (for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, 7129 scrub_callback, &cb)); 7130 } 7131 7132 /* 7133 * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...] 7134 * 7135 * -c Cancel. Ends any in-progress trim. 7136 * -d Secure trim. Requires kernel and device support. 7137 * -r <rate> Sets the TRIM rate in bytes (per second). 
Supports 7138 * adding a multiplier suffix such as 'k' or 'm'. 7139 * -s Suspend. TRIM can then be restarted with no flags. 7140 * -w Wait. Blocks until trimming has completed. 7141 */ 7142 int 7143 zpool_do_trim(int argc, char **argv) 7144 { 7145 struct option long_options[] = { 7146 {"cancel", no_argument, NULL, 'c'}, 7147 {"secure", no_argument, NULL, 'd'}, 7148 {"rate", required_argument, NULL, 'r'}, 7149 {"suspend", no_argument, NULL, 's'}, 7150 {"wait", no_argument, NULL, 'w'}, 7151 {0, 0, 0, 0} 7152 }; 7153 7154 pool_trim_func_t cmd_type = POOL_TRIM_START; 7155 uint64_t rate = 0; 7156 boolean_t secure = B_FALSE; 7157 boolean_t wait = B_FALSE; 7158 7159 int c; 7160 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL)) 7161 != -1) { 7162 switch (c) { 7163 case 'c': 7164 if (cmd_type != POOL_TRIM_START && 7165 cmd_type != POOL_TRIM_CANCEL) { 7166 (void) fprintf(stderr, gettext("-c cannot be " 7167 "combined with other options\n")); 7168 usage(B_FALSE); 7169 } 7170 cmd_type = POOL_TRIM_CANCEL; 7171 break; 7172 case 'd': 7173 if (cmd_type != POOL_TRIM_START) { 7174 (void) fprintf(stderr, gettext("-d cannot be " 7175 "combined with the -c or -s options\n")); 7176 usage(B_FALSE); 7177 } 7178 secure = B_TRUE; 7179 break; 7180 case 'r': 7181 if (cmd_type != POOL_TRIM_START) { 7182 (void) fprintf(stderr, gettext("-r cannot be " 7183 "combined with the -c or -s options\n")); 7184 usage(B_FALSE); 7185 } 7186 if (zfs_nicestrtonum(NULL, optarg, &rate) == -1) { 7187 (void) fprintf(stderr, 7188 gettext("invalid value for rate\n")); 7189 usage(B_FALSE); 7190 } 7191 break; 7192 case 's': 7193 if (cmd_type != POOL_TRIM_START && 7194 cmd_type != POOL_TRIM_SUSPEND) { 7195 (void) fprintf(stderr, gettext("-s cannot be " 7196 "combined with other options\n")); 7197 usage(B_FALSE); 7198 } 7199 cmd_type = POOL_TRIM_SUSPEND; 7200 break; 7201 case 'w': 7202 wait = B_TRUE; 7203 break; 7204 case '?': 7205 if (optopt != 0) { 7206 (void) fprintf(stderr, 7207 gettext("invalid option '%c'\n"), optopt); 7208 } else { 7209 (void) fprintf(stderr, 7210 gettext("invalid option '%s'\n"), 7211 argv[optind - 1]); 7212 } 7213 usage(B_FALSE); 7214 } 7215 } 7216 7217 argc -= optind; 7218 argv += optind; 7219 7220 if (argc < 1) { 7221 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7222 usage(B_FALSE); 7223 return (-1); 7224 } 7225 7226 if (wait && (cmd_type != POOL_TRIM_START)) { 7227 (void) fprintf(stderr, gettext("-w cannot be used with -c or " 7228 "-s\n")); 7229 usage(B_FALSE); 7230 } 7231 7232 char *poolname = argv[0]; 7233 zpool_handle_t *zhp = zpool_open(g_zfs, poolname); 7234 if (zhp == NULL) 7235 return (-1); 7236 7237 trimflags_t trim_flags = { 7238 .secure = secure, 7239 .rate = rate, 7240 .wait = wait, 7241 }; 7242 7243 nvlist_t *vdevs = fnvlist_alloc(); 7244 if (argc == 1) { 7245 /* no individual leaf vdevs specified, so add them all */ 7246 nvlist_t *config = zpool_get_config(zhp, NULL); 7247 nvlist_t *nvroot = fnvlist_lookup_nvlist(config, 7248 ZPOOL_CONFIG_VDEV_TREE); 7249 zpool_collect_leaves(zhp, nvroot, vdevs); 7250 trim_flags.fullpool = B_TRUE; 7251 } else { 7252 trim_flags.fullpool = B_FALSE; 7253 for (int i = 1; i < argc; i++) { 7254 fnvlist_add_boolean(vdevs, argv[i]); 7255 } 7256 } 7257 7258 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags); 7259 7260 fnvlist_free(vdevs); 7261 zpool_close(zhp); 7262 7263 return (error); 7264 } 7265 7266 /* 7267 * Converts a total number of seconds to a human readable string broken 7268 * down in to days/hours/minutes/seconds. 
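 * For example, 7384 seconds formats as "02:03:04" and 93784 seconds as
 * "1 days 02:03:04".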
7269 */ 7270 static void 7271 secs_to_dhms(uint64_t total, char *buf) 7272 { 7273 uint64_t days = total / 60 / 60 / 24; 7274 uint64_t hours = (total / 60 / 60) % 24; 7275 uint64_t mins = (total / 60) % 60; 7276 uint64_t secs = (total % 60); 7277 7278 if (days > 0) { 7279 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu", 7280 (u_longlong_t)days, (u_longlong_t)hours, 7281 (u_longlong_t)mins, (u_longlong_t)secs); 7282 } else { 7283 (void) sprintf(buf, "%02llu:%02llu:%02llu", 7284 (u_longlong_t)hours, (u_longlong_t)mins, 7285 (u_longlong_t)secs); 7286 } 7287 } 7288 7289 /* 7290 * Print out detailed scrub status. 7291 */ 7292 static void 7293 print_scan_scrub_resilver_status(pool_scan_stat_t *ps) 7294 { 7295 time_t start, end, pause; 7296 uint64_t pass_scanned, scanned, pass_issued, issued, total; 7297 uint64_t elapsed, scan_rate, issue_rate; 7298 double fraction_done; 7299 char processed_buf[7], scanned_buf[7], issued_buf[7], total_buf[7]; 7300 char srate_buf[7], irate_buf[7], time_buf[32]; 7301 7302 printf(" "); 7303 printf_color(ANSI_BOLD, gettext("scan:")); 7304 printf(" "); 7305 7306 /* If there's never been a scan, there's not much to say. */ 7307 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE || 7308 ps->pss_func >= POOL_SCAN_FUNCS) { 7309 (void) printf(gettext("none requested\n")); 7310 return; 7311 } 7312 7313 start = ps->pss_start_time; 7314 end = ps->pss_end_time; 7315 pause = ps->pss_pass_scrub_pause; 7316 7317 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf)); 7318 7319 assert(ps->pss_func == POOL_SCAN_SCRUB || 7320 ps->pss_func == POOL_SCAN_RESILVER); 7321 7322 /* Scan is finished or canceled. */ 7323 if (ps->pss_state == DSS_FINISHED) { 7324 secs_to_dhms(end - start, time_buf); 7325 7326 if (ps->pss_func == POOL_SCAN_SCRUB) { 7327 (void) printf(gettext("scrub repaired %s " 7328 "in %s with %llu errors on %s"), processed_buf, 7329 time_buf, (u_longlong_t)ps->pss_errors, 7330 ctime(&end)); 7331 } else if (ps->pss_func == POOL_SCAN_RESILVER) { 7332 (void) printf(gettext("resilvered %s " 7333 "in %s with %llu errors on %s"), processed_buf, 7334 time_buf, (u_longlong_t)ps->pss_errors, 7335 ctime(&end)); 7336 } 7337 return; 7338 } else if (ps->pss_state == DSS_CANCELED) { 7339 if (ps->pss_func == POOL_SCAN_SCRUB) { 7340 (void) printf(gettext("scrub canceled on %s"), 7341 ctime(&end)); 7342 } else if (ps->pss_func == POOL_SCAN_RESILVER) { 7343 (void) printf(gettext("resilver canceled on %s"), 7344 ctime(&end)); 7345 } 7346 return; 7347 } 7348 7349 assert(ps->pss_state == DSS_SCANNING); 7350 7351 /* Scan is in progress. Resilvers can't be paused. 
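	 * The pause-reporting branch below therefore only applies to scrubs.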
*/ 7352 if (ps->pss_func == POOL_SCAN_SCRUB) { 7353 if (pause == 0) { 7354 (void) printf(gettext("scrub in progress since %s"), 7355 ctime(&start)); 7356 } else { 7357 (void) printf(gettext("scrub paused since %s"), 7358 ctime(&pause)); 7359 (void) printf(gettext("\tscrub started on %s"), 7360 ctime(&start)); 7361 } 7362 } else if (ps->pss_func == POOL_SCAN_RESILVER) { 7363 (void) printf(gettext("resilver in progress since %s"), 7364 ctime(&start)); 7365 } 7366 7367 scanned = ps->pss_examined; 7368 pass_scanned = ps->pss_pass_exam; 7369 issued = ps->pss_issued; 7370 pass_issued = ps->pss_pass_issued; 7371 total = ps->pss_to_examine; 7372 7373 /* we are only done with a block once we have issued the IO for it */ 7374 fraction_done = (double)issued / total; 7375 7376 /* elapsed time for this pass, rounding up to 1 if it's 0 */ 7377 elapsed = time(NULL) - ps->pss_pass_start; 7378 elapsed -= ps->pss_pass_scrub_spent_paused; 7379 elapsed = (elapsed != 0) ? elapsed : 1; 7380 7381 scan_rate = pass_scanned / elapsed; 7382 issue_rate = pass_issued / elapsed; 7383 uint64_t total_secs_left = (issue_rate != 0 && total >= issued) ? 7384 ((total - issued) / issue_rate) : UINT64_MAX; 7385 secs_to_dhms(total_secs_left, time_buf); 7386 7387 /* format all of the numbers we will be reporting */ 7388 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf)); 7389 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf)); 7390 zfs_nicebytes(total, total_buf, sizeof (total_buf)); 7391 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf)); 7392 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf)); 7393 7394 /* do not print estimated time if we have a paused scrub */ 7395 if (pause == 0) { 7396 (void) printf(gettext("\t%s scanned at %s/s, " 7397 "%s issued at %s/s, %s total\n"), 7398 scanned_buf, srate_buf, issued_buf, irate_buf, total_buf); 7399 } else { 7400 (void) printf(gettext("\t%s scanned, %s issued, %s total\n"), 7401 scanned_buf, issued_buf, total_buf); 7402 } 7403 7404 if (ps->pss_func == POOL_SCAN_RESILVER) { 7405 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 7406 processed_buf, 100 * fraction_done); 7407 } else if (ps->pss_func == POOL_SCAN_SCRUB) { 7408 (void) printf(gettext("\t%s repaired, %.2f%% done"), 7409 processed_buf, 100 * fraction_done); 7410 } 7411 7412 if (pause == 0) { 7413 if (total_secs_left != UINT64_MAX && 7414 issue_rate >= 10 * 1024 * 1024) { 7415 (void) printf(gettext(", %s to go\n"), time_buf); 7416 } else { 7417 (void) printf(gettext(", no estimated " 7418 "completion time\n")); 7419 } 7420 } else { 7421 (void) printf(gettext("\n")); 7422 } 7423 } 7424 7425 static void 7426 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, char *vdev_name) 7427 { 7428 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE) 7429 return; 7430 7431 printf(" "); 7432 printf_color(ANSI_BOLD, gettext("scan:")); 7433 printf(" "); 7434 7435 uint64_t bytes_scanned = vrs->vrs_bytes_scanned; 7436 uint64_t bytes_issued = vrs->vrs_bytes_issued; 7437 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt; 7438 uint64_t bytes_est = vrs->vrs_bytes_est; 7439 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned / 7440 (vrs->vrs_pass_time_ms + 1)) * 1000; 7441 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued / 7442 (vrs->vrs_pass_time_ms + 1)) * 1000; 7443 double scan_pct = MIN((double)bytes_scanned * 100 / 7444 (bytes_est + 1), 100); 7445 7446 /* Format all of the numbers we will be reporting */ 7447 char bytes_scanned_buf[7], bytes_issued_buf[7]; 7448 char bytes_rebuilt_buf[7], bytes_est_buf[7]; 7449 char 
scan_rate_buf[7], issue_rate_buf[7], time_buf[32]; 7450 zfs_nicebytes(bytes_scanned, bytes_scanned_buf, 7451 sizeof (bytes_scanned_buf)); 7452 zfs_nicebytes(bytes_issued, bytes_issued_buf, 7453 sizeof (bytes_issued_buf)); 7454 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf, 7455 sizeof (bytes_rebuilt_buf)); 7456 zfs_nicebytes(bytes_est, bytes_est_buf, sizeof (bytes_est_buf)); 7457 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf)); 7458 zfs_nicebytes(issue_rate, issue_rate_buf, sizeof (issue_rate_buf)); 7459 7460 time_t start = vrs->vrs_start_time; 7461 time_t end = vrs->vrs_end_time; 7462 7463 /* Rebuild is finished or canceled. */ 7464 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) { 7465 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf); 7466 (void) printf(gettext("resilvered (%s) %s in %s " 7467 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf, 7468 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end)); 7469 return; 7470 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) { 7471 (void) printf(gettext("resilver (%s) canceled on %s"), 7472 vdev_name, ctime(&end)); 7473 return; 7474 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 7475 (void) printf(gettext("resilver (%s) in progress since %s"), 7476 vdev_name, ctime(&start)); 7477 } 7478 7479 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE); 7480 7481 secs_to_dhms(MAX((int64_t)bytes_est - (int64_t)bytes_scanned, 0) / 7482 MAX(scan_rate, 1), time_buf); 7483 7484 (void) printf(gettext("\t%s scanned at %s/s, %s issued %s/s, " 7485 "%s total\n"), bytes_scanned_buf, scan_rate_buf, 7486 bytes_issued_buf, issue_rate_buf, bytes_est_buf); 7487 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 7488 bytes_rebuilt_buf, scan_pct); 7489 7490 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 7491 if (scan_rate >= 10 * 1024 * 1024) { 7492 (void) printf(gettext(", %s to go\n"), time_buf); 7493 } else { 7494 (void) printf(gettext(", no estimated " 7495 "completion time\n")); 7496 } 7497 } else { 7498 (void) printf(gettext("\n")); 7499 } 7500 } 7501 7502 /* 7503 * Print rebuild status for top-level vdevs. 7504 */ 7505 static void 7506 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot) 7507 { 7508 nvlist_t **child; 7509 uint_t children; 7510 7511 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 7512 &child, &children) != 0) 7513 children = 0; 7514 7515 for (uint_t c = 0; c < children; c++) { 7516 vdev_rebuild_stat_t *vrs; 7517 uint_t i; 7518 7519 if (nvlist_lookup_uint64_array(child[c], 7520 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 7521 char *name = zpool_vdev_name(g_zfs, zhp, 7522 child[c], VDEV_NAME_TYPE_ID); 7523 print_rebuild_status_impl(vrs, name); 7524 free(name); 7525 } 7526 } 7527 } 7528 7529 /* 7530 * As we don't scrub checkpointed blocks, we want to warn the user that we 7531 * skipped scanning some blocks if a checkpoint exists or existed at any 7532 * time during the scan. If a sequential instead of healing reconstruction 7533 * was performed then the blocks were reconstructed. However, their checksums 7534 * have not been verified so we still print the warning. 
7535 */ 7536 static void 7537 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs) 7538 { 7539 if (ps == NULL || pcs == NULL) 7540 return; 7541 7542 if (pcs->pcs_state == CS_NONE || 7543 pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 7544 return; 7545 7546 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS); 7547 7548 if (ps->pss_state == DSS_NONE) 7549 return; 7550 7551 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) && 7552 ps->pss_end_time < pcs->pcs_start_time) 7553 return; 7554 7555 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) { 7556 (void) printf(gettext(" scan warning: skipped blocks " 7557 "that are only referenced by the checkpoint.\n")); 7558 } else { 7559 assert(ps->pss_state == DSS_SCANNING); 7560 (void) printf(gettext(" scan warning: skipping blocks " 7561 "that are only referenced by the checkpoint.\n")); 7562 } 7563 } 7564 7565 /* 7566 * Returns B_TRUE if there is an active rebuild in progress. Otherwise, 7567 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for 7568 * the last completed (or cancelled) rebuild. 7569 */ 7570 static boolean_t 7571 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time) 7572 { 7573 nvlist_t **child; 7574 uint_t children; 7575 boolean_t rebuilding = B_FALSE; 7576 uint64_t end_time = 0; 7577 7578 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 7579 &child, &children) != 0) 7580 children = 0; 7581 7582 for (uint_t c = 0; c < children; c++) { 7583 vdev_rebuild_stat_t *vrs; 7584 uint_t i; 7585 7586 if (nvlist_lookup_uint64_array(child[c], 7587 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 7588 7589 if (vrs->vrs_end_time > end_time) 7590 end_time = vrs->vrs_end_time; 7591 7592 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 7593 rebuilding = B_TRUE; 7594 end_time = 0; 7595 break; 7596 } 7597 } 7598 } 7599 7600 if (rebuild_end_time != NULL) 7601 *rebuild_end_time = end_time; 7602 7603 return (rebuilding); 7604 } 7605 7606 /* 7607 * Print the scan status. 7608 */ 7609 static void 7610 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot) 7611 { 7612 uint64_t rebuild_end_time = 0, resilver_end_time = 0; 7613 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE; 7614 boolean_t active_resilver = B_FALSE; 7615 pool_checkpoint_stat_t *pcs = NULL; 7616 pool_scan_stat_t *ps = NULL; 7617 uint_t c; 7618 7619 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS, 7620 (uint64_t **)&ps, &c) == 0) { 7621 if (ps->pss_func == POOL_SCAN_RESILVER) { 7622 resilver_end_time = ps->pss_end_time; 7623 active_resilver = (ps->pss_state == DSS_SCANNING); 7624 } 7625 7626 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER); 7627 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB); 7628 } 7629 7630 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time); 7631 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0)); 7632 7633 /* Always print the scrub status when available. */ 7634 if (have_scrub) 7635 print_scan_scrub_resilver_status(ps); 7636 7637 /* 7638 * When there is an active resilver or rebuild print its status. 7639 * Otherwise print the status of the last resilver or rebuild. 
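	 * Whichever of the two finished more recently, judged by its end
	 * timestamp, is the one shown.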
7640 */ 7641 if (active_resilver || (!active_rebuild && have_resilver && 7642 resilver_end_time && resilver_end_time > rebuild_end_time)) { 7643 print_scan_scrub_resilver_status(ps); 7644 } else if (active_rebuild || (!active_resilver && have_rebuild && 7645 rebuild_end_time && rebuild_end_time > resilver_end_time)) { 7646 print_rebuild_status(zhp, nvroot); 7647 } 7648 7649 (void) nvlist_lookup_uint64_array(nvroot, 7650 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 7651 print_checkpoint_scan_warning(ps, pcs); 7652 } 7653 7654 /* 7655 * Print out detailed removal status. 7656 */ 7657 static void 7658 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs) 7659 { 7660 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7]; 7661 time_t start, end; 7662 nvlist_t *config, *nvroot; 7663 nvlist_t **child; 7664 uint_t children; 7665 char *vdev_name; 7666 7667 if (prs == NULL || prs->prs_state == DSS_NONE) 7668 return; 7669 7670 /* 7671 * Determine name of vdev. 7672 */ 7673 config = zpool_get_config(zhp, NULL); 7674 nvroot = fnvlist_lookup_nvlist(config, 7675 ZPOOL_CONFIG_VDEV_TREE); 7676 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 7677 &child, &children) == 0); 7678 assert(prs->prs_removing_vdev < children); 7679 vdev_name = zpool_vdev_name(g_zfs, zhp, 7680 child[prs->prs_removing_vdev], B_TRUE); 7681 7682 printf_color(ANSI_BOLD, gettext("remove: ")); 7683 7684 start = prs->prs_start_time; 7685 end = prs->prs_end_time; 7686 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf)); 7687 7688 /* 7689 * Removal is finished or canceled. 7690 */ 7691 if (prs->prs_state == DSS_FINISHED) { 7692 uint64_t minutes_taken = (end - start) / 60; 7693 7694 (void) printf(gettext("Removal of vdev %llu copied %s " 7695 "in %lluh%um, completed on %s"), 7696 (longlong_t)prs->prs_removing_vdev, 7697 copied_buf, 7698 (u_longlong_t)(minutes_taken / 60), 7699 (uint_t)(minutes_taken % 60), 7700 ctime((time_t *)&end)); 7701 } else if (prs->prs_state == DSS_CANCELED) { 7702 (void) printf(gettext("Removal of %s canceled on %s"), 7703 vdev_name, ctime(&end)); 7704 } else { 7705 uint64_t copied, total, elapsed, mins_left, hours_left; 7706 double fraction_done; 7707 uint_t rate; 7708 7709 assert(prs->prs_state == DSS_SCANNING); 7710 7711 /* 7712 * Removal is in progress. 7713 */ 7714 (void) printf(gettext( 7715 "Evacuation of %s in progress since %s"), 7716 vdev_name, ctime(&start)); 7717 7718 copied = prs->prs_copied > 0 ? prs->prs_copied : 1; 7719 total = prs->prs_to_copy; 7720 fraction_done = (double)copied / total; 7721 7722 /* elapsed time for this pass */ 7723 elapsed = time(NULL) - prs->prs_start_time; 7724 elapsed = elapsed > 0 ? elapsed : 1; 7725 rate = copied / elapsed; 7726 rate = rate > 0 ? 
rate : 1; 7727 mins_left = ((total - copied) / rate) / 60; 7728 hours_left = mins_left / 60; 7729 7730 zfs_nicenum(copied, examined_buf, sizeof (examined_buf)); 7731 zfs_nicenum(total, total_buf, sizeof (total_buf)); 7732 zfs_nicenum(rate, rate_buf, sizeof (rate_buf)); 7733 7734 /* 7735 * do not print estimated time if hours_left is more than 7736 * 30 days 7737 */ 7738 (void) printf(gettext(" %s copied out of %s at %s/s, " 7739 "%.2f%% done"), 7740 examined_buf, total_buf, rate_buf, 100 * fraction_done); 7741 if (hours_left < (30 * 24)) { 7742 (void) printf(gettext(", %lluh%um to go\n"), 7743 (u_longlong_t)hours_left, (uint_t)(mins_left % 60)); 7744 } else { 7745 (void) printf(gettext( 7746 ", (copy is slow, no estimated time)\n")); 7747 } 7748 } 7749 free(vdev_name); 7750 7751 if (prs->prs_mapping_memory > 0) { 7752 char mem_buf[7]; 7753 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf)); 7754 (void) printf(gettext(" %s memory used for " 7755 "removed device mappings\n"), 7756 mem_buf); 7757 } 7758 } 7759 7760 static void 7761 print_checkpoint_status(pool_checkpoint_stat_t *pcs) 7762 { 7763 time_t start; 7764 char space_buf[7]; 7765 7766 if (pcs == NULL || pcs->pcs_state == CS_NONE) 7767 return; 7768 7769 (void) printf(gettext("checkpoint: ")); 7770 7771 start = pcs->pcs_start_time; 7772 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf)); 7773 7774 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) { 7775 char *date = ctime(&start); 7776 7777 /* 7778 * ctime() adds a newline at the end of the generated 7779 * string, thus the weird format specifier and the 7780 * strlen() call used to chop it off from the output. 7781 */ 7782 (void) printf(gettext("created %.*s, consumes %s\n"), 7783 (int)(strlen(date) - 1), date, space_buf); 7784 return; 7785 } 7786 7787 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 7788 7789 (void) printf(gettext("discarding, %s remaining.\n"), 7790 space_buf); 7791 } 7792 7793 static void 7794 print_error_log(zpool_handle_t *zhp) 7795 { 7796 nvlist_t *nverrlist = NULL; 7797 nvpair_t *elem; 7798 char *pathname; 7799 size_t len = MAXPATHLEN * 2; 7800 7801 if (zpool_get_errlog(zhp, &nverrlist) != 0) 7802 return; 7803 7804 (void) printf("errors: Permanent errors have been " 7805 "detected in the following files:\n\n"); 7806 7807 pathname = safe_malloc(len); 7808 elem = NULL; 7809 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) { 7810 nvlist_t *nv; 7811 uint64_t dsobj, obj; 7812 7813 verify(nvpair_value_nvlist(elem, &nv) == 0); 7814 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET, 7815 &dsobj) == 0); 7816 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, 7817 &obj) == 0); 7818 zpool_obj_to_path(zhp, dsobj, obj, pathname, len); 7819 (void) printf("%7s %s\n", "", pathname); 7820 } 7821 free(pathname); 7822 nvlist_free(nverrlist); 7823 } 7824 7825 static void 7826 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares, 7827 uint_t nspares) 7828 { 7829 uint_t i; 7830 char *name; 7831 7832 if (nspares == 0) 7833 return; 7834 7835 (void) printf(gettext("\tspares\n")); 7836 7837 for (i = 0; i < nspares; i++) { 7838 name = zpool_vdev_name(g_zfs, zhp, spares[i], 7839 cb->cb_name_flags); 7840 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL); 7841 free(name); 7842 } 7843 } 7844 7845 static void 7846 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache, 7847 uint_t nl2cache) 7848 { 7849 uint_t i; 7850 char *name; 7851 7852 if (nl2cache == 0) 7853 return; 7854 7855 (void) 
printf(gettext("\tcache\n")); 7856 7857 for (i = 0; i < nl2cache; i++) { 7858 name = zpool_vdev_name(g_zfs, zhp, l2cache[i], 7859 cb->cb_name_flags); 7860 print_status_config(zhp, cb, name, l2cache[i], 2, 7861 B_FALSE, NULL); 7862 free(name); 7863 } 7864 } 7865 7866 static void 7867 print_dedup_stats(nvlist_t *config) 7868 { 7869 ddt_histogram_t *ddh; 7870 ddt_stat_t *dds; 7871 ddt_object_t *ddo; 7872 uint_t c; 7873 char dspace[6], mspace[6]; 7874 7875 /* 7876 * If the pool was faulted then we may not have been able to 7877 * obtain the config. Otherwise, if we have anything in the dedup 7878 * table continue processing the stats. 7879 */ 7880 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS, 7881 (uint64_t **)&ddo, &c) != 0) 7882 return; 7883 7884 (void) printf("\n"); 7885 (void) printf(gettext(" dedup: ")); 7886 if (ddo->ddo_count == 0) { 7887 (void) printf(gettext("no DDT entries\n")); 7888 return; 7889 } 7890 7891 zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace)); 7892 zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace)); 7893 (void) printf("DDT entries %llu, size %s on disk, %s in core\n", 7894 (u_longlong_t)ddo->ddo_count, 7895 dspace, 7896 mspace); 7897 7898 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, 7899 (uint64_t **)&dds, &c) == 0); 7900 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM, 7901 (uint64_t **)&ddh, &c) == 0); 7902 zpool_dump_ddt(dds, ddh); 7903 } 7904 7905 /* 7906 * Display a summary of pool status. Displays a summary such as: 7907 * 7908 * pool: tank 7909 * status: DEGRADED 7910 * reason: One or more devices ... 7911 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01 7912 * config: 7913 * mirror DEGRADED 7914 * c1t0d0 OK 7915 * c2t0d0 UNAVAIL 7916 * 7917 * When given the '-v' option, we print out the complete config. If the '-e' 7918 * option is specified, then we print out error rate information as well. 7919 */ 7920 static int 7921 status_callback(zpool_handle_t *zhp, void *data) 7922 { 7923 status_cbdata_t *cbp = data; 7924 nvlist_t *config, *nvroot; 7925 char *msgid; 7926 zpool_status_t reason; 7927 zpool_errata_t errata; 7928 const char *health; 7929 uint_t c; 7930 vdev_stat_t *vs; 7931 7932 config = zpool_get_config(zhp, NULL); 7933 reason = zpool_get_status(zhp, &msgid, &errata); 7934 7935 cbp->cb_count++; 7936 7937 /* 7938 * If we were given 'zpool status -x', only report those pools with 7939 * problems. 
7940 */ 7941 if (cbp->cb_explain && 7942 (reason == ZPOOL_STATUS_OK || 7943 reason == ZPOOL_STATUS_VERSION_OLDER || 7944 reason == ZPOOL_STATUS_FEAT_DISABLED)) { 7945 if (!cbp->cb_allpools) { 7946 (void) printf(gettext("pool '%s' is healthy\n"), 7947 zpool_get_name(zhp)); 7948 if (cbp->cb_first) 7949 cbp->cb_first = B_FALSE; 7950 } 7951 return (0); 7952 } 7953 7954 if (cbp->cb_first) 7955 cbp->cb_first = B_FALSE; 7956 else 7957 (void) printf("\n"); 7958 7959 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 7960 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 7961 (uint64_t **)&vs, &c) == 0); 7962 7963 health = zpool_get_state_str(zhp); 7964 7965 printf(" "); 7966 printf_color(ANSI_BOLD, gettext("pool:")); 7967 printf(" %s\n", zpool_get_name(zhp)); 7968 printf(" "); 7969 printf_color(ANSI_BOLD, gettext("state: ")); 7970 7971 printf_color(health_str_to_color(health), "%s", health); 7972 7973 printf("\n"); 7974 7975 switch (reason) { 7976 case ZPOOL_STATUS_MISSING_DEV_R: 7977 printf_color(ANSI_BOLD, gettext("status: ")); 7978 printf_color(ANSI_YELLOW, gettext("One or more devices could " 7979 "not be opened. Sufficient replicas exist for\n\tthe pool " 7980 "to continue functioning in a degraded state.\n")); 7981 printf_color(ANSI_BOLD, gettext("action: ")); 7982 printf_color(ANSI_YELLOW, gettext("Attach the missing device " 7983 "and online it using 'zpool online'.\n")); 7984 break; 7985 7986 case ZPOOL_STATUS_MISSING_DEV_NR: 7987 printf_color(ANSI_BOLD, gettext("status: ")); 7988 printf_color(ANSI_YELLOW, gettext("One or more devices could " 7989 "not be opened. There are insufficient\n\treplicas for the" 7990 " pool to continue functioning.\n")); 7991 printf_color(ANSI_BOLD, gettext("action: ")); 7992 printf_color(ANSI_YELLOW, gettext("Attach the missing device " 7993 "and online it using 'zpool online'.\n")); 7994 break; 7995 7996 case ZPOOL_STATUS_CORRUPT_LABEL_R: 7997 printf_color(ANSI_BOLD, gettext("status: ")); 7998 printf_color(ANSI_YELLOW, gettext("One or more devices could " 7999 "not be used because the label is missing or\n\tinvalid. " 8000 "Sufficient replicas exist for the pool to continue\n\t" 8001 "functioning in a degraded state.\n")); 8002 printf_color(ANSI_BOLD, gettext("action: ")); 8003 printf_color(ANSI_YELLOW, gettext("Replace the device using " 8004 "'zpool replace'.\n")); 8005 break; 8006 8007 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 8008 printf_color(ANSI_BOLD, gettext("status: ")); 8009 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8010 "not be used because the label is missing \n\tor invalid. " 8011 "There are insufficient replicas for the pool to " 8012 "continue\n\tfunctioning.\n")); 8013 zpool_explain_recover(zpool_get_handle(zhp), 8014 zpool_get_name(zhp), reason, config); 8015 break; 8016 8017 case ZPOOL_STATUS_FAILING_DEV: 8018 printf_color(ANSI_BOLD, gettext("status: ")); 8019 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8020 "experienced an unrecoverable error. An\n\tattempt was " 8021 "made to correct the error. 
Applications are " 8022 "unaffected.\n")); 8023 printf_color(ANSI_BOLD, gettext("action: ")); 8024 printf_color(ANSI_YELLOW, gettext("Determine if the " 8025 "device needs to be replaced, and clear the errors\n\tusing" 8026 " 'zpool clear' or replace the device with 'zpool " 8027 "replace'.\n")); 8028 break; 8029 8030 case ZPOOL_STATUS_OFFLINE_DEV: 8031 printf_color(ANSI_BOLD, gettext("status: ")); 8032 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8033 "been taken offline by the administrator.\n\tSufficient " 8034 "replicas exist for the pool to continue functioning in " 8035 "a\n\tdegraded state.\n")); 8036 printf_color(ANSI_BOLD, gettext("action: ")); 8037 printf_color(ANSI_YELLOW, gettext("Online the device " 8038 "using 'zpool online' or replace the device with\n\t'zpool " 8039 "replace'.\n")); 8040 break; 8041 8042 case ZPOOL_STATUS_REMOVED_DEV: 8043 printf_color(ANSI_BOLD, gettext("status: ")); 8044 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8045 "been removed by the administrator.\n\tSufficient " 8046 "replicas exist for the pool to continue functioning in " 8047 "a\n\tdegraded state.\n")); 8048 printf_color(ANSI_BOLD, gettext("action: ")); 8049 printf_color(ANSI_YELLOW, gettext("Online the device " 8050 "using zpool online' or replace the device with\n\t'zpool " 8051 "replace'.\n")); 8052 break; 8053 8054 case ZPOOL_STATUS_RESILVERING: 8055 case ZPOOL_STATUS_REBUILDING: 8056 printf_color(ANSI_BOLD, gettext("status: ")); 8057 printf_color(ANSI_YELLOW, gettext("One or more devices is " 8058 "currently being resilvered. The pool will\n\tcontinue " 8059 "to function, possibly in a degraded state.\n")); 8060 printf_color(ANSI_BOLD, gettext("action: ")); 8061 printf_color(ANSI_YELLOW, gettext("Wait for the resilver to " 8062 "complete.\n")); 8063 break; 8064 8065 case ZPOOL_STATUS_REBUILD_SCRUB: 8066 printf_color(ANSI_BOLD, gettext("status: ")); 8067 printf_color(ANSI_YELLOW, gettext("One or more devices have " 8068 "been sequentially resilvered, scrubbing\n\tthe pool " 8069 "is recommended.\n")); 8070 printf_color(ANSI_BOLD, gettext("action: ")); 8071 printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to " 8072 "verify all data checksums.\n")); 8073 break; 8074 8075 case ZPOOL_STATUS_CORRUPT_DATA: 8076 printf_color(ANSI_BOLD, gettext("status: ")); 8077 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8078 "experienced an error resulting in data\n\tcorruption. " 8079 "Applications may be affected.\n")); 8080 printf_color(ANSI_BOLD, gettext("action: ")); 8081 printf_color(ANSI_YELLOW, gettext("Restore the file in question" 8082 " if possible. Otherwise restore the\n\tentire pool from " 8083 "backup.\n")); 8084 break; 8085 8086 case ZPOOL_STATUS_CORRUPT_POOL: 8087 printf_color(ANSI_BOLD, gettext("status: ")); 8088 printf_color(ANSI_YELLOW, gettext("The pool metadata is " 8089 "corrupted and the pool cannot be opened.\n")); 8090 zpool_explain_recover(zpool_get_handle(zhp), 8091 zpool_get_name(zhp), reason, config); 8092 break; 8093 8094 case ZPOOL_STATUS_VERSION_OLDER: 8095 printf_color(ANSI_BOLD, gettext("status: ")); 8096 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 8097 "a legacy on-disk format. The pool can\n\tstill be used, " 8098 "but some features are unavailable.\n")); 8099 printf_color(ANSI_BOLD, gettext("action: ")); 8100 printf_color(ANSI_YELLOW, gettext("Upgrade the pool using " 8101 "'zpool upgrade'. 
Once this is done, the\n\tpool will no " 8102 "longer be accessible on software that does not support\n\t" 8103 "feature flags.\n")); 8104 break; 8105 8106 case ZPOOL_STATUS_VERSION_NEWER: 8107 printf_color(ANSI_BOLD, gettext("status: ")); 8108 printf_color(ANSI_YELLOW, gettext("The pool has been upgraded " 8109 "to a newer, incompatible on-disk version.\n\tThe pool " 8110 "cannot be accessed on this system.\n")); 8111 printf_color(ANSI_BOLD, gettext("action: ")); 8112 printf_color(ANSI_YELLOW, gettext("Access the pool from a " 8113 "system running more recent software, or\n\trestore the " 8114 "pool from backup.\n")); 8115 break; 8116 8117 case ZPOOL_STATUS_FEAT_DISABLED: 8118 printf_color(ANSI_BOLD, gettext("status: ")); 8119 printf_color(ANSI_YELLOW, gettext("Some supported features are " 8120 "not enabled on the pool. The pool can\n\tstill be used, " 8121 "but some features are unavailable.\n")); 8122 printf_color(ANSI_BOLD, gettext("action: ")); 8123 printf_color(ANSI_YELLOW, gettext("Enable all features using " 8124 "'zpool upgrade'. Once this is done,\n\tthe pool may no " 8125 "longer be accessible by software that does not support\n\t" 8126 "the features. See zpool-features(5) for details.\n")); 8127 break; 8128 8129 case ZPOOL_STATUS_UNSUP_FEAT_READ: 8130 printf_color(ANSI_BOLD, gettext("status: ")); 8131 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed " 8132 "on this system because it uses the\n\tfollowing feature(s)" 8133 " not supported on this system:\n")); 8134 zpool_print_unsup_feat(config); 8135 (void) printf("\n"); 8136 printf_color(ANSI_BOLD, gettext("action: ")); 8137 printf_color(ANSI_YELLOW, gettext("Access the pool from a " 8138 "system that supports the required feature(s),\n\tor " 8139 "restore the pool from backup.\n")); 8140 break; 8141 8142 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 8143 printf_color(ANSI_BOLD, gettext("status: ")); 8144 printf_color(ANSI_YELLOW, gettext("The pool can only be " 8145 "accessed in read-only mode on this system. It\n\tcannot be" 8146 " accessed in read-write mode because it uses the " 8147 "following\n\tfeature(s) not supported on this system:\n")); 8148 zpool_print_unsup_feat(config); 8149 (void) printf("\n"); 8150 printf_color(ANSI_BOLD, gettext("action: ")); 8151 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed " 8152 "in read-write mode. Import the pool with\n" 8153 "\t\"-o readonly=on\", access the pool from a system that " 8154 "supports the\n\trequired feature(s), or restore the " 8155 "pool from backup.\n")); 8156 break; 8157 8158 case ZPOOL_STATUS_FAULTED_DEV_R: 8159 printf_color(ANSI_BOLD, gettext("status: ")); 8160 printf_color(ANSI_YELLOW, gettext("One or more devices are " 8161 "faulted in response to persistent errors.\n\tSufficient " 8162 "replicas exist for the pool to continue functioning " 8163 "in a\n\tdegraded state.\n")); 8164 printf_color(ANSI_BOLD, gettext("action: ")); 8165 printf_color(ANSI_YELLOW, gettext("Replace the faulted device, " 8166 "or use 'zpool clear' to mark the device\n\trepaired.\n")); 8167 break; 8168 8169 case ZPOOL_STATUS_FAULTED_DEV_NR: 8170 printf_color(ANSI_BOLD, gettext("status: ")); 8171 printf_color(ANSI_YELLOW, gettext("One or more devices are " 8172 "faulted in response to persistent errors. There are " 8173 "insufficient replicas for the pool to\n\tcontinue " 8174 "functioning.\n")); 8175 printf_color(ANSI_BOLD, gettext("action: ")); 8176 printf_color(ANSI_YELLOW, gettext("Destroy and re-create the " 8177 "pool from a backup source. 
Manually marking the device\n" 8178 "\trepaired using 'zpool clear' may allow some data " 8179 "to be recovered.\n")); 8180 break; 8181 8182 case ZPOOL_STATUS_IO_FAILURE_MMP: 8183 printf_color(ANSI_BOLD, gettext("status: ")); 8184 printf_color(ANSI_YELLOW, gettext("The pool is suspended " 8185 "because multihost writes failed or were delayed;\n\t" 8186 "another system could import the pool undetected.\n")); 8187 printf_color(ANSI_BOLD, gettext("action: ")); 8188 printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices" 8189 " are connected, then reboot your system and\n\timport the " 8190 "pool.\n")); 8191 break; 8192 8193 case ZPOOL_STATUS_IO_FAILURE_WAIT: 8194 case ZPOOL_STATUS_IO_FAILURE_CONTINUE: 8195 printf_color(ANSI_BOLD, gettext("status: ")); 8196 printf_color(ANSI_YELLOW, gettext("One or more devices are " 8197 "faulted in response to IO failures.\n")); 8198 printf_color(ANSI_BOLD, gettext("action: ")); 8199 printf_color(ANSI_YELLOW, gettext("Make sure the affected " 8200 "devices are connected, then run 'zpool clear'.\n")); 8201 break; 8202 8203 case ZPOOL_STATUS_BAD_LOG: 8204 printf_color(ANSI_BOLD, gettext("status: ")); 8205 printf_color(ANSI_YELLOW, gettext("An intent log record " 8206 "could not be read.\n" 8207 "\tWaiting for administrator intervention to fix the " 8208 "faulted pool.\n")); 8209 printf_color(ANSI_BOLD, gettext("action: ")); 8210 printf_color(ANSI_YELLOW, gettext("Either restore the affected " 8211 "device(s) and run 'zpool online',\n" 8212 "\tor ignore the intent log records by running " 8213 "'zpool clear'.\n")); 8214 break; 8215 8216 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 8217 (void) printf(gettext("status: One or more devices are " 8218 "configured to use a non-native block size.\n" 8219 "\tExpect reduced performance.\n")); 8220 (void) printf(gettext("action: Replace affected devices with " 8221 "devices that support the\n\tconfigured block size, or " 8222 "migrate data to a properly configured\n\tpool.\n")); 8223 break; 8224 8225 case ZPOOL_STATUS_HOSTID_MISMATCH: 8226 printf_color(ANSI_BOLD, gettext("status: ")); 8227 printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid" 8228 " and system hostid on imported pool.\n\tThis pool was " 8229 "previously imported into a system with a different " 8230 "hostid,\n\tand then was verbatim imported into this " 8231 "system.\n")); 8232 printf_color(ANSI_BOLD, gettext("action: ")); 8233 printf_color(ANSI_YELLOW, gettext("Export this pool on all " 8234 "systems on which it is imported.\n" 8235 "\tThen import it to correct the mismatch.\n")); 8236 break; 8237 8238 case ZPOOL_STATUS_ERRATA: 8239 printf_color(ANSI_BOLD, gettext("status: ")); 8240 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"), 8241 errata); 8242 8243 switch (errata) { 8244 case ZPOOL_ERRATA_NONE: 8245 break; 8246 8247 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 8248 printf_color(ANSI_BOLD, gettext("action: ")); 8249 printf_color(ANSI_YELLOW, gettext("To correct the issue" 8250 " run 'zpool scrub'.\n")); 8251 break; 8252 8253 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 8254 (void) printf(gettext("\tExisting encrypted datasets " 8255 "contain an on-disk incompatibility\n\twhich " 8256 "needs to be corrected.\n")); 8257 printf_color(ANSI_BOLD, gettext("action: ")); 8258 printf_color(ANSI_YELLOW, gettext("To correct the issue" 8259 " backup existing encrypted datasets to new\n\t" 8260 "encrypted datasets and destroy the old ones. 
" 8261 "'zfs mount -o ro' can\n\tbe used to temporarily " 8262 "mount existing encrypted datasets readonly.\n")); 8263 break; 8264 8265 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 8266 (void) printf(gettext("\tExisting encrypted snapshots " 8267 "and bookmarks contain an on-disk\n\tincompat" 8268 "ibility. This may cause on-disk corruption if " 8269 "they are used\n\twith 'zfs recv'.\n")); 8270 printf_color(ANSI_BOLD, gettext("action: ")); 8271 printf_color(ANSI_YELLOW, gettext("To correct the" 8272 "issue, enable the bookmark_v2 feature. No " 8273 "additional\n\taction is needed if there are no " 8274 "encrypted snapshots or bookmarks.\n\tIf preserving" 8275 "the encrypted snapshots and bookmarks is required," 8276 " use\n\ta non-raw send to backup and restore them." 8277 " Alternately, they may be\n\tremoved to resolve " 8278 "the incompatibility.\n")); 8279 break; 8280 8281 default: 8282 /* 8283 * All errata which allow the pool to be imported 8284 * must contain an action message. 8285 */ 8286 assert(0); 8287 } 8288 break; 8289 8290 default: 8291 /* 8292 * The remaining errors can't actually be generated, yet. 8293 */ 8294 assert(reason == ZPOOL_STATUS_OK); 8295 } 8296 8297 if (msgid != NULL) { 8298 printf(" "); 8299 printf_color(ANSI_BOLD, gettext("see:")); 8300 printf(gettext( 8301 " https://openzfs.github.io/openzfs-docs/msg/%s\n"), 8302 msgid); 8303 } 8304 8305 if (config != NULL) { 8306 uint64_t nerr; 8307 nvlist_t **spares, **l2cache; 8308 uint_t nspares, nl2cache; 8309 pool_checkpoint_stat_t *pcs = NULL; 8310 pool_removal_stat_t *prs = NULL; 8311 8312 print_scan_status(zhp, nvroot); 8313 8314 (void) nvlist_lookup_uint64_array(nvroot, 8315 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 8316 print_removal_status(zhp, prs); 8317 8318 (void) nvlist_lookup_uint64_array(nvroot, 8319 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 8320 print_checkpoint_status(pcs); 8321 8322 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0, 8323 cbp->cb_name_flags | VDEV_NAME_TYPE_ID); 8324 if (cbp->cb_namewidth < 10) 8325 cbp->cb_namewidth = 10; 8326 8327 color_start(ANSI_BOLD); 8328 (void) printf(gettext("config:\n\n")); 8329 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"), 8330 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE", 8331 "CKSUM"); 8332 color_end(); 8333 8334 if (cbp->cb_print_slow_ios) { 8335 printf_color(ANSI_BOLD, " %5s", gettext("SLOW")); 8336 } 8337 8338 if (cbp->vcdl != NULL) 8339 print_cmd_columns(cbp->vcdl, 0); 8340 8341 printf("\n"); 8342 8343 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0, 8344 B_FALSE, NULL); 8345 8346 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP); 8347 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 8348 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS); 8349 8350 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 8351 &l2cache, &nl2cache) == 0) 8352 print_l2cache(zhp, cbp, l2cache, nl2cache); 8353 8354 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 8355 &spares, &nspares) == 0) 8356 print_spares(zhp, cbp, spares, nspares); 8357 8358 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, 8359 &nerr) == 0) { 8360 nvlist_t *nverrlist = NULL; 8361 8362 /* 8363 * If the approximate error count is small, get a 8364 * precise count by fetching the entire log and 8365 * uniquifying the results. 
8366 */ 8367 if (nerr > 0 && nerr < 100 && !cbp->cb_verbose && 8368 zpool_get_errlog(zhp, &nverrlist) == 0) { 8369 nvpair_t *elem; 8370 8371 elem = NULL; 8372 nerr = 0; 8373 while ((elem = nvlist_next_nvpair(nverrlist, 8374 elem)) != NULL) { 8375 nerr++; 8376 } 8377 } 8378 nvlist_free(nverrlist); 8379 8380 (void) printf("\n"); 8381 8382 if (nerr == 0) 8383 (void) printf(gettext("errors: No known data " 8384 "errors\n")); 8385 else if (!cbp->cb_verbose) 8386 (void) printf(gettext("errors: %llu data " 8387 "errors, use '-v' for a list\n"), 8388 (u_longlong_t)nerr); 8389 else 8390 print_error_log(zhp); 8391 } 8392 8393 if (cbp->cb_dedup_stats) 8394 print_dedup_stats(config); 8395 } else { 8396 (void) printf(gettext("config: The configuration cannot be " 8397 "determined.\n")); 8398 } 8399 8400 return (0); 8401 } 8402 8403 /* 8404 * zpool status [-c [script1,script2,...]] [-igLpPstvx] [-T d|u] [pool] ... 8405 * [interval [count]] 8406 * 8407 * -c CMD For each vdev, run command CMD 8408 * -i Display vdev initialization status. 8409 * -g Display guid for individual vdev name. 8410 * -L Follow links when resolving vdev path name. 8411 * -p Display values in parsable (exact) format. 8412 * -P Display full path for vdev name. 8413 * -s Display slow IOs column. 8414 * -v Display complete error logs 8415 * -x Display only pools with potential problems 8416 * -D Display dedup status (undocumented) 8417 * -t Display vdev TRIM status. 8418 * -T Display a timestamp in date(1) or Unix format 8419 * 8420 * Describes the health status of all pools or some subset. 8421 */ 8422 int 8423 zpool_do_status(int argc, char **argv) 8424 { 8425 int c; 8426 int ret; 8427 float interval = 0; 8428 unsigned long count = 0; 8429 status_cbdata_t cb = { 0 }; 8430 char *cmd = NULL; 8431 8432 /* check options */ 8433 while ((c = getopt(argc, argv, "c:igLpPsvxDtT:")) != -1) { 8434 switch (c) { 8435 case 'c': 8436 if (cmd != NULL) { 8437 fprintf(stderr, 8438 gettext("Can't set -c flag twice\n")); 8439 exit(1); 8440 } 8441 8442 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 8443 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 8444 fprintf(stderr, gettext( 8445 "Can't run -c, disabled by " 8446 "ZPOOL_SCRIPTS_ENABLED.\n")); 8447 exit(1); 8448 } 8449 8450 if ((getuid() <= 0 || geteuid() <= 0) && 8451 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 8452 fprintf(stderr, gettext( 8453 "Can't run -c with root privileges " 8454 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 8455 exit(1); 8456 } 8457 cmd = optarg; 8458 break; 8459 case 'i': 8460 cb.cb_print_vdev_init = B_TRUE; 8461 break; 8462 case 'g': 8463 cb.cb_name_flags |= VDEV_NAME_GUID; 8464 break; 8465 case 'L': 8466 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 8467 break; 8468 case 'p': 8469 cb.cb_literal = B_TRUE; 8470 break; 8471 case 'P': 8472 cb.cb_name_flags |= VDEV_NAME_PATH; 8473 break; 8474 case 's': 8475 cb.cb_print_slow_ios = B_TRUE; 8476 break; 8477 case 'v': 8478 cb.cb_verbose = B_TRUE; 8479 break; 8480 case 'x': 8481 cb.cb_explain = B_TRUE; 8482 break; 8483 case 'D': 8484 cb.cb_dedup_stats = B_TRUE; 8485 break; 8486 case 't': 8487 cb.cb_print_vdev_trim = B_TRUE; 8488 break; 8489 case 'T': 8490 get_timestamp_arg(*optarg); 8491 break; 8492 case '?': 8493 if (optopt == 'c') { 8494 print_zpool_script_list("status"); 8495 exit(0); 8496 } else { 8497 fprintf(stderr, 8498 gettext("invalid option '%c'\n"), optopt); 8499 } 8500 usage(B_FALSE); 8501 } 8502 } 8503 8504 argc -= optind; 8505 argv += optind; 8506 8507 get_interval_count(&argc, argv, &interval, &count); 8508 8509 
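/*
 * With no pool arguments, status is reported for every imported pool;
 * the cb_allpools flag set below also selects the "all pools are
 * healthy" summary that -x prints when it finds nothing to explain.
 */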
if (argc == 0) 8510 cb.cb_allpools = B_TRUE; 8511 8512 cb.cb_first = B_TRUE; 8513 cb.cb_print_status = B_TRUE; 8514 8515 for (;;) { 8516 if (timestamp_fmt != NODATE) 8517 print_timestamp(timestamp_fmt); 8518 8519 if (cmd != NULL) 8520 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd, 8521 NULL, NULL, 0, 0); 8522 8523 ret = for_each_pool(argc, argv, B_TRUE, NULL, cb.cb_literal, 8524 status_callback, &cb); 8525 8526 if (cb.vcdl != NULL) 8527 free_vdev_cmd_data_list(cb.vcdl); 8528 8529 if (argc == 0 && cb.cb_count == 0) 8530 (void) fprintf(stderr, gettext("no pools available\n")); 8531 else if (cb.cb_explain && cb.cb_first && cb.cb_allpools) 8532 (void) printf(gettext("all pools are healthy\n")); 8533 8534 if (ret != 0) 8535 return (ret); 8536 8537 if (interval == 0) 8538 break; 8539 8540 if (count != 0 && --count == 0) 8541 break; 8542 8543 (void) fsleep(interval); 8544 } 8545 8546 return (0); 8547 } 8548 8549 typedef struct upgrade_cbdata { 8550 int cb_first; 8551 int cb_argc; 8552 uint64_t cb_version; 8553 char **cb_argv; 8554 } upgrade_cbdata_t; 8555 8556 static int 8557 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs) 8558 { 8559 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION); 8560 int *count = (int *)unsupp_fs; 8561 8562 if (zfs_version > ZPL_VERSION) { 8563 (void) printf(gettext("%s (v%d) is not supported by this " 8564 "implementation of ZFS.\n"), 8565 zfs_get_name(zhp), zfs_version); 8566 (*count)++; 8567 } 8568 8569 zfs_iter_filesystems(zhp, check_unsupp_fs, unsupp_fs); 8570 8571 zfs_close(zhp); 8572 8573 return (0); 8574 } 8575 8576 static int 8577 upgrade_version(zpool_handle_t *zhp, uint64_t version) 8578 { 8579 int ret; 8580 nvlist_t *config; 8581 uint64_t oldversion; 8582 int unsupp_fs = 0; 8583 8584 config = zpool_get_config(zhp, NULL); 8585 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 8586 &oldversion) == 0); 8587 8588 assert(SPA_VERSION_IS_SUPPORTED(oldversion)); 8589 assert(oldversion < version); 8590 8591 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs); 8592 if (ret != 0) 8593 return (ret); 8594 8595 if (unsupp_fs) { 8596 (void) fprintf(stderr, gettext("Upgrade not performed due " 8597 "to %d unsupported filesystems (max v%d).\n"), 8598 unsupp_fs, (int)ZPL_VERSION); 8599 return (1); 8600 } 8601 8602 ret = zpool_upgrade(zhp, version); 8603 if (ret != 0) 8604 return (ret); 8605 8606 if (version >= SPA_VERSION_FEATURES) { 8607 (void) printf(gettext("Successfully upgraded " 8608 "'%s' from version %llu to feature flags.\n"), 8609 zpool_get_name(zhp), (u_longlong_t)oldversion); 8610 } else { 8611 (void) printf(gettext("Successfully upgraded " 8612 "'%s' from version %llu to version %llu.\n"), 8613 zpool_get_name(zhp), (u_longlong_t)oldversion, 8614 (u_longlong_t)version); 8615 } 8616 8617 return (0); 8618 } 8619 8620 static int 8621 upgrade_enable_all(zpool_handle_t *zhp, int *countp) 8622 { 8623 int i, ret, count; 8624 boolean_t firstff = B_TRUE; 8625 nvlist_t *enabled = zpool_get_features(zhp); 8626 8627 count = 0; 8628 for (i = 0; i < SPA_FEATURES; i++) { 8629 const char *fname = spa_feature_table[i].fi_uname; 8630 const char *fguid = spa_feature_table[i].fi_guid; 8631 if (!nvlist_exists(enabled, fguid)) { 8632 char *propname; 8633 verify(-1 != asprintf(&propname, "feature@%s", fname)); 8634 ret = zpool_set_prop(zhp, propname, 8635 ZFS_FEATURE_ENABLED); 8636 if (ret != 0) { 8637 free(propname); 8638 return (ret); 8639 } 8640 count++; 8641 8642 if (firstff) { 8643 (void) printf(gettext("Enabled the " 8644 "following 
features on '%s':\n"), 8645 zpool_get_name(zhp)); 8646 firstff = B_FALSE; 8647 } 8648 (void) printf(gettext(" %s\n"), fname); 8649 free(propname); 8650 } 8651 } 8652 8653 if (countp != NULL) 8654 *countp = count; 8655 return (0); 8656 } 8657 8658 static int 8659 upgrade_cb(zpool_handle_t *zhp, void *arg) 8660 { 8661 upgrade_cbdata_t *cbp = arg; 8662 nvlist_t *config; 8663 uint64_t version; 8664 boolean_t printnl = B_FALSE; 8665 int ret; 8666 8667 config = zpool_get_config(zhp, NULL); 8668 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 8669 &version) == 0); 8670 8671 assert(SPA_VERSION_IS_SUPPORTED(version)); 8672 8673 if (version < cbp->cb_version) { 8674 cbp->cb_first = B_FALSE; 8675 ret = upgrade_version(zhp, cbp->cb_version); 8676 if (ret != 0) 8677 return (ret); 8678 printnl = B_TRUE; 8679 8680 /* 8681 * If they did "zpool upgrade -a", then we could 8682 * be doing ioctls to different pools. We need 8683 * to log this history once to each pool, and bypass 8684 * the normal history logging that happens in main(). 8685 */ 8686 (void) zpool_log_history(g_zfs, history_str); 8687 log_history = B_FALSE; 8688 } 8689 8690 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 8691 int count; 8692 ret = upgrade_enable_all(zhp, &count); 8693 if (ret != 0) 8694 return (ret); 8695 8696 if (count > 0) { 8697 cbp->cb_first = B_FALSE; 8698 printnl = B_TRUE; 8699 } 8700 } 8701 8702 if (printnl) { 8703 (void) printf(gettext("\n")); 8704 } 8705 8706 return (0); 8707 } 8708 8709 static int 8710 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg) 8711 { 8712 upgrade_cbdata_t *cbp = arg; 8713 nvlist_t *config; 8714 uint64_t version; 8715 8716 config = zpool_get_config(zhp, NULL); 8717 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 8718 &version) == 0); 8719 8720 assert(SPA_VERSION_IS_SUPPORTED(version)); 8721 8722 if (version < SPA_VERSION_FEATURES) { 8723 if (cbp->cb_first) { 8724 (void) printf(gettext("The following pools are " 8725 "formatted with legacy version numbers and can\n" 8726 "be upgraded to use feature flags. After " 8727 "being upgraded, these pools\nwill no " 8728 "longer be accessible by software that does not " 8729 "support feature\nflags.\n\n")); 8730 (void) printf(gettext("VER POOL\n")); 8731 (void) printf(gettext("--- ------------\n")); 8732 cbp->cb_first = B_FALSE; 8733 } 8734 8735 (void) printf("%2llu %s\n", (u_longlong_t)version, 8736 zpool_get_name(zhp)); 8737 } 8738 8739 return (0); 8740 } 8741 8742 static int 8743 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg) 8744 { 8745 upgrade_cbdata_t *cbp = arg; 8746 nvlist_t *config; 8747 uint64_t version; 8748 8749 config = zpool_get_config(zhp, NULL); 8750 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 8751 &version) == 0); 8752 8753 if (version >= SPA_VERSION_FEATURES) { 8754 int i; 8755 boolean_t poolfirst = B_TRUE; 8756 nvlist_t *enabled = zpool_get_features(zhp); 8757 8758 for (i = 0; i < SPA_FEATURES; i++) { 8759 const char *fguid = spa_feature_table[i].fi_guid; 8760 const char *fname = spa_feature_table[i].fi_uname; 8761 if (!nvlist_exists(enabled, fguid)) { 8762 if (cbp->cb_first) { 8763 (void) printf(gettext("\nSome " 8764 "supported features are not " 8765 "enabled on the following pools. " 8766 "Once a\nfeature is enabled the " 8767 "pool may become incompatible with " 8768 "software\nthat does not support " 8769 "the feature. 
See " 8770 "zpool-features(5) for " 8771 "details.\n\n")); 8772 (void) printf(gettext("POOL " 8773 "FEATURE\n")); 8774 (void) printf(gettext("------" 8775 "---------\n")); 8776 cbp->cb_first = B_FALSE; 8777 } 8778 8779 if (poolfirst) { 8780 (void) printf(gettext("%s\n"), 8781 zpool_get_name(zhp)); 8782 poolfirst = B_FALSE; 8783 } 8784 8785 (void) printf(gettext(" %s\n"), fname); 8786 } 8787 /* 8788 * If they did "zpool upgrade -a", then we could 8789 * be doing ioctls to different pools. We need 8790 * to log this history once to each pool, and bypass 8791 * the normal history logging that happens in main(). 8792 */ 8793 (void) zpool_log_history(g_zfs, history_str); 8794 log_history = B_FALSE; 8795 } 8796 } 8797 8798 return (0); 8799 } 8800 8801 /* ARGSUSED */ 8802 static int 8803 upgrade_one(zpool_handle_t *zhp, void *data) 8804 { 8805 boolean_t printnl = B_FALSE; 8806 upgrade_cbdata_t *cbp = data; 8807 uint64_t cur_version; 8808 int ret; 8809 8810 if (strcmp("log", zpool_get_name(zhp)) == 0) { 8811 (void) fprintf(stderr, gettext("'log' is now a reserved word\n" 8812 "Pool 'log' must be renamed using export and import" 8813 " to upgrade.\n")); 8814 return (1); 8815 } 8816 8817 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 8818 if (cur_version > cbp->cb_version) { 8819 (void) printf(gettext("Pool '%s' is already formatted " 8820 "using more current version '%llu'.\n\n"), 8821 zpool_get_name(zhp), (u_longlong_t)cur_version); 8822 return (0); 8823 } 8824 8825 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) { 8826 (void) printf(gettext("Pool '%s' is already formatted " 8827 "using version %llu.\n\n"), zpool_get_name(zhp), 8828 (u_longlong_t)cbp->cb_version); 8829 return (0); 8830 } 8831 8832 if (cur_version != cbp->cb_version) { 8833 printnl = B_TRUE; 8834 ret = upgrade_version(zhp, cbp->cb_version); 8835 if (ret != 0) 8836 return (ret); 8837 } 8838 8839 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 8840 int count = 0; 8841 ret = upgrade_enable_all(zhp, &count); 8842 if (ret != 0) 8843 return (ret); 8844 8845 if (count != 0) { 8846 printnl = B_TRUE; 8847 } else if (cur_version == SPA_VERSION) { 8848 (void) printf(gettext("Pool '%s' already has all " 8849 "supported features enabled.\n"), 8850 zpool_get_name(zhp)); 8851 } 8852 } 8853 8854 if (printnl) { 8855 (void) printf(gettext("\n")); 8856 } 8857 8858 return (0); 8859 } 8860 8861 /* 8862 * zpool upgrade 8863 * zpool upgrade -v 8864 * zpool upgrade [-V version] <-a | pool ...> 8865 * 8866 * With no arguments, display downrev'd ZFS pool available for upgrade. 8867 * Individual pools can be upgraded by specifying the pool, and '-a' will 8868 * upgrade all pools. 
8869 */ 8870 int 8871 zpool_do_upgrade(int argc, char **argv) 8872 { 8873 int c; 8874 upgrade_cbdata_t cb = { 0 }; 8875 int ret = 0; 8876 boolean_t showversions = B_FALSE; 8877 boolean_t upgradeall = B_FALSE; 8878 char *end; 8879 8880 8881 /* check options */ 8882 while ((c = getopt(argc, argv, ":avV:")) != -1) { 8883 switch (c) { 8884 case 'a': 8885 upgradeall = B_TRUE; 8886 break; 8887 case 'v': 8888 showversions = B_TRUE; 8889 break; 8890 case 'V': 8891 cb.cb_version = strtoll(optarg, &end, 10); 8892 if (*end != '\0' || 8893 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) { 8894 (void) fprintf(stderr, 8895 gettext("invalid version '%s'\n"), optarg); 8896 usage(B_FALSE); 8897 } 8898 break; 8899 case ':': 8900 (void) fprintf(stderr, gettext("missing argument for " 8901 "'%c' option\n"), optopt); 8902 usage(B_FALSE); 8903 break; 8904 case '?': 8905 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8906 optopt); 8907 usage(B_FALSE); 8908 } 8909 } 8910 8911 cb.cb_argc = argc; 8912 cb.cb_argv = argv; 8913 argc -= optind; 8914 argv += optind; 8915 8916 if (cb.cb_version == 0) { 8917 cb.cb_version = SPA_VERSION; 8918 } else if (!upgradeall && argc == 0) { 8919 (void) fprintf(stderr, gettext("-V option is " 8920 "incompatible with other arguments\n")); 8921 usage(B_FALSE); 8922 } 8923 8924 if (showversions) { 8925 if (upgradeall || argc != 0) { 8926 (void) fprintf(stderr, gettext("-v option is " 8927 "incompatible with other arguments\n")); 8928 usage(B_FALSE); 8929 } 8930 } else if (upgradeall) { 8931 if (argc != 0) { 8932 (void) fprintf(stderr, gettext("-a option should not " 8933 "be used along with a pool name\n")); 8934 usage(B_FALSE); 8935 } 8936 } 8937 8938 (void) printf(gettext("This system supports ZFS pool feature " 8939 "flags.\n\n")); 8940 if (showversions) { 8941 int i; 8942 8943 (void) printf(gettext("The following features are " 8944 "supported:\n\n")); 8945 (void) printf(gettext("FEAT DESCRIPTION\n")); 8946 (void) printf("----------------------------------------------" 8947 "---------------\n"); 8948 for (i = 0; i < SPA_FEATURES; i++) { 8949 zfeature_info_t *fi = &spa_feature_table[i]; 8950 const char *ro = 8951 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ? 
8952 " (read-only compatible)" : ""; 8953 8954 (void) printf("%-37s%s\n", fi->fi_uname, ro); 8955 (void) printf(" %s\n", fi->fi_desc); 8956 } 8957 (void) printf("\n"); 8958 8959 (void) printf(gettext("The following legacy versions are also " 8960 "supported:\n\n")); 8961 (void) printf(gettext("VER DESCRIPTION\n")); 8962 (void) printf("--- -----------------------------------------" 8963 "---------------\n"); 8964 (void) printf(gettext(" 1 Initial ZFS version\n")); 8965 (void) printf(gettext(" 2 Ditto blocks " 8966 "(replicated metadata)\n")); 8967 (void) printf(gettext(" 3 Hot spares and double parity " 8968 "RAID-Z\n")); 8969 (void) printf(gettext(" 4 zpool history\n")); 8970 (void) printf(gettext(" 5 Compression using the gzip " 8971 "algorithm\n")); 8972 (void) printf(gettext(" 6 bootfs pool property\n")); 8973 (void) printf(gettext(" 7 Separate intent log devices\n")); 8974 (void) printf(gettext(" 8 Delegated administration\n")); 8975 (void) printf(gettext(" 9 refquota and refreservation " 8976 "properties\n")); 8977 (void) printf(gettext(" 10 Cache devices\n")); 8978 (void) printf(gettext(" 11 Improved scrub performance\n")); 8979 (void) printf(gettext(" 12 Snapshot properties\n")); 8980 (void) printf(gettext(" 13 snapused property\n")); 8981 (void) printf(gettext(" 14 passthrough-x aclinherit\n")); 8982 (void) printf(gettext(" 15 user/group space accounting\n")); 8983 (void) printf(gettext(" 16 stmf property support\n")); 8984 (void) printf(gettext(" 17 Triple-parity RAID-Z\n")); 8985 (void) printf(gettext(" 18 Snapshot user holds\n")); 8986 (void) printf(gettext(" 19 Log device removal\n")); 8987 (void) printf(gettext(" 20 Compression using zle " 8988 "(zero-length encoding)\n")); 8989 (void) printf(gettext(" 21 Deduplication\n")); 8990 (void) printf(gettext(" 22 Received properties\n")); 8991 (void) printf(gettext(" 23 Slim ZIL\n")); 8992 (void) printf(gettext(" 24 System attributes\n")); 8993 (void) printf(gettext(" 25 Improved scrub stats\n")); 8994 (void) printf(gettext(" 26 Improved snapshot deletion " 8995 "performance\n")); 8996 (void) printf(gettext(" 27 Improved snapshot creation " 8997 "performance\n")); 8998 (void) printf(gettext(" 28 Multiple vdev replacements\n")); 8999 (void) printf(gettext("\nFor more information on a particular " 9000 "version, including supported releases,\n")); 9001 (void) printf(gettext("see the ZFS Administration Guide.\n\n")); 9002 } else if (argc == 0 && upgradeall) { 9003 cb.cb_first = B_TRUE; 9004 ret = zpool_iter(g_zfs, upgrade_cb, &cb); 9005 if (ret == 0 && cb.cb_first) { 9006 if (cb.cb_version == SPA_VERSION) { 9007 (void) printf(gettext("All pools are already " 9008 "formatted using feature flags.\n\n")); 9009 (void) printf(gettext("Every feature flags " 9010 "pool already has all supported features " 9011 "enabled.\n")); 9012 } else { 9013 (void) printf(gettext("All pools are already " 9014 "formatted with version %llu or higher.\n"), 9015 (u_longlong_t)cb.cb_version); 9016 } 9017 } 9018 } else if (argc == 0) { 9019 cb.cb_first = B_TRUE; 9020 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb); 9021 assert(ret == 0); 9022 9023 if (cb.cb_first) { 9024 (void) printf(gettext("All pools are formatted " 9025 "using feature flags.\n\n")); 9026 } else { 9027 (void) printf(gettext("\nUse 'zpool upgrade -v' " 9028 "for a list of available legacy versions.\n")); 9029 } 9030 9031 cb.cb_first = B_TRUE; 9032 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb); 9033 assert(ret == 0); 9034 9035 if (cb.cb_first) { 9036 (void) printf(gettext("Every 
feature flags pool has " 9037 "all supported features enabled.\n")); 9038 } else { 9039 (void) printf(gettext("\n")); 9040 } 9041 } else { 9042 ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, 9043 upgrade_one, &cb); 9044 } 9045 9046 return (ret); 9047 } 9048 9049 typedef struct hist_cbdata { 9050 boolean_t first; 9051 boolean_t longfmt; 9052 boolean_t internal; 9053 } hist_cbdata_t; 9054 9055 static void 9056 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb) 9057 { 9058 nvlist_t **records; 9059 uint_t numrecords; 9060 int i; 9061 9062 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD, 9063 &records, &numrecords) == 0); 9064 for (i = 0; i < numrecords; i++) { 9065 nvlist_t *rec = records[i]; 9066 char tbuf[30] = ""; 9067 9068 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) { 9069 time_t tsec; 9070 struct tm t; 9071 9072 tsec = fnvlist_lookup_uint64(records[i], 9073 ZPOOL_HIST_TIME); 9074 (void) localtime_r(&tsec, &t); 9075 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 9076 } 9077 9078 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) { 9079 (void) printf("%s %s", tbuf, 9080 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD)); 9081 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) { 9082 int ievent = 9083 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT); 9084 if (!cb->internal) 9085 continue; 9086 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) { 9087 (void) printf("%s unrecognized record:\n", 9088 tbuf); 9089 dump_nvlist(rec, 4); 9090 continue; 9091 } 9092 (void) printf("%s [internal %s txg:%lld] %s", tbuf, 9093 zfs_history_event_names[ievent], 9094 (longlong_t)fnvlist_lookup_uint64( 9095 rec, ZPOOL_HIST_TXG), 9096 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR)); 9097 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) { 9098 if (!cb->internal) 9099 continue; 9100 (void) printf("%s [txg:%lld] %s", tbuf, 9101 (longlong_t)fnvlist_lookup_uint64( 9102 rec, ZPOOL_HIST_TXG), 9103 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME)); 9104 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) { 9105 (void) printf(" %s (%llu)", 9106 fnvlist_lookup_string(rec, 9107 ZPOOL_HIST_DSNAME), 9108 (u_longlong_t)fnvlist_lookup_uint64(rec, 9109 ZPOOL_HIST_DSID)); 9110 } 9111 (void) printf(" %s", fnvlist_lookup_string(rec, 9112 ZPOOL_HIST_INT_STR)); 9113 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) { 9114 if (!cb->internal) 9115 continue; 9116 (void) printf("%s ioctl %s\n", tbuf, 9117 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL)); 9118 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) { 9119 (void) printf(" input:\n"); 9120 dump_nvlist(fnvlist_lookup_nvlist(rec, 9121 ZPOOL_HIST_INPUT_NVL), 8); 9122 } 9123 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) { 9124 (void) printf(" output:\n"); 9125 dump_nvlist(fnvlist_lookup_nvlist(rec, 9126 ZPOOL_HIST_OUTPUT_NVL), 8); 9127 } 9128 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) { 9129 (void) printf(" output nvlist omitted; " 9130 "original size: %lldKB\n", 9131 (longlong_t)fnvlist_lookup_int64(rec, 9132 ZPOOL_HIST_OUTPUT_SIZE) / 1024); 9133 } 9134 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) { 9135 (void) printf(" errno: %lld\n", 9136 (longlong_t)fnvlist_lookup_int64(rec, 9137 ZPOOL_HIST_ERRNO)); 9138 } 9139 } else { 9140 if (!cb->internal) 9141 continue; 9142 (void) printf("%s unrecognized record:\n", tbuf); 9143 dump_nvlist(rec, 4); 9144 } 9145 9146 if (!cb->longfmt) { 9147 (void) printf("\n"); 9148 continue; 9149 } 9150 (void) printf(" ["); 9151 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) { 9152 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO); 9153 struct passwd *pwd = 
getpwuid(who); 9154 (void) printf("user %d ", (int)who); 9155 if (pwd != NULL) 9156 (void) printf("(%s) ", pwd->pw_name); 9157 } 9158 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) { 9159 (void) printf("on %s", 9160 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST)); 9161 } 9162 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) { 9163 (void) printf(":%s", 9164 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE)); 9165 } 9166 9167 (void) printf("]"); 9168 (void) printf("\n"); 9169 } 9170 } 9171 9172 /* 9173 * Print out the command history for a specific pool. 9174 */ 9175 static int 9176 get_history_one(zpool_handle_t *zhp, void *data) 9177 { 9178 nvlist_t *nvhis; 9179 int ret; 9180 hist_cbdata_t *cb = (hist_cbdata_t *)data; 9181 uint64_t off = 0; 9182 boolean_t eof = B_FALSE; 9183 9184 cb->first = B_FALSE; 9185 9186 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp)); 9187 9188 while (!eof) { 9189 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0) 9190 return (ret); 9191 9192 print_history_records(nvhis, cb); 9193 nvlist_free(nvhis); 9194 } 9195 (void) printf("\n"); 9196 9197 return (ret); 9198 } 9199 9200 /* 9201 * zpool history <pool> 9202 * 9203 * Displays the history of commands that modified pools. 9204 */ 9205 int 9206 zpool_do_history(int argc, char **argv) 9207 { 9208 hist_cbdata_t cbdata = { 0 }; 9209 int ret; 9210 int c; 9211 9212 cbdata.first = B_TRUE; 9213 /* check options */ 9214 while ((c = getopt(argc, argv, "li")) != -1) { 9215 switch (c) { 9216 case 'l': 9217 cbdata.longfmt = B_TRUE; 9218 break; 9219 case 'i': 9220 cbdata.internal = B_TRUE; 9221 break; 9222 case '?': 9223 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 9224 optopt); 9225 usage(B_FALSE); 9226 } 9227 } 9228 argc -= optind; 9229 argv += optind; 9230 9231 ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, get_history_one, 9232 &cbdata); 9233 9234 if (argc == 0 && cbdata.first == B_TRUE) { 9235 (void) fprintf(stderr, gettext("no pools available\n")); 9236 return (0); 9237 } 9238 9239 return (ret); 9240 } 9241 9242 typedef struct ev_opts { 9243 int verbose; 9244 int scripted; 9245 int follow; 9246 int clear; 9247 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 9248 } ev_opts_t; 9249 9250 static void 9251 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts) 9252 { 9253 char ctime_str[26], str[32], *ptr; 9254 int64_t *tv; 9255 uint_t n; 9256 9257 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0); 9258 memset(str, ' ', 32); 9259 (void) ctime_r((const time_t *)&tv[0], ctime_str); 9260 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */ 9261 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */ 9262 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */ 9263 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */ 9264 if (opts->scripted) 9265 (void) printf(gettext("%s\t"), str); 9266 else 9267 (void) printf(gettext("%s "), str); 9268 9269 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0); 9270 (void) printf(gettext("%s\n"), ptr); 9271 } 9272 9273 static void 9274 zpool_do_events_nvprint(nvlist_t *nvl, int depth) 9275 { 9276 nvpair_t *nvp; 9277 9278 for (nvp = nvlist_next_nvpair(nvl, NULL); 9279 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) { 9280 9281 data_type_t type = nvpair_type(nvp); 9282 const char *name = nvpair_name(nvp); 9283 9284 boolean_t b; 9285 uint8_t i8; 9286 uint16_t i16; 9287 uint32_t i32; 9288 uint64_t i64; 9289 char *str; 9290 nvlist_t *cnv; 9291 9292 printf(gettext("%*s%s = "), depth, "", name); 9293 9294 switch (type) { 9295 case DATA_TYPE_BOOLEAN: 
9296 printf(gettext("%s"), "1"); 9297 break; 9298 9299 case DATA_TYPE_BOOLEAN_VALUE: 9300 (void) nvpair_value_boolean_value(nvp, &b); 9301 printf(gettext("%s"), b ? "1" : "0"); 9302 break; 9303 9304 case DATA_TYPE_BYTE: 9305 (void) nvpair_value_byte(nvp, &i8); 9306 printf(gettext("0x%x"), i8); 9307 break; 9308 9309 case DATA_TYPE_INT8: 9310 (void) nvpair_value_int8(nvp, (void *)&i8); 9311 printf(gettext("0x%x"), i8); 9312 break; 9313 9314 case DATA_TYPE_UINT8: 9315 (void) nvpair_value_uint8(nvp, &i8); 9316 printf(gettext("0x%x"), i8); 9317 break; 9318 9319 case DATA_TYPE_INT16: 9320 (void) nvpair_value_int16(nvp, (void *)&i16); 9321 printf(gettext("0x%x"), i16); 9322 break; 9323 9324 case DATA_TYPE_UINT16: 9325 (void) nvpair_value_uint16(nvp, &i16); 9326 printf(gettext("0x%x"), i16); 9327 break; 9328 9329 case DATA_TYPE_INT32: 9330 (void) nvpair_value_int32(nvp, (void *)&i32); 9331 printf(gettext("0x%x"), i32); 9332 break; 9333 9334 case DATA_TYPE_UINT32: 9335 (void) nvpair_value_uint32(nvp, &i32); 9336 printf(gettext("0x%x"), i32); 9337 break; 9338 9339 case DATA_TYPE_INT64: 9340 (void) nvpair_value_int64(nvp, (void *)&i64); 9341 printf(gettext("0x%llx"), (u_longlong_t)i64); 9342 break; 9343 9344 case DATA_TYPE_UINT64: 9345 (void) nvpair_value_uint64(nvp, &i64); 9346 /* 9347 * translate vdev state values to readable 9348 * strings to aide zpool events consumers 9349 */ 9350 if (strcmp(name, 9351 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 || 9352 strcmp(name, 9353 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) { 9354 printf(gettext("\"%s\" (0x%llx)"), 9355 zpool_state_to_name(i64, VDEV_AUX_NONE), 9356 (u_longlong_t)i64); 9357 } else { 9358 printf(gettext("0x%llx"), (u_longlong_t)i64); 9359 } 9360 break; 9361 9362 case DATA_TYPE_HRTIME: 9363 (void) nvpair_value_hrtime(nvp, (void *)&i64); 9364 printf(gettext("0x%llx"), (u_longlong_t)i64); 9365 break; 9366 9367 case DATA_TYPE_STRING: 9368 (void) nvpair_value_string(nvp, &str); 9369 printf(gettext("\"%s\""), str ? 
str : "<NULL>"); 9370 break; 9371 9372 case DATA_TYPE_NVLIST: 9373 printf(gettext("(embedded nvlist)\n")); 9374 (void) nvpair_value_nvlist(nvp, &cnv); 9375 zpool_do_events_nvprint(cnv, depth + 8); 9376 printf(gettext("%*s(end %s)"), depth, "", name); 9377 break; 9378 9379 case DATA_TYPE_NVLIST_ARRAY: { 9380 nvlist_t **val; 9381 uint_t i, nelem; 9382 9383 (void) nvpair_value_nvlist_array(nvp, &val, &nelem); 9384 printf(gettext("(%d embedded nvlists)\n"), nelem); 9385 for (i = 0; i < nelem; i++) { 9386 printf(gettext("%*s%s[%d] = %s\n"), 9387 depth, "", name, i, "(embedded nvlist)"); 9388 zpool_do_events_nvprint(val[i], depth + 8); 9389 printf(gettext("%*s(end %s[%i])\n"), 9390 depth, "", name, i); 9391 } 9392 printf(gettext("%*s(end %s)\n"), depth, "", name); 9393 } 9394 break; 9395 9396 case DATA_TYPE_INT8_ARRAY: { 9397 int8_t *val; 9398 uint_t i, nelem; 9399 9400 (void) nvpair_value_int8_array(nvp, &val, &nelem); 9401 for (i = 0; i < nelem; i++) 9402 printf(gettext("0x%x "), val[i]); 9403 9404 break; 9405 } 9406 9407 case DATA_TYPE_UINT8_ARRAY: { 9408 uint8_t *val; 9409 uint_t i, nelem; 9410 9411 (void) nvpair_value_uint8_array(nvp, &val, &nelem); 9412 for (i = 0; i < nelem; i++) 9413 printf(gettext("0x%x "), val[i]); 9414 9415 break; 9416 } 9417 9418 case DATA_TYPE_INT16_ARRAY: { 9419 int16_t *val; 9420 uint_t i, nelem; 9421 9422 (void) nvpair_value_int16_array(nvp, &val, &nelem); 9423 for (i = 0; i < nelem; i++) 9424 printf(gettext("0x%x "), val[i]); 9425 9426 break; 9427 } 9428 9429 case DATA_TYPE_UINT16_ARRAY: { 9430 uint16_t *val; 9431 uint_t i, nelem; 9432 9433 (void) nvpair_value_uint16_array(nvp, &val, &nelem); 9434 for (i = 0; i < nelem; i++) 9435 printf(gettext("0x%x "), val[i]); 9436 9437 break; 9438 } 9439 9440 case DATA_TYPE_INT32_ARRAY: { 9441 int32_t *val; 9442 uint_t i, nelem; 9443 9444 (void) nvpair_value_int32_array(nvp, &val, &nelem); 9445 for (i = 0; i < nelem; i++) 9446 printf(gettext("0x%x "), val[i]); 9447 9448 break; 9449 } 9450 9451 case DATA_TYPE_UINT32_ARRAY: { 9452 uint32_t *val; 9453 uint_t i, nelem; 9454 9455 (void) nvpair_value_uint32_array(nvp, &val, &nelem); 9456 for (i = 0; i < nelem; i++) 9457 printf(gettext("0x%x "), val[i]); 9458 9459 break; 9460 } 9461 9462 case DATA_TYPE_INT64_ARRAY: { 9463 int64_t *val; 9464 uint_t i, nelem; 9465 9466 (void) nvpair_value_int64_array(nvp, &val, &nelem); 9467 for (i = 0; i < nelem; i++) 9468 printf(gettext("0x%llx "), 9469 (u_longlong_t)val[i]); 9470 9471 break; 9472 } 9473 9474 case DATA_TYPE_UINT64_ARRAY: { 9475 uint64_t *val; 9476 uint_t i, nelem; 9477 9478 (void) nvpair_value_uint64_array(nvp, &val, &nelem); 9479 for (i = 0; i < nelem; i++) 9480 printf(gettext("0x%llx "), 9481 (u_longlong_t)val[i]); 9482 9483 break; 9484 } 9485 9486 case DATA_TYPE_STRING_ARRAY: { 9487 char **str; 9488 uint_t i, nelem; 9489 9490 (void) nvpair_value_string_array(nvp, &str, &nelem); 9491 for (i = 0; i < nelem; i++) 9492 printf(gettext("\"%s\" "), 9493 str[i] ? 
str[i] : "<NULL>"); 9494 9495 break; 9496 } 9497 9498 case DATA_TYPE_BOOLEAN_ARRAY: 9499 case DATA_TYPE_BYTE_ARRAY: 9500 case DATA_TYPE_DOUBLE: 9501 case DATA_TYPE_DONTCARE: 9502 case DATA_TYPE_UNKNOWN: 9503 printf(gettext("<unknown>")); 9504 break; 9505 } 9506 9507 printf(gettext("\n")); 9508 } 9509 } 9510 9511 static int 9512 zpool_do_events_next(ev_opts_t *opts) 9513 { 9514 nvlist_t *nvl; 9515 int zevent_fd, ret, dropped; 9516 char *pool; 9517 9518 zevent_fd = open(ZFS_DEV, O_RDWR); 9519 VERIFY(zevent_fd >= 0); 9520 9521 if (!opts->scripted) 9522 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS"); 9523 9524 while (1) { 9525 ret = zpool_events_next(g_zfs, &nvl, &dropped, 9526 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd); 9527 if (ret || nvl == NULL) 9528 break; 9529 9530 if (dropped > 0) 9531 (void) printf(gettext("dropped %d events\n"), dropped); 9532 9533 if (strlen(opts->poolname) > 0 && 9534 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 && 9535 strcmp(opts->poolname, pool) != 0) 9536 continue; 9537 9538 zpool_do_events_short(nvl, opts); 9539 9540 if (opts->verbose) { 9541 zpool_do_events_nvprint(nvl, 8); 9542 printf(gettext("\n")); 9543 } 9544 (void) fflush(stdout); 9545 9546 nvlist_free(nvl); 9547 } 9548 9549 VERIFY(0 == close(zevent_fd)); 9550 9551 return (ret); 9552 } 9553 9554 static int 9555 zpool_do_events_clear(ev_opts_t *opts) 9556 { 9557 int count, ret; 9558 9559 ret = zpool_events_clear(g_zfs, &count); 9560 if (!ret) 9561 (void) printf(gettext("cleared %d events\n"), count); 9562 9563 return (ret); 9564 } 9565 9566 /* 9567 * zpool events [-vHf [pool] | -c] 9568 * 9569 * Displays events logs by ZFS. 9570 */ 9571 int 9572 zpool_do_events(int argc, char **argv) 9573 { 9574 ev_opts_t opts = { 0 }; 9575 int ret; 9576 int c; 9577 9578 /* check options */ 9579 while ((c = getopt(argc, argv, "vHfc")) != -1) { 9580 switch (c) { 9581 case 'v': 9582 opts.verbose = 1; 9583 break; 9584 case 'H': 9585 opts.scripted = 1; 9586 break; 9587 case 'f': 9588 opts.follow = 1; 9589 break; 9590 case 'c': 9591 opts.clear = 1; 9592 break; 9593 case '?': 9594 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 9595 optopt); 9596 usage(B_FALSE); 9597 } 9598 } 9599 argc -= optind; 9600 argv += optind; 9601 9602 if (argc > 1) { 9603 (void) fprintf(stderr, gettext("too many arguments\n")); 9604 usage(B_FALSE); 9605 } else if (argc == 1) { 9606 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname)); 9607 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) { 9608 (void) fprintf(stderr, 9609 gettext("invalid pool name '%s'\n"), opts.poolname); 9610 usage(B_FALSE); 9611 } 9612 } 9613 9614 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) && 9615 opts.clear) { 9616 (void) fprintf(stderr, 9617 gettext("invalid options combined with -c\n")); 9618 usage(B_FALSE); 9619 } 9620 9621 if (opts.clear) 9622 ret = zpool_do_events_clear(&opts); 9623 else 9624 ret = zpool_do_events_next(&opts); 9625 9626 return (ret); 9627 } 9628 9629 static int 9630 get_callback(zpool_handle_t *zhp, void *data) 9631 { 9632 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 9633 char value[MAXNAMELEN]; 9634 zprop_source_t srctype; 9635 zprop_list_t *pl; 9636 9637 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { 9638 9639 /* 9640 * Skip the special fake placeholder. This will also skip 9641 * over the name property when 'all' is specified. 
9642 */ 9643 if (pl->pl_prop == ZPOOL_PROP_NAME && 9644 pl == cbp->cb_proplist) 9645 continue; 9646 9647 if (pl->pl_prop == ZPROP_INVAL && 9648 (zpool_prop_feature(pl->pl_user_prop) || 9649 zpool_prop_unsupported(pl->pl_user_prop))) { 9650 srctype = ZPROP_SRC_LOCAL; 9651 9652 if (zpool_prop_get_feature(zhp, pl->pl_user_prop, 9653 value, sizeof (value)) == 0) { 9654 zprop_print_one_property(zpool_get_name(zhp), 9655 cbp, pl->pl_user_prop, value, srctype, 9656 NULL, NULL); 9657 } 9658 } else { 9659 if (zpool_get_prop(zhp, pl->pl_prop, value, 9660 sizeof (value), &srctype, cbp->cb_literal) != 0) 9661 continue; 9662 9663 zprop_print_one_property(zpool_get_name(zhp), cbp, 9664 zpool_prop_to_name(pl->pl_prop), value, srctype, 9665 NULL, NULL); 9666 } 9667 } 9668 return (0); 9669 } 9670 9671 /* 9672 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ... 9673 * 9674 * -H Scripted mode. Don't display headers, and separate properties 9675 * by a single tab. 9676 * -o List of columns to display. Defaults to 9677 * "name,property,value,source". 9678 * -p Display values in parsable (exact) format. 9679 * 9680 * Get properties of pools in the system. Output space statistics 9681 * for each one as well as other attributes. 9682 */ 9683 int 9684 zpool_do_get(int argc, char **argv) 9685 { 9686 zprop_get_cbdata_t cb = { 0 }; 9687 zprop_list_t fake_name = { 0 }; 9688 int ret; 9689 int c, i; 9690 char *value; 9691 9692 cb.cb_first = B_TRUE; 9693 9694 /* 9695 * Set up default columns and sources. 9696 */ 9697 cb.cb_sources = ZPROP_SRC_ALL; 9698 cb.cb_columns[0] = GET_COL_NAME; 9699 cb.cb_columns[1] = GET_COL_PROPERTY; 9700 cb.cb_columns[2] = GET_COL_VALUE; 9701 cb.cb_columns[3] = GET_COL_SOURCE; 9702 cb.cb_type = ZFS_TYPE_POOL; 9703 9704 /* check options */ 9705 while ((c = getopt(argc, argv, ":Hpo:")) != -1) { 9706 switch (c) { 9707 case 'p': 9708 cb.cb_literal = B_TRUE; 9709 break; 9710 case 'H': 9711 cb.cb_scripted = B_TRUE; 9712 break; 9713 case 'o': 9714 bzero(&cb.cb_columns, sizeof (cb.cb_columns)); 9715 i = 0; 9716 while (*optarg != '\0') { 9717 static char *col_subopts[] = 9718 { "name", "property", "value", "source", 9719 "all", NULL }; 9720 9721 if (i == ZFS_GET_NCOLS) { 9722 (void) fprintf(stderr, gettext("too " 9723 "many fields given to -o " 9724 "option\n")); 9725 usage(B_FALSE); 9726 } 9727 9728 switch (getsubopt(&optarg, col_subopts, 9729 &value)) { 9730 case 0: 9731 cb.cb_columns[i++] = GET_COL_NAME; 9732 break; 9733 case 1: 9734 cb.cb_columns[i++] = GET_COL_PROPERTY; 9735 break; 9736 case 2: 9737 cb.cb_columns[i++] = GET_COL_VALUE; 9738 break; 9739 case 3: 9740 cb.cb_columns[i++] = GET_COL_SOURCE; 9741 break; 9742 case 4: 9743 if (i > 0) { 9744 (void) fprintf(stderr, 9745 gettext("\"all\" conflicts " 9746 "with specific fields " 9747 "given to -o option\n")); 9748 usage(B_FALSE); 9749 } 9750 cb.cb_columns[0] = GET_COL_NAME; 9751 cb.cb_columns[1] = GET_COL_PROPERTY; 9752 cb.cb_columns[2] = GET_COL_VALUE; 9753 cb.cb_columns[3] = GET_COL_SOURCE; 9754 i = ZFS_GET_NCOLS; 9755 break; 9756 default: 9757 (void) fprintf(stderr, 9758 gettext("invalid column name " 9759 "'%s'\n"), value); 9760 usage(B_FALSE); 9761 } 9762 } 9763 break; 9764 case '?': 9765 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 9766 optopt); 9767 usage(B_FALSE); 9768 } 9769 } 9770 9771 argc -= optind; 9772 argv += optind; 9773 9774 if (argc < 1) { 9775 (void) fprintf(stderr, gettext("missing property " 9776 "argument\n")); 9777 usage(B_FALSE); 9778 } 9779 9780 if (zprop_get_list(g_zfs, argv[0], 
&cb.cb_proplist, 9781 ZFS_TYPE_POOL) != 0) 9782 usage(B_FALSE); 9783 9784 argc--; 9785 argv++; 9786 9787 if (cb.cb_proplist != NULL) { 9788 fake_name.pl_prop = ZPOOL_PROP_NAME; 9789 fake_name.pl_width = strlen(gettext("NAME")); 9790 fake_name.pl_next = cb.cb_proplist; 9791 cb.cb_proplist = &fake_name; 9792 } 9793 9794 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_literal, 9795 get_callback, &cb); 9796 9797 if (cb.cb_proplist == &fake_name) 9798 zprop_free_list(fake_name.pl_next); 9799 else 9800 zprop_free_list(cb.cb_proplist); 9801 9802 return (ret); 9803 } 9804 9805 typedef struct set_cbdata { 9806 char *cb_propname; 9807 char *cb_value; 9808 boolean_t cb_any_successful; 9809 } set_cbdata_t; 9810 9811 static int 9812 set_callback(zpool_handle_t *zhp, void *data) 9813 { 9814 int error; 9815 set_cbdata_t *cb = (set_cbdata_t *)data; 9816 9817 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value); 9818 9819 if (!error) 9820 cb->cb_any_successful = B_TRUE; 9821 9822 return (error); 9823 } 9824 9825 int 9826 zpool_do_set(int argc, char **argv) 9827 { 9828 set_cbdata_t cb = { 0 }; 9829 int error; 9830 9831 if (argc > 1 && argv[1][0] == '-') { 9832 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 9833 argv[1][1]); 9834 usage(B_FALSE); 9835 } 9836 9837 if (argc < 2) { 9838 (void) fprintf(stderr, gettext("missing property=value " 9839 "argument\n")); 9840 usage(B_FALSE); 9841 } 9842 9843 if (argc < 3) { 9844 (void) fprintf(stderr, gettext("missing pool name\n")); 9845 usage(B_FALSE); 9846 } 9847 9848 if (argc > 3) { 9849 (void) fprintf(stderr, gettext("too many pool names\n")); 9850 usage(B_FALSE); 9851 } 9852 9853 cb.cb_propname = argv[1]; 9854 cb.cb_value = strchr(cb.cb_propname, '='); 9855 if (cb.cb_value == NULL) { 9856 (void) fprintf(stderr, gettext("missing value in " 9857 "property=value argument\n")); 9858 usage(B_FALSE); 9859 } 9860 9861 *(cb.cb_value) = '\0'; 9862 cb.cb_value++; 9863 9864 error = for_each_pool(argc - 2, argv + 2, B_TRUE, NULL, B_FALSE, 9865 set_callback, &cb); 9866 9867 return (error); 9868 } 9869 9870 /* Add up the total number of bytes left to initialize/trim across all vdevs */ 9871 static uint64_t 9872 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity) 9873 { 9874 uint64_t bytes_remaining; 9875 nvlist_t **child; 9876 uint_t c, children; 9877 vdev_stat_t *vs; 9878 9879 assert(activity == ZPOOL_WAIT_INITIALIZE || 9880 activity == ZPOOL_WAIT_TRIM); 9881 9882 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 9883 (uint64_t **)&vs, &c) == 0); 9884 9885 if (activity == ZPOOL_WAIT_INITIALIZE && 9886 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) 9887 bytes_remaining = vs->vs_initialize_bytes_est - 9888 vs->vs_initialize_bytes_done; 9889 else if (activity == ZPOOL_WAIT_TRIM && 9890 vs->vs_trim_state == VDEV_TRIM_ACTIVE) 9891 bytes_remaining = vs->vs_trim_bytes_est - 9892 vs->vs_trim_bytes_done; 9893 else 9894 bytes_remaining = 0; 9895 9896 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 9897 &child, &children) != 0) 9898 children = 0; 9899 9900 for (c = 0; c < children; c++) 9901 bytes_remaining += vdev_activity_remaining(child[c], activity); 9902 9903 return (bytes_remaining); 9904 } 9905 9906 /* Add up the total number of bytes left to rebuild across top-level vdevs */ 9907 static uint64_t 9908 vdev_activity_top_remaining(nvlist_t *nv) 9909 { 9910 uint64_t bytes_remaining = 0; 9911 nvlist_t **child; 9912 uint_t children; 9913 int error; 9914 9915 if (nvlist_lookup_nvlist_array(nv, 
ZPOOL_CONFIG_CHILDREN, 9916 &child, &children) != 0) 9917 children = 0; 9918 9919 for (uint_t c = 0; c < children; c++) { 9920 vdev_rebuild_stat_t *vrs; 9921 uint_t i; 9922 9923 error = nvlist_lookup_uint64_array(child[c], 9924 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i); 9925 if (error == 0) { 9926 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 9927 bytes_remaining += (vrs->vrs_bytes_est - 9928 vrs->vrs_bytes_rebuilt); 9929 } 9930 } 9931 } 9932 9933 return (bytes_remaining); 9934 } 9935 9936 /* Whether any vdevs are 'spare' or 'replacing' vdevs */ 9937 static boolean_t 9938 vdev_any_spare_replacing(nvlist_t *nv) 9939 { 9940 nvlist_t **child; 9941 uint_t c, children; 9942 char *vdev_type; 9943 9944 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type); 9945 9946 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 || 9947 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 || 9948 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) { 9949 return (B_TRUE); 9950 } 9951 9952 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 9953 &child, &children) != 0) 9954 children = 0; 9955 9956 for (c = 0; c < children; c++) { 9957 if (vdev_any_spare_replacing(child[c])) 9958 return (B_TRUE); 9959 } 9960 9961 return (B_FALSE); 9962 } 9963 9964 typedef struct wait_data { 9965 char *wd_poolname; 9966 boolean_t wd_scripted; 9967 boolean_t wd_exact; 9968 boolean_t wd_headers_once; 9969 boolean_t wd_should_exit; 9970 /* Which activities to wait for */ 9971 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES]; 9972 float wd_interval; 9973 pthread_cond_t wd_cv; 9974 pthread_mutex_t wd_mutex; 9975 } wait_data_t; 9976 9977 /* 9978 * Print to stdout a single line, containing one column for each activity that 9979 * we are waiting for specifying how many bytes of work are left for that 9980 * activity. 9981 */ 9982 static void 9983 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row) 9984 { 9985 nvlist_t *config, *nvroot; 9986 uint_t c; 9987 int i; 9988 pool_checkpoint_stat_t *pcs = NULL; 9989 pool_scan_stat_t *pss = NULL; 9990 pool_removal_stat_t *prs = NULL; 9991 char *headers[] = {"DISCARD", "FREE", "INITIALIZE", "REPLACE", 9992 "REMOVE", "RESILVER", "SCRUB", "TRIM"}; 9993 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES]; 9994 9995 /* Calculate the width of each column */ 9996 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 9997 /* 9998 * Make sure we have enough space in the col for pretty-printed 9999 * numbers and for the column header, and then leave a couple 10000 * spaces between cols for readability. 
10001 */ 10002 col_widths[i] = MAX(strlen(headers[i]), 6) + 2; 10003 } 10004 10005 /* Print header if appropriate */ 10006 int term_height = terminal_height(); 10007 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 && 10008 row % (term_height-1) == 0); 10009 if (!wd->wd_scripted && (row == 0 || reprint_header)) { 10010 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 10011 if (wd->wd_enabled[i]) 10012 (void) printf("%*s", col_widths[i], headers[i]); 10013 } 10014 (void) printf("\n"); 10015 } 10016 10017 /* Bytes of work remaining in each activity */ 10018 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0}; 10019 10020 bytes_rem[ZPOOL_WAIT_FREE] = 10021 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL); 10022 10023 config = zpool_get_config(zhp, NULL); 10024 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 10025 10026 (void) nvlist_lookup_uint64_array(nvroot, 10027 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 10028 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 10029 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space; 10030 10031 (void) nvlist_lookup_uint64_array(nvroot, 10032 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 10033 if (prs != NULL && prs->prs_state == DSS_SCANNING) 10034 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy - 10035 prs->prs_copied; 10036 10037 (void) nvlist_lookup_uint64_array(nvroot, 10038 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c); 10039 if (pss != NULL && pss->pss_state == DSS_SCANNING && 10040 pss->pss_pass_scrub_pause == 0) { 10041 int64_t rem = pss->pss_to_examine - pss->pss_issued; 10042 if (pss->pss_func == POOL_SCAN_SCRUB) 10043 bytes_rem[ZPOOL_WAIT_SCRUB] = rem; 10044 else 10045 bytes_rem[ZPOOL_WAIT_RESILVER] = rem; 10046 } else if (check_rebuilding(nvroot, NULL)) { 10047 bytes_rem[ZPOOL_WAIT_RESILVER] = 10048 vdev_activity_top_remaining(nvroot); 10049 } 10050 10051 bytes_rem[ZPOOL_WAIT_INITIALIZE] = 10052 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE); 10053 bytes_rem[ZPOOL_WAIT_TRIM] = 10054 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM); 10055 10056 /* 10057 * A replace finishes after resilvering finishes, so the amount of work 10058 * left for a replace is the same as for resilvering. 10059 * 10060 * It isn't quite correct to say that if we have any 'spare' or 10061 * 'replacing' vdevs and a resilver is happening, then a replace is in 10062 * progress, like we do here. When a hot spare is used, the faulted vdev 10063 * is not removed after the hot spare is resilvered, so parent 'spare' 10064 * vdev is not removed either. So we could have a 'spare' vdev, but be 10065 * resilvering for a different reason. However, we use it as a heuristic 10066 * because we don't have access to the DTLs, which could tell us whether 10067 * or not we have really finished resilvering a hot spare. 10068 */ 10069 if (vdev_any_spare_replacing(nvroot)) 10070 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER]; 10071 10072 if (timestamp_fmt != NODATE) 10073 print_timestamp(timestamp_fmt); 10074 10075 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 10076 char buf[64]; 10077 if (!wd->wd_enabled[i]) 10078 continue; 10079 10080 if (wd->wd_exact) 10081 (void) snprintf(buf, sizeof (buf), "%" PRIi64, 10082 bytes_rem[i]); 10083 else 10084 zfs_nicenum(bytes_rem[i], buf, sizeof (buf)); 10085 10086 if (wd->wd_scripted) 10087 (void) printf(i == 0 ? 
"%s" : "\t%s", buf); 10088 else 10089 (void) printf(" %*s", col_widths[i] - 1, buf); 10090 } 10091 (void) printf("\n"); 10092 (void) fflush(stdout); 10093 } 10094 10095 static void * 10096 wait_status_thread(void *arg) 10097 { 10098 wait_data_t *wd = (wait_data_t *)arg; 10099 zpool_handle_t *zhp; 10100 10101 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL) 10102 return (void *)(1); 10103 10104 for (int row = 0; ; row++) { 10105 boolean_t missing; 10106 struct timespec timeout; 10107 int ret = 0; 10108 (void) clock_gettime(CLOCK_REALTIME, &timeout); 10109 10110 if (zpool_refresh_stats(zhp, &missing) != 0 || missing || 10111 zpool_props_refresh(zhp) != 0) { 10112 zpool_close(zhp); 10113 return (void *)(uintptr_t)(missing ? 0 : 1); 10114 } 10115 10116 print_wait_status_row(wd, zhp, row); 10117 10118 timeout.tv_sec += floor(wd->wd_interval); 10119 long nanos = timeout.tv_nsec + 10120 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC; 10121 if (nanos >= NANOSEC) { 10122 timeout.tv_sec++; 10123 timeout.tv_nsec = nanos - NANOSEC; 10124 } else { 10125 timeout.tv_nsec = nanos; 10126 } 10127 pthread_mutex_lock(&wd->wd_mutex); 10128 if (!wd->wd_should_exit) 10129 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex, 10130 &timeout); 10131 pthread_mutex_unlock(&wd->wd_mutex); 10132 if (ret == 0) { 10133 break; /* signaled by main thread */ 10134 } else if (ret != ETIMEDOUT) { 10135 (void) fprintf(stderr, gettext("pthread_cond_timedwait " 10136 "failed: %s\n"), strerror(ret)); 10137 zpool_close(zhp); 10138 return (void *)(uintptr_t)(1); 10139 } 10140 } 10141 10142 zpool_close(zhp); 10143 return (void *)(0); 10144 } 10145 10146 int 10147 zpool_do_wait(int argc, char **argv) 10148 { 10149 boolean_t verbose = B_FALSE; 10150 int c; 10151 char *value; 10152 int i; 10153 unsigned long count; 10154 pthread_t status_thr; 10155 int error = 0; 10156 zpool_handle_t *zhp; 10157 10158 wait_data_t wd; 10159 wd.wd_scripted = B_FALSE; 10160 wd.wd_exact = B_FALSE; 10161 wd.wd_headers_once = B_FALSE; 10162 wd.wd_should_exit = B_FALSE; 10163 10164 pthread_mutex_init(&wd.wd_mutex, NULL); 10165 pthread_cond_init(&wd.wd_cv, NULL); 10166 10167 /* By default, wait for all types of activity. 
*/ 10168 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) 10169 wd.wd_enabled[i] = B_TRUE; 10170 10171 while ((c = getopt(argc, argv, "HnpT:t:")) != -1) { 10172 switch (c) { 10173 case 'H': 10174 wd.wd_scripted = B_TRUE; 10175 break; 10176 case 'n': 10177 wd.wd_headers_once = B_TRUE; 10178 break; 10179 case 'p': 10180 wd.wd_exact = B_TRUE; 10181 break; 10182 case 'T': 10183 get_timestamp_arg(*optarg); 10184 break; 10185 case 't': 10186 { 10187 static char *col_subopts[] = { "discard", "free", 10188 "initialize", "replace", "remove", "resilver", 10189 "scrub", "trim", NULL }; 10190 10191 /* Reset activities array */ 10192 bzero(&wd.wd_enabled, sizeof (wd.wd_enabled)); 10193 while (*optarg != '\0') { 10194 int activity = getsubopt(&optarg, col_subopts, 10195 &value); 10196 10197 if (activity < 0) { 10198 (void) fprintf(stderr, 10199 gettext("invalid activity '%s'\n"), 10200 value); 10201 usage(B_FALSE); 10202 } 10203 10204 wd.wd_enabled[activity] = B_TRUE; 10205 } 10206 break; 10207 } 10208 case '?': 10209 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10210 optopt); 10211 usage(B_FALSE); 10212 } 10213 } 10214 10215 argc -= optind; 10216 argv += optind; 10217 10218 get_interval_count(&argc, argv, &wd.wd_interval, &count); 10219 if (count != 0) { 10220 /* This subcmd only accepts an interval, not a count */ 10221 (void) fprintf(stderr, gettext("too many arguments\n")); 10222 usage(B_FALSE); 10223 } 10224 10225 if (wd.wd_interval != 0) 10226 verbose = B_TRUE; 10227 10228 if (argc < 1) { 10229 (void) fprintf(stderr, gettext("missing 'pool' argument\n")); 10230 usage(B_FALSE); 10231 } 10232 if (argc > 1) { 10233 (void) fprintf(stderr, gettext("too many arguments\n")); 10234 usage(B_FALSE); 10235 } 10236 10237 wd.wd_poolname = argv[0]; 10238 10239 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL) 10240 return (1); 10241 10242 if (verbose) { 10243 /* 10244 * We use a separate thread for printing status updates because 10245 * the main thread will call lzc_wait(), which blocks as long 10246 * as an activity is in progress, which can be a long time. 10247 */ 10248 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd) 10249 != 0) { 10250 (void) fprintf(stderr, gettext("failed to create status " 10251 "thread: %s\n"), strerror(errno)); 10252 zpool_close(zhp); 10253 return (1); 10254 } 10255 } 10256 10257 /* 10258 * Loop over all activities that we are supposed to wait for until none 10259 * of them are in progress. Note that this means we can end up waiting 10260 * for more activities to complete than just those that were in progress 10261 * when we began waiting; if an activity we are interested in begins 10262 * while we are waiting for another activity, we will wait for both to 10263 * complete before exiting. 
	if (verbose) {
		/*
		 * We use a separate thread for printing status updates because
		 * the main thread will call lzc_wait(), which blocks as long
		 * as an activity is in progress, which can be a long time.
		 */
		if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
		    != 0) {
			(void) fprintf(stderr, gettext("failed to create "
			    "status thread: %s\n"), strerror(errno));
			zpool_close(zhp);
			return (1);
		}
	}

	/*
	 * Loop over all activities that we are supposed to wait for until none
	 * of them are in progress. Note that this means we can end up waiting
	 * for more activities to complete than just those that were in
	 * progress when we began waiting; if an activity we are interested in
	 * begins while we are waiting for another activity, we will wait for
	 * both to complete before exiting.
	 */
	for (;;) {
		boolean_t missing = B_FALSE;
		boolean_t any_waited = B_FALSE;

		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			boolean_t waited;

			if (!wd.wd_enabled[i])
				continue;

			error = zpool_wait_status(zhp, i, &missing, &waited);
			if (error != 0 || missing)
				break;

			any_waited = (any_waited || waited);
		}

		if (error != 0 || missing || !any_waited)
			break;
	}

	zpool_close(zhp);

	if (verbose) {
		uintptr_t status;
		pthread_mutex_lock(&wd.wd_mutex);
		wd.wd_should_exit = B_TRUE;
		pthread_cond_signal(&wd.wd_cv);
		pthread_mutex_unlock(&wd.wd_mutex);
		(void) pthread_join(status_thr, (void *)&status);
		if (status != 0)
			error = status;
	}

	pthread_mutex_destroy(&wd.wd_mutex);
	pthread_cond_destroy(&wd.wd_cv);
	return (error);
}

static int
find_command_idx(char *command, int *idx)
{
	int i;

	for (i = 0; i < NCOMMAND; i++) {
		if (command_table[i].name == NULL)
			continue;

		if (strcmp(command, command_table[i].name) == 0) {
			*idx = i;
			return (0);
		}
	}
	return (1);
}

/*
 * Display version message
 */
static int
zpool_do_version(int argc, char **argv)
{
	if (zfs_version_print() == -1)
		return (1);

	return (0);
}

int
main(int argc, char **argv)
{
	int ret = 0;
	int i = 0;
	char *cmdname;
	char **newargv;

	(void) setlocale(LC_ALL, "");
	(void) setlocale(LC_NUMERIC, "C");
	(void) textdomain(TEXT_DOMAIN);
	srand(time(NULL));

	opterr = 0;

	/*
	 * Make sure the user has specified some command.
	 */
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing command\n"));
		usage(B_FALSE);
	}

	cmdname = argv[1];

	/*
	 * Special case '-?'
	 */
	if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
		usage(B_TRUE);

	/*
	 * Special case '-V|--version'
	 */
	if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
		return (zpool_do_version(argc, argv));

	if ((g_zfs = libzfs_init()) == NULL) {
		(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
		return (1);
	}

	libzfs_print_on_error(g_zfs, B_TRUE);

	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));

	/*
	 * Many commands modify input strings for string parsing reasons.
	 * We create a copy to protect the original argv.
	 */
	newargv = malloc((argc + 1) * sizeof (newargv[0]));
	for (i = 0; i < argc; i++)
		newargv[i] = strdup(argv[i]);
	newargv[argc] = NULL;

	/*
	 * Run the appropriate command.
	 */
	if (find_command_idx(cmdname, &i) == 0) {
		current_command = &command_table[i];
		ret = command_table[i].func(argc - 1, newargv + 1);
	} else if (strchr(cmdname, '=')) {
		verify(find_command_idx("set", &i) == 0);
		current_command = &command_table[i];
		ret = command_table[i].func(argc, newargv);
	} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
		/*
		 * 'freeze' is a vile debugging abomination, so we treat
		 * it as such.
		 */
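		/*
		 * Freezing halts transaction group syncing, so that further
		 * changes persist only in the intent log; the ZFS test suite
		 * uses this to exercise ZIL replay on the next import.  It is
		 * deliberately left undocumented.
		 */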
		zfs_cmd_t zc = {"\0"};

		(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
		ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
		if (ret != 0) {
			(void) fprintf(stderr,
			    gettext("failed to freeze pool: %d\n"), errno);
			ret = 1;
		}

		log_history = 0;
	} else {
		(void) fprintf(stderr, gettext("unrecognized "
		    "command '%s'\n"), cmdname);
		usage(B_FALSE);
		ret = 1;
	}

	for (i = 0; i < argc; i++)
		free(newargv[i]);
	free(newargv);

	if (ret == 0 && log_history)
		(void) zpool_log_history(g_zfs, history_str);

	libzfs_fini(g_zfs);

	/*
	 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
	 * for the purposes of running ::findleaks.
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	return (ret);
}