// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2021, 2023, 2025, Klara, Inc.
 * Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>

#include <libzfs.h>
#include <libzutil.h>

#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "zfs_valstr.h"

#include "statcommon.h"

libzfs_handle_t *g_zfs;

static int mount_tp_nthr = 512;	/* tpool threads for multi-threaded mounting */

static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);
static int zpool_do_prefetch(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static int zpool_do_ddt_prune(int, char **);

static int zpool_do_help(int argc, char **argv);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

enum zpool_options {
	ZPOOL_OPTION_POWER = 1024,
	ZPOOL_OPTION_ALLOW_INUSE,
	ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
	ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
	ZPOOL_OPTION_POOL_KEY_GUID,
	ZPOOL_OPTION_JSON_NUMS_AS_INT,
	ZPOOL_OPTION_JSON_FLAT_VDEVS
};

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif

typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DDT_PRUNE,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_PREFETCH,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;

/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
	IOS_DEFAULT = 0,
	IOS_LATENCY = 1,
	IOS_QUEUES = 2,
	IOS_L_HISTO = 3,
	IOS_RQ_HISTO = 4,
	IOS_COUNT,	/* always last element */
};

/* iostat_type entries as bitmasks */
#define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
#define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
#define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
#define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
#define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define	IOS_ANYHISTO_M	(IOS_L_HISTO_M | IOS_RQ_HISTO_M)
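
/*
 * Editorial note (not in the original source): cb_flags is a bitmask, so
 * the non-histogram stat types can be requested together; e.g. a cb_flags
 * of (IOS_LATENCY_M | IOS_QUEUES_M) corresponds to "zpool iostat -lq",
 * which prints both the latency and the queue columns.
 */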

/*
 * Lookup table for iostat flags to nvlist names.  Basically a list
 * of all the nvlists a flag requires.  Also specifies the order in
 * which data gets printed in zpool iostat.
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
	[IOS_L_HISTO] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_LATENCY] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_QUEUES] = {
	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    NULL},
	[IOS_RQ_HISTO] = {
	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    NULL},
};

static const char *pool_scan_func_str[] = {
	"NONE",
	"SCRUB",
	"RESILVER",
	"ERRORSCRUB"
};

static const char *pool_scan_state_str[] = {
	"NONE",
	"SCANNING",
	"FINISHED",
	"CANCELED",
	"ERRORSCRUBBING"
};

static const char *vdev_rebuild_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"COMPLETE"
};

static const char *checkpoint_state_str[] = {
	"NONE",
	"EXISTS",
	"DISCARDING"
};

static const char *vdev_state_str[] = {
	"UNKNOWN",
	"CLOSED",
	"OFFLINE",
	"REMOVED",
	"CANT_OPEN",
	"FAULTED",
	"DEGRADED",
	"ONLINE"
};

static const char *vdev_aux_str[] = {
	"NONE",
	"OPEN_FAILED",
	"CORRUPT_DATA",
	"NO_REPLICAS",
	"BAD_GUID_SUM",
	"TOO_SMALL",
	"BAD_LABEL",
	"VERSION_NEWER",
	"VERSION_OLDER",
	"UNSUP_FEAT",
	"SPARED",
	"ERR_EXCEEDED",
	"IO_FAILURE",
	"BAD_LOG",
	"EXTERNAL",
	"SPLIT_POOL",
	"BAD_ASHIFT",
	"EXTERNAL_PERSIST",
	"ACTIVE",
	"CHILDREN_OFFLINE",
	"ASHIFT_TOO_BIG"
};

static const char *vdev_init_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

static const char *vdev_trim_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

#define	ZFS_NICE_TIMESTAMP	100

/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at one time, so we can
 * just do a highbit64(a).
 */
#define	IOS_HISTO_IDX(a)	(highbit64(a & IOS_ANYHISTO_M) - 1)
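
/*
 * Editorial example (not in the original source): IOS_L_HISTO_M is
 * (1ULL << IOS_L_HISTO) == 8, so IOS_HISTO_IDX(IOS_L_HISTO_M) evaluates
 * to highbit64(8) - 1 == 3 == IOS_L_HISTO, which indexes directly into
 * vsx_type_to_nvlist[] above.  This assumes highbit64() returns the
 * 1-based position of the highest set bit.
 */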

typedef struct zpool_command {
	const char	*name;
	int		(*func)(int, char **);
	zpool_help_t	usage;
} zpool_command_t;

/*
 * Master command table.  Each ZFS command has a name, associated function,
 * and usage message.  The usage messages need to be internationalized, so we
 * have to have a function to return the usage message based on a command
 * index.
 *
 * These commands are organized according to how they are displayed in the
 * usage message.  An empty command (one with a NULL name) indicates an empty
 * line in the generic usage message.
 */
static zpool_command_t command_table[] = {
	{ "version",	zpool_do_version,	HELP_VERSION	},
	{ NULL },
	{ "create",	zpool_do_create,	HELP_CREATE	},
	{ "destroy",	zpool_do_destroy,	HELP_DESTROY	},
	{ NULL },
	{ "add",	zpool_do_add,		HELP_ADD	},
	{ "remove",	zpool_do_remove,	HELP_REMOVE	},
	{ NULL },
	{ "labelclear",	zpool_do_labelclear,	HELP_LABELCLEAR	},
	{ NULL },
	{ "checkpoint",	zpool_do_checkpoint,	HELP_CHECKPOINT	},
	{ "prefetch",	zpool_do_prefetch,	HELP_PREFETCH	},
	{ NULL },
	{ "list",	zpool_do_list,		HELP_LIST	},
	{ "iostat",	zpool_do_iostat,	HELP_IOSTAT	},
	{ "status",	zpool_do_status,	HELP_STATUS	},
	{ NULL },
	{ "online",	zpool_do_online,	HELP_ONLINE	},
	{ "offline",	zpool_do_offline,	HELP_OFFLINE	},
	{ "clear",	zpool_do_clear,		HELP_CLEAR	},
	{ "reopen",	zpool_do_reopen,	HELP_REOPEN	},
	{ NULL },
	{ "attach",	zpool_do_attach,	HELP_ATTACH	},
	{ "detach",	zpool_do_detach,	HELP_DETACH	},
	{ "replace",	zpool_do_replace,	HELP_REPLACE	},
	{ "split",	zpool_do_split,		HELP_SPLIT	},
	{ NULL },
	{ "initialize",	zpool_do_initialize,	HELP_INITIALIZE	},
	{ "resilver",	zpool_do_resilver,	HELP_RESILVER	},
	{ "scrub",	zpool_do_scrub,		HELP_SCRUB	},
	{ "trim",	zpool_do_trim,		HELP_TRIM	},
	{ NULL },
	{ "import",	zpool_do_import,	HELP_IMPORT	},
	{ "export",	zpool_do_export,	HELP_EXPORT	},
	{ "upgrade",	zpool_do_upgrade,	HELP_UPGRADE	},
	{ "reguid",	zpool_do_reguid,	HELP_REGUID	},
	{ NULL },
	{ "history",	zpool_do_history,	HELP_HISTORY	},
	{ "events",	zpool_do_events,	HELP_EVENTS	},
	{ NULL },
	{ "get",	zpool_do_get,		HELP_GET	},
	{ "set",	zpool_do_set,		HELP_SET	},
	{ "sync",	zpool_do_sync,		HELP_SYNC	},
	{ NULL },
	{ "wait",	zpool_do_wait,		HELP_WAIT	},
	{ NULL },
	{ "ddtprune",	zpool_do_ddt_prune,	HELP_DDT_PRUNE	},
};

#define	NCOMMAND	(ARRAY_SIZE(command_table))
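
/*
 * Editorial sketch (not part of the original source): main(), later in
 * this file, resolves the command name against command_table[] and
 * dispatches to the matching handler, roughly:
 *
 *	for (i = 0; i < NCOMMAND; i++) {
 *		if (command_table[i].name != NULL &&
 *		    strcmp(cmdname, command_table[i].name) == 0) {
 *			current_command = &command_table[i];
 *			ret = command_table[i].func(argc - 1, argv + 1);
 *			break;
 *		}
 *	}
 */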
\n" 464 "\t [-O file-system-property=value] ... \n" 465 "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n")); 466 case HELP_CHECKPOINT: 467 return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n")); 468 case HELP_DESTROY: 469 return (gettext("\tdestroy [-f] <pool>\n")); 470 case HELP_DETACH: 471 return (gettext("\tdetach <pool> <device>\n")); 472 case HELP_EXPORT: 473 return (gettext("\texport [-af] <pool> ...\n")); 474 case HELP_HISTORY: 475 return (gettext("\thistory [-il] [<pool>] ...\n")); 476 case HELP_IMPORT: 477 return (gettext("\timport [-d dir] [-D]\n" 478 "\timport [-o mntopts] [-o property=value] ... \n" 479 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] " 480 "[-R root] [-F [-n]] -a\n" 481 "\timport [-o mntopts] [-o property=value] ... \n" 482 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] " 483 "[-R root] [-F [-n]]\n" 484 "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n")); 485 case HELP_IOSTAT: 486 return (gettext("\tiostat [[[-c [script1,script2,...]" 487 "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n" 488 "\t [[pool ...]|[pool vdev ...]|[vdev ...]]" 489 " [[-n] interval [count]]\n")); 490 case HELP_LABELCLEAR: 491 return (gettext("\tlabelclear [-f] <vdev>\n")); 492 case HELP_LIST: 493 return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j " 494 "[--json-int, --json-pool-key-guid]] ...\n" 495 "\t [-T d|u] [pool] [interval [count]]\n")); 496 case HELP_PREFETCH: 497 return (gettext("\tprefetch -t <type> [<type opts>] <pool>\n" 498 "\t -t ddt <pool>\n")); 499 case HELP_OFFLINE: 500 return (gettext("\toffline [--power]|[[-f][-t]] <pool> " 501 "<device> ...\n")); 502 case HELP_ONLINE: 503 return (gettext("\tonline [--power][-e] <pool> <device> " 504 "...\n")); 505 case HELP_REPLACE: 506 return (gettext("\treplace [-fsw] [-o property=value] " 507 "<pool> <device> [new-device]\n")); 508 case HELP_REMOVE: 509 return (gettext("\tremove [-npsw] <pool> <device> ...\n")); 510 case HELP_REOPEN: 511 return (gettext("\treopen [-n] <pool>\n")); 512 case HELP_INITIALIZE: 513 return (gettext("\tinitialize [-c | -s | -u] [-w] <-a | <pool> " 514 "[<device> ...]>\n")); 515 case HELP_SCRUB: 516 return (gettext("\tscrub [-e | -s | -p | -C | -E | -S] [-w] " 517 "<-a | <pool> [<pool> ...]>\n")); 518 case HELP_RESILVER: 519 return (gettext("\tresilver <pool> ...\n")); 520 case HELP_TRIM: 521 return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] " 522 "<-a | <pool> [<device> ...]>\n")); 523 case HELP_STATUS: 524 return (gettext("\tstatus [-DdegiLPpstvx] " 525 "[-c script1[,script2,...]] ...\n" 526 "\t [-j|--json [--json-flat-vdevs] [--json-int] " 527 "[--json-pool-key-guid]] ...\n" 528 "\t [-T d|u] [--power] [pool] [interval [count]]\n")); 529 case HELP_UPGRADE: 530 return (gettext("\tupgrade\n" 531 "\tupgrade -v\n" 532 "\tupgrade [-V version] <-a | pool ...>\n")); 533 case HELP_EVENTS: 534 return (gettext("\tevents [-vHf [pool] | -c]\n")); 535 case HELP_GET: 536 return (gettext("\tget [-Hp] [-j [--json-int, " 537 "--json-pool-key-guid]] ...\n" 538 "\t [-o \"all\" | field[,...]] " 539 "<\"all\" | property[,...]> <pool> ...\n")); 540 case HELP_SET: 541 return (gettext("\tset <property=value> <pool>\n" 542 "\tset <vdev_property=value> <pool> <vdev>\n")); 543 case HELP_SPLIT: 544 return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n" 545 "\t [-o property=value] <pool> <newpool> " 546 "[<device> ...]\n")); 547 case HELP_REGUID: 548 return (gettext("\treguid [-g guid] <pool>\n")); 549 case HELP_SYNC: 550 return (gettext("\tsync [pool] ...\n")); 551 case HELP_VERSION: 552 return 
(gettext("\tversion [-j]\n")); 553 case HELP_WAIT: 554 return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] " 555 "<pool> [interval]\n")); 556 case HELP_DDT_PRUNE: 557 return (gettext("\tddtprune -d|-p <amount> <pool>\n")); 558 default: 559 __builtin_unreachable(); 560 } 561 } 562 563 /* 564 * Callback routine that will print out a pool property value. 565 */ 566 static int 567 print_pool_prop_cb(int prop, void *cb) 568 { 569 FILE *fp = cb; 570 571 (void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop)); 572 573 if (zpool_prop_readonly(prop)) 574 (void) fprintf(fp, " NO "); 575 else 576 (void) fprintf(fp, " YES "); 577 578 if (zpool_prop_values(prop) == NULL) 579 (void) fprintf(fp, "-\n"); 580 else 581 (void) fprintf(fp, "%s\n", zpool_prop_values(prop)); 582 583 return (ZPROP_CONT); 584 } 585 586 /* 587 * Callback routine that will print out a vdev property value. 588 */ 589 static int 590 print_vdev_prop_cb(int prop, void *cb) 591 { 592 FILE *fp = cb; 593 594 (void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop)); 595 596 if (vdev_prop_readonly(prop)) 597 (void) fprintf(fp, " NO "); 598 else 599 (void) fprintf(fp, " YES "); 600 601 if (vdev_prop_values(prop) == NULL) 602 (void) fprintf(fp, "-\n"); 603 else 604 (void) fprintf(fp, "%s\n", vdev_prop_values(prop)); 605 606 return (ZPROP_CONT); 607 } 608 609 /* 610 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like 611 * '/dev/disk/by-vdev/L5'. 612 */ 613 static const char * 614 vdev_name_to_path(zpool_handle_t *zhp, char *vdev) 615 { 616 nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL); 617 if (vdev_nv == NULL) { 618 return (NULL); 619 } 620 return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH)); 621 } 622 623 static int 624 zpool_power_on(zpool_handle_t *zhp, char *vdev) 625 { 626 return (zpool_power(zhp, vdev, B_TRUE)); 627 } 628 629 static int 630 zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev) 631 { 632 int rc; 633 634 rc = zpool_power_on(zhp, vdev); 635 if (rc != 0) 636 return (rc); 637 638 zpool_disk_wait(vdev_name_to_path(zhp, vdev)); 639 640 return (0); 641 } 642 643 static int 644 zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp) 645 { 646 nvlist_t *nv; 647 const char *path = NULL; 648 int rc; 649 650 /* Power up all the devices first */ 651 FOR_EACH_REAL_LEAF_VDEV(zhp, nv) { 652 path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH); 653 if (path != NULL) { 654 rc = zpool_power_on(zhp, (char *)path); 655 if (rc != 0) { 656 return (rc); 657 } 658 } 659 } 660 661 /* 662 * Wait for their devices to show up. Since we powered them on 663 * at roughly the same time, they should all come online around 664 * the same time. 665 */ 666 FOR_EACH_REAL_LEAF_VDEV(zhp, nv) { 667 path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH); 668 zpool_disk_wait(path); 669 } 670 671 return (0); 672 } 673 674 static int 675 zpool_power_off(zpool_handle_t *zhp, char *vdev) 676 { 677 return (zpool_power(zhp, vdev, B_FALSE)); 678 } 679 680 /* 681 * Display usage message. If we're inside a command, display only the usage for 682 * that command. Otherwise, iterate over the entire command table and display 683 * a complete usage message. 684 */ 685 static __attribute__((noreturn)) void 686 usage(boolean_t requested) 687 { 688 FILE *fp = requested ? 

/*
 * Display usage message.  If we're inside a command, display only the usage
 * for that command.  Otherwise, iterate over the entire command table and
 * display a complete usage message.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	FILE *fp = requested ? stdout : stderr;

	if (current_command == NULL) {
		int i;

		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
		(void) fprintf(fp,
		    gettext("where 'command' is one of the following:\n\n"));

		for (i = 0; i < NCOMMAND; i++) {
			if (command_table[i].name == NULL)
				(void) fprintf(fp, "\n");
			else
				(void) fprintf(fp, "%s",
				    get_usage(command_table[i].usage));
		}

		(void) fprintf(fp,
		    gettext("\nFor further help on a command or topic, "
		    "run: %s\n"), "zpool help [<topic>]");
	} else {
		(void) fprintf(fp, gettext("usage:\n"));
		(void) fprintf(fp, "%s", get_usage(current_command->usage));
	}

	if (current_command != NULL &&
	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
	    ((strcmp(current_command->name, "set") == 0) ||
	    (strcmp(current_command->name, "get") == 0) ||
	    (strcmp(current_command->name, "list") == 0))) {

		(void) fprintf(fp, "%s",
		    gettext("\nthe following properties are supported:\n"));

		(void) fprintf(fp, "\n\t%-19s  %s   %s\n\n",
		    "PROPERTY", "EDIT", "VALUES");

		/* Iterate over all properties */
		if (current_prop_type == ZFS_TYPE_POOL) {
			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);

			(void) fprintf(fp, "\t%-19s  ", "feature@...");
			(void) fprintf(fp, "YES   "
			    "disabled | enabled | active\n");

			(void) fprintf(fp, gettext("\nThe feature@ properties "
			    "must be appended with a feature name.\n"
			    "See zpool-features(7).\n"));
		} else if (current_prop_type == ZFS_TYPE_VDEV) {
			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);
		}
	}

	/*
	 * See comments at end of main().
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	exit(requested ? 0 : 2);
}
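
/*
 * Editorial examples (assumed typical invocations, not in the original
 * source):
 *
 *	zpool initialize tank			start on every vdev
 *	zpool initialize -w tank sda sdb	start on two vdevs and wait
 *	zpool initialize -s tank		suspend a running initialize
 */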

/*
 * zpool initialize [-c | -s | -u] [-w] <-a | pool> [<vdev> ...]
 * Initialize all unused blocks in the specified vdevs, or all vdevs in the
 * pool if none specified.
 *
 *	-a	Use all pools.
 *	-c	Cancel. Ends active initializing.
 *	-s	Suspend. Initializing can then be restarted with no flags.
 *	-u	Uninitialize. Clears initialization state.
 *	-w	Wait. Blocks until initializing has completed.
 */
int
zpool_do_initialize(int argc, char **argv)
{
	int c;
	char *poolname;
	zpool_handle_t *zhp;
	int err = 0;
	boolean_t wait = B_FALSE;
	boolean_t initialize_all = B_FALSE;

	struct option long_options[] = {
		{"cancel",	no_argument,	NULL, 'c'},
		{"suspend",	no_argument,	NULL, 's'},
		{"uninit",	no_argument,	NULL, 'u'},
		{"wait",	no_argument,	NULL, 'w'},
		{"all",		no_argument,	NULL, 'a'},
		{0, 0, 0, 0}
	};

	pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
	while ((c = getopt_long(argc, argv, "acsuw", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'a':
			initialize_all = B_TRUE;
			break;
		case 'c':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_CANCEL;
			break;
		case 's':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_SUSPEND;
			break;
		case 'u':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_UNINIT) {
				(void) fprintf(stderr, gettext("-u cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_UNINIT;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	initialize_cbdata_t cbdata = {
		.wait = wait,
		.cmd_type = cmd_type
	};

	if (initialize_all && argc > 0) {
		(void) fprintf(stderr, gettext("-a cannot be combined with "
		    "individual pools or vdevs\n"));
		usage(B_FALSE);
	}

	if (argc < 1 && !initialize_all) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
		return (-1);
	}

	if (wait && (cmd_type != POOL_INITIALIZE_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c, -s, "
		    "or -u\n"));
		usage(B_FALSE);
	}

	if (argc == 0 && initialize_all) {
		/* Initialize each pool recursively */
		err = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
		    B_FALSE, zpool_initialize_one, &cbdata);
		return (err);
	} else if (argc == 1) {
		/* no individual leaf vdevs specified, initialize the pool */
		poolname = argv[0];
		zhp = zpool_open(g_zfs, poolname);
		if (zhp == NULL)
			return (-1);
		err = zpool_initialize_one(zhp, &cbdata);
	} else {
		/* individual leaf vdevs specified, initialize them */
		poolname = argv[0];
		zhp = zpool_open(g_zfs, poolname);
		if (zhp == NULL)
			return (-1);
		nvlist_t *vdevs = fnvlist_alloc();
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
		if (wait)
			err = zpool_initialize_wait(zhp, cmd_type, vdevs);
		else
			err = zpool_initialize(zhp, cmd_type, vdevs);
		fnvlist_free(vdevs);
	}

	zpool_close(zhp);

	return (err);
}

/*
 * print a pool vdev config for dry runs
 */
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
    const char *match, int name_flags)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	boolean_t printed = B_FALSE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (name != NULL)
			(void) printf("\t%*s%s\n", indent, "", name);
		return;
	}

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		const char *class = "";

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);

		if (is_hole == B_TRUE) {
			continue;
		}

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			class = VDEV_ALLOC_BIAS_LOG;
		(void) nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
		if (strcmp(match, class) != 0)
			continue;

		if (!printed && name != NULL) {
			(void) printf("\t%*s%s\n", indent, "", name);
			printed = B_TRUE;
		}
		vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
		print_vdev_tree(zhp, vname, child[c], indent + 2, "",
		    name_flags);
		free(vname);
	}
}

/*
 * Print the list of l2cache devices for dry runs.
 */
static void
print_cache_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "cache");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

/*
 * Print the list of spares for dry runs.
 */
static void
print_spare_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "spares");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

typedef struct spare_cbdata {
	uint64_t cb_guid;
	zpool_handle_t *cb_zhp;
} spare_cbdata_t;

static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
	    search == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev(child[c], search))
				return (B_TRUE);
	}

	return (B_FALSE);
}

static int
find_spare(zpool_handle_t *zhp, void *data)
{
	spare_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (find_vdev(nvroot, cbp->cb_guid)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

static void
nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
    boolean_t literal, boolean_t as_int, int format)
{
	char buf[256];
	if (literal) {
		if (!as_int)
			snprintf(buf, 256, "%llu", (u_longlong_t)value);
	} else {
		switch (format) {
		case ZFS_NICENUM_1024:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
			break;
		case ZFS_NICENUM_BYTES:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
			break;
		case ZFS_NICENUM_TIME:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
			break;
		case ZFS_NICE_TIMESTAMP:
			format_timestamp(value, buf, 256);
			break;
		default:
			fprintf(stderr, "Invalid number format\n");
			exit(1);
		}
	}
	if (as_int)
		fnvlist_add_uint64(item, key, value);
	else
		fnvlist_add_string(item, key, buf);
}

/*
 * Generates an nvlist carrying the output version for each command.  This
 * attaches a version to the JSON output, since the schema of any command
 * may be updated in the future.
 *
 * Schema:
 *
 * "output_version": {
 *	"command": string,
 *	"vers_major": integer,
 *	"vers_minor": integer,
 * }
 */
static nvlist_t *
zpool_json_schema(int maj_v, int min_v)
{
	char cmd[MAX_CMD_LEN];
	nvlist_t *sch = fnvlist_alloc();
	nvlist_t *ov = fnvlist_alloc();

	snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
	fnvlist_add_string(ov, "command", cmd);
	fnvlist_add_uint32(ov, "vers_major", maj_v);
	fnvlist_add_uint32(ov, "vers_minor", min_v);
	fnvlist_add_nvlist(sch, "output_version", ov);
	fnvlist_free(ov);
	return (sch);
}
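
/*
 * Editorial example (assumed output shape, not in the original source):
 * zpool_json_schema(0, 1) invoked from "zpool status" serializes as
 *
 *	"output_version": {
 *		"command": "zpool status",
 *		"vers_major": 0,
 *		"vers_minor": 1
 *	}
 */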

static void
fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
    boolean_t as_int)
{
	nvlist_t *config = zpool_get_config(zhp, NULL);
	uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
	uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);

	fnvlist_add_string(list, "name", zpool_get_name(zhp));
	if (addtype)
		fnvlist_add_string(list, "type", "POOL");
	fnvlist_add_string(list, "state", zpool_get_state_str(zhp));
	if (as_int) {
		if (guid)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);
		if (txg)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);
		fnvlist_add_uint64(list, "spa_version", SPA_VERSION);
		fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);
	} else {
		char value[ZFS_MAXPROPLEN];
		if (guid) {
			snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)guid);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
		}
		if (txg) {
			snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)txg);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
		}
		fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);
		fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);
	}
}

static void
used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
{
	spare_cbdata_t spare_cb;
	verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
	    &spare_cb.cb_guid) == 0);
	if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
		if (strcmp(zpool_get_name(spare_cb.cb_zhp),
		    zpool_get_name(zhp)) != 0) {
			fnvlist_add_string(list, "used_by",
			    zpool_get_name(spare_cb.cb_zhp));
		}
		zpool_close(spare_cb.cb_zhp);
	}
}

static void
fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
    boolean_t addtype, boolean_t as_int)
{
	boolean_t l2c = B_FALSE;
	const char *path, *phys, *devid, *bias = NULL;
	uint64_t hole = 0, log = 0, spare = 0;
	vdev_stat_t *vs;
	uint_t c;
	nvlist_t *nvdev;
	nvlist_t *nvdev_parent = NULL;
	char *_name;

	if (strcmp(name, zpool_get_name(zhp)) != 0)
		_name = name;
	else
		_name = (char *)"root-0";

	nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);

	fnvlist_add_string(list, "name", name);
	if (addtype)
		fnvlist_add_string(list, "type", "VDEV");
	if (nvdev) {
		const char *type = fnvlist_lookup_string(nvdev,
		    ZPOOL_CONFIG_TYPE);
		if (type)
			fnvlist_add_string(list, "vdev_type", type);
		uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);
		if (guid) {
			if (as_int) {
				fnvlist_add_uint64(list, "guid", guid);
			} else {
				char buf[ZFS_MAXPROPLEN];
				snprintf(buf, ZFS_MAXPROPLEN, "%llu",
				    (u_longlong_t)guid);
				fnvlist_add_string(list, "guid", buf);
			}
		}
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
			fnvlist_add_string(list, "path", path);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
		    &phys) == 0)
			fnvlist_add_string(list, "phys_path", phys);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
		    &devid) == 0)
			fnvlist_add_string(list, "devid", devid);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
		    &spare);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
		if (hole)
			fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
		else if (l2c)
			fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
		else if (spare)
			fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
		else if (log)
			fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
		else {
			(void) nvlist_lookup_string(nvdev,
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			if (bias != NULL)
				fnvlist_add_string(list, "class", bias);
			else {
				nvdev_parent = NULL;
				nvdev_parent = zpool_find_parent_vdev(zhp,
				    _name, NULL, NULL, NULL);

				/*
				 * With a mirrored special device, the parent
				 * "mirror" vdev will have
				 * ZPOOL_CONFIG_ALLOCATION_BIAS set to "special"
				 * not the leaf vdevs.  If we're a leaf vdev
				 * in that case we need to look at our parent
				 * to see if they're "special" to know if we
				 * are "special" too.
				 */
				if (nvdev_parent) {
					(void) nvlist_lookup_string(
					    nvdev_parent,
					    ZPOOL_CONFIG_ALLOCATION_BIAS,
					    &bias);
				}
				if (bias != NULL)
					fnvlist_add_string(list, "class", bias);
				else
					fnvlist_add_string(list, "class",
					    "normal");
			}
		}
		if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0) {
			fnvlist_add_string(list, "state",
			    vdev_state_str[vs->vs_state]);
		}
	}
}

static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
	nvpair_t *nvp;
	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
	    nvp = nvlist_next_nvpair(proplist, nvp)) {
		if (zpool_prop_feature(nvpair_name(nvp)))
			return (B_TRUE);
	}
	return (B_FALSE);
}
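
/*
 * Editorial example (not in the original source): a proplist containing
 * "feature@async_destroy" = "enabled" makes prop_list_contains_feature()
 * return B_TRUE; add_prop_list() below relies on this to reject mixing
 * 'feature@' properties with the 'version' property.
 */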

/*
 * Add a property pair (name, string-value) into a property nvlist.
 */
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
	zpool_prop_t prop = ZPOOL_PROP_INVAL;
	nvlist_t *proplist;
	const char *normnm;
	const char *strval;

	if (*props == NULL &&
	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		return (1);
	}

	proplist = *props;

	if (poolprop) {
		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
		const char *cname =
		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
		    (!zpool_prop_feature(propname) &&
		    !zpool_prop_vdev(propname))) {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid pool or vdev property\n"), propname);
			return (2);
		}

		/*
		 * feature@ properties and version should not be specified
		 * at the same time.
		 */
		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    prop_list_contains_feature(proplist))) {
			(void) fprintf(stderr, gettext("'feature@' and "
			    "'version' properties cannot be specified "
			    "together\n"));
			return (2);
		}

		/*
		 * if version is specified, only "legacy" compatibility
		 * may be requested
		 */
		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    nvlist_exists(proplist, cname) &&
		    strcmp(fnvlist_lookup_string(proplist, cname),
		    ZPOOL_COMPAT_LEGACY) != 0)) {
			(void) fprintf(stderr, gettext("when 'version' is "
			    "specified, the 'compatibility' feature may only "
			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
			return (2);
		}

		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
			normnm = propname;
		else
			normnm = zpool_prop_to_name(prop);
	} else {
		zfs_prop_t fsprop = zfs_name_to_prop(propname);

		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
		    B_FALSE)) {
			normnm = zfs_prop_to_name(fsprop);
		} else if (zfs_prop_user(propname) ||
		    zfs_prop_userquota(propname)) {
			normnm = propname;
		} else {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid filesystem property\n"), propname);
			return (2);
		}
	}

	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
	    prop != ZPOOL_PROP_CACHEFILE) {
		(void) fprintf(stderr, gettext("property '%s' "
		    "specified multiple times\n"), propname);
		return (2);
	}

	if (nvlist_add_string(proplist, normnm, propval) != 0) {
		(void) fprintf(stderr, gettext("internal "
		    "error: out of memory\n"));
		return (1);
	}

	return (0);
}

/*
 * Set a default property pair (name, string-value) in a property nvlist
 */
static int
add_prop_list_default(const char *propname, const char *propval,
    nvlist_t **props)
{
	const char *pval;

	if (nvlist_lookup_string(*props, propname, &pval) == 0)
		return (0);

	return (add_prop_list(propname, propval, props, B_TRUE));
}
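
/*
 * Editorial usage sketch (not in the original source): callers build up a
 * props nvlist incrementally while parsing options, e.g.
 *
 *	nvlist_t *props = NULL;
 *	if (add_prop_list(ZPOOL_CONFIG_ASHIFT, "12", &props, B_TRUE) != 0)
 *		usage(B_FALSE);
 *
 * zpool_do_add() below does exactly this with the pool's current ashift.
 */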

/*
 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
 *
 *	-a	Disable the ashift validation checks
 *	-f	Force addition of devices, even if they appear in use
 *	-g	Display guid for individual vdev name.
 *	-L	Follow links when resolving vdev path name.
 *	-n	Do not add the devices, but display the resulting layout if
 *		they were to be added.
 *	-o	Set property=value. (Only the "ashift" property is accepted
 *		here; see the option parsing below.)
 *	-P	Display full path for vdev name.
 *
 * Adds the given vdevs to 'pool'.  As with create, the bulk of this work is
 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
 * libzfs.
 */
int
zpool_do_add(int argc, char **argv)
{
	boolean_t check_replication = B_TRUE;
	boolean_t check_inuse = B_TRUE;
	boolean_t dryrun = B_FALSE;
	boolean_t check_ashift = B_TRUE;
	boolean_t force = B_FALSE;
	int name_flags = 0;
	int c;
	nvlist_t *nvroot;
	char *poolname;
	int ret;
	zpool_handle_t *zhp;
	nvlist_t *config;
	nvlist_t *props = NULL;
	char *propval;

	struct option long_options[] = {
		{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
		{"allow-replication-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
		{"allow-ashift-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'g':
			name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 'P':
			name_flags |= VDEV_NAME_PATH;
			break;
		case ZPOOL_OPTION_ALLOW_INUSE:
			check_inuse = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
			check_replication = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
			check_ashift = B_FALSE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		usage(B_FALSE);
	}

	if (force) {
		if (!check_inuse || !check_replication || !check_ashift) {
			(void) fprintf(stderr, gettext("'-f' option is not "
			    "allowed with '--allow-replication-mismatch', "
			    "'--allow-ashift-mismatch', or "
			    "'--allow-in-use'\n"));
			usage(B_FALSE);
		}
		check_inuse = B_FALSE;
		check_replication = B_FALSE;
		check_ashift = B_FALSE;
	}

	poolname = argv[0];

	argc--;
	argv++;

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}

	/* pass off to make_root_vdev for processing */
	nvroot = make_root_vdev(zhp, props, !check_inuse,
	    check_replication, B_FALSE, dryrun, argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		return (1);
	}

	if (dryrun) {
		nvlist_t *poolnvroot;
		nvlist_t **l2child, **sparechild;
		uint_t l2children, sparechildren, c;
		char *vname;
		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &poolnvroot) == 0);

		(void) printf(gettext("would update '%s' to the following "
		    "configuration:\n\n"), zpool_get_name(zhp));

		/* print original main pool and new tree */
		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
		    name_flags | VDEV_NAME_TYPE_ID);
		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

		/* print other classes: 'dedup', 'special', and 'log' */
		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		}

		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
 */
int
zpool_do_remove(int argc, char **argv)
{
	char *poolname;
	int i, ret = 0;
	zpool_handle_t *zhp = NULL;
	boolean_t stop = B_FALSE;
	int c;
	boolean_t noop = B_FALSE;
	boolean_t parsable = B_FALSE;
	boolean_t wait = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "npsw")) != -1) {
		switch (c) {
		case 'n':
			noop = B_TRUE;
			break;
		case 'p':
			parsable = B_TRUE;
			break;
		case 's':
			stop = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if (stop && noop) {
		zpool_close(zhp);
		(void) fprintf(stderr, gettext("stop request ignored\n"));
		return (0);
	}

	if (stop) {
		if (argc > 1) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
		if (zpool_vdev_remove_cancel(zhp) != 0)
			ret = 1;
		if (wait) {
			(void) fprintf(stderr, gettext("invalid option "
			    "combination: -w cannot be used with -s\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc < 2) {
			(void) fprintf(stderr, gettext("missing device\n"));
			usage(B_FALSE);
		}

		for (i = 1; i < argc; i++) {
			if (noop) {
				uint64_t size;

				if (zpool_vdev_indirect_size(zhp, argv[i],
				    &size) != 0) {
					ret = 1;
					break;
				}
				if (parsable) {
					(void) printf("%s %llu\n",
					    argv[i], (unsigned long long)size);
				} else {
					char valstr[32];
					zfs_nicenum(size, valstr,
					    sizeof (valstr));
					(void) printf("Memory that will be "
					    "used after removing %s: %s\n",
					    argv[i], valstr);
				}
			} else {
				if (zpool_vdev_remove(zhp, argv[i]) != 0)
					ret = 1;
			}
		}

		if (ret == 0 && wait)
			ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
	}
	zpool_close(zhp);

	return (ret);
}

/*
 * Return 1 if a vdev is active (being used in a pool)
 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
 *
 * This is useful for checking if a disk in an active pool is offlined or
 * faulted.
 */
static int
vdev_is_active(char *vdev_path)
{
	int fd;
	fd = open(vdev_path, O_EXCL);
	if (fd < 0) {
		return (1);	/* can't open O_EXCL - disk is active */
	}

	close(fd);
	return (0);	/* disk is inactive in the pool */
}

/*
 * zpool labelclear [-f] <vdev>
 *
 *	-f	Force clearing the label for the vdevs which are members of
 *		the exported or foreign pools.
 *
 * Verifies that the vdev is not active and zeros out the label information
 * on the device.
 */
int
zpool_do_labelclear(int argc, char **argv)
{
	char vdev[MAXPATHLEN];
	char *name = NULL;
	int c, fd, ret = 0;
	nvlist_t *config;
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	boolean_t force = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		default:
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get vdev name */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing vdev name\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	(void) strlcpy(vdev, argv[0], sizeof (vdev));

	/*
	 * If we cannot open an absolute path, we quit.
	 * Otherwise if the provided vdev name doesn't point to a file,
	 * try prepending expected disk paths and partition numbers.
	 */
	if ((fd = open(vdev, O_RDWR)) < 0) {
		int error;
		if (vdev[0] == '/') {
			(void) fprintf(stderr, gettext("failed to open "
			    "%s: %s\n"), vdev, strerror(errno));
			return (1);
		}

		error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
		if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
			if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
				error = ENOENT;
		}

		if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
			if (errno == ENOENT) {
				(void) fprintf(stderr, gettext(
				    "failed to find device %s, try "
				    "specifying absolute path instead\n"),
				    argv[0]);
				return (1);
			}

			(void) fprintf(stderr, gettext("failed to open %s:"
			    " %s\n"), vdev, strerror(errno));
			return (1);
		}
	}

	/*
	 * Flush all dirty pages for the block device.  This should not be
	 * fatal when the device does not support BLKFLSBUF as would be the
	 * case for a file vdev.
	 */
	if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
		(void) fprintf(stderr, gettext("failed to invalidate "
		    "cache for %s: %s\n"), vdev, strerror(errno));

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) fprintf(stderr,
		    gettext("failed to read label from %s\n"), vdev);
		ret = 1;
		goto errout;
	}
	nvlist_free(config);

	ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to check state for %s\n"), vdev);
		ret = 1;
		goto errout;
	}

	if (!inuse)
		goto wipe_label;

	switch (state) {
	default:
	case POOL_STATE_ACTIVE:
	case POOL_STATE_SPARE:
	case POOL_STATE_L2CACHE:
		/*
		 * We allow the user to call 'zpool offline -f'
		 * on an offlined disk in an active pool. We can check if
		 * the disk is online by calling vdev_is_active().
		 */
		if (force && !vdev_is_active(vdev))
			break;

		(void) fprintf(stderr, gettext(
		    "%s is a member (%s) of pool \"%s\""),
		    vdev, zpool_pool_state_to_name(state), name);

		if (force) {
			(void) fprintf(stderr, gettext(
			    ". Offline the disk first to clear its label."));
		}
		printf("\n");
		ret = 1;
		goto errout;

	case POOL_STATE_EXPORTED:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of exported pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_POTENTIALLY_ACTIVE:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of potentially active pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_DESTROYED:
		/* inuse should never be set for a destroyed pool */
		assert(0);
		break;
	}

wipe_label:
	ret = zpool_clear_label(fd);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to clear label for %s\n"), vdev);
	}

errout:
	free(name);
	(void) close(fd);

	return (ret);
}
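
/*
 * Editorial examples (assumed typical invocations, not in the original
 * source):
 *
 *	zpool create tank mirror sda sdb
 *	zpool create -n -o ashift=12 tank raidz sda sdb sdc
 */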

/*
 * zpool create [-fnd] [-o property=value] ...
 *		[-O file-system-property=value] ...
 *		[-R root] [-m mountpoint] <pool> <dev> ...
 *
 *	-f	Force creation, even if devices appear in use
 *	-n	Do not create the pool, but display the resulting layout if it
 *		were to be created.
 *	-R	Create a pool under an alternate root
 *	-m	Set default mountpoint for the root dataset.  By default it's
 *		'/<pool>'
 *	-o	Set property=value.
 *	-o	Set feature@feature=enabled|disabled.
 *	-d	Don't automatically enable all supported pool features
 *		(individual features can be enabled with -o).
 *	-O	Set fsproperty=value in the pool's root file system
 *
 * Creates the named pool according to the given vdev specification.  The
 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
 * Once we get the nvlist back from make_root_vdev(), we either print out the
 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
 */
int
zpool_do_create(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	boolean_t dryrun = B_FALSE;
	boolean_t enable_pool_features = B_TRUE;

	int c;
	nvlist_t *nvroot = NULL;
	char *poolname;
	char *tname = NULL;
	int ret = 1;
	char *altroot = NULL;
	char *compat = NULL;
	char *mountpoint = NULL;
	nvlist_t *fsprops = NULL;
	nvlist_t *props = NULL;
	char *propval;

	/* check options */
	while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'd':
			enable_pool_features = B_FALSE;
			break;
		case 'R':
			altroot = optarg;
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			break;
		case 'm':
			/* Equivalent to -O mountpoint=optarg */
			mountpoint = optarg;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			if (add_prop_list(optarg, propval, &props, B_TRUE))
				goto errout;

			/*
			 * If the user is creating a pool that doesn't support
			 * feature flags, don't enable any features.
			 */
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
				char *end;
				u_longlong_t ver;

				ver = strtoull(propval, &end, 0);
				if (*end == '\0' &&
				    ver < SPA_VERSION_FEATURES) {
					enable_pool_features = B_FALSE;
				}
			}
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
				altroot = propval;
			if (zpool_name_to_prop(optarg) ==
			    ZPOOL_PROP_COMPATIBILITY)
				compat = propval;
			break;
		case 'O':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -O option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			/*
			 * Mountpoints are checked and then added later.
			 * Uniquely among properties, they can be specified
			 * more than once, to avoid conflict with -m.
			 */
			if (0 == strcmp(optarg,
			    zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
				mountpoint = propval;
			} else if (add_prop_list(optarg, propval, &fsprops,
			    B_FALSE)) {
				goto errout;
			}
			break;
		case 't':
			/*
			 * Sanity check temporary pool name.
			 */
2069 */ 2070 if (strchr(optarg, '/') != NULL) { 2071 (void) fprintf(stderr, gettext("cannot create " 2072 "'%s': invalid character '/' in temporary " 2073 "name\n"), optarg); 2074 (void) fprintf(stderr, gettext("use 'zfs " 2075 "create' to create a dataset\n")); 2076 goto errout; 2077 } 2078 2079 if (add_prop_list(zpool_prop_to_name( 2080 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE)) 2081 goto errout; 2082 if (add_prop_list_default(zpool_prop_to_name( 2083 ZPOOL_PROP_CACHEFILE), "none", &props)) 2084 goto errout; 2085 tname = optarg; 2086 break; 2087 case ':': 2088 (void) fprintf(stderr, gettext("missing argument for " 2089 "'%c' option\n"), optopt); 2090 goto badusage; 2091 case '?': 2092 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 2093 optopt); 2094 goto badusage; 2095 } 2096 } 2097 2098 argc -= optind; 2099 argv += optind; 2100 2101 /* get pool name and check number of arguments */ 2102 if (argc < 1) { 2103 (void) fprintf(stderr, gettext("missing pool name argument\n")); 2104 goto badusage; 2105 } 2106 if (argc < 2) { 2107 (void) fprintf(stderr, gettext("missing vdev specification\n")); 2108 goto badusage; 2109 } 2110 2111 poolname = argv[0]; 2112 2113 /* 2114 * As a special case, check for use of '/' in the name, and direct the 2115 * user to use 'zfs create' instead. 2116 */ 2117 if (strchr(poolname, '/') != NULL) { 2118 (void) fprintf(stderr, gettext("cannot create '%s': invalid " 2119 "character '/' in pool name\n"), poolname); 2120 (void) fprintf(stderr, gettext("use 'zfs create' to " 2121 "create a dataset\n")); 2122 goto errout; 2123 } 2124 2125 /* pass off to make_root_vdev for bulk processing */ 2126 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun, 2127 argc - 1, argv + 1); 2128 if (nvroot == NULL) 2129 goto errout; 2130 2131 /* make_root_vdev() allows 0 toplevel children if there are spares */ 2132 if (!zfs_allocatable_devs(nvroot)) { 2133 (void) fprintf(stderr, gettext("invalid vdev " 2134 "specification: at least one toplevel vdev must be " 2135 "specified\n")); 2136 goto errout; 2137 } 2138 2139 if (altroot != NULL && altroot[0] != '/') { 2140 (void) fprintf(stderr, gettext("invalid alternate root '%s': " 2141 "must be an absolute path\n"), altroot); 2142 goto errout; 2143 } 2144 2145 /* 2146 * Check the validity of the mountpoint and direct the user to use the 2147 * '-m' mountpoint option if it looks like it's in use.
2148 */ 2149 if (mountpoint == NULL || 2150 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 && 2151 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) { 2152 char buf[MAXPATHLEN]; 2153 DIR *dirp; 2154 2155 if (mountpoint && mountpoint[0] != '/') { 2156 (void) fprintf(stderr, gettext("invalid mountpoint " 2157 "'%s': must be an absolute path, 'legacy', or " 2158 "'none'\n"), mountpoint); 2159 goto errout; 2160 } 2161 2162 if (mountpoint == NULL) { 2163 if (altroot != NULL) 2164 (void) snprintf(buf, sizeof (buf), "%s/%s", 2165 altroot, poolname); 2166 else 2167 (void) snprintf(buf, sizeof (buf), "/%s", 2168 poolname); 2169 } else { 2170 if (altroot != NULL) 2171 (void) snprintf(buf, sizeof (buf), "%s%s", 2172 altroot, mountpoint); 2173 else 2174 (void) snprintf(buf, sizeof (buf), "%s", 2175 mountpoint); 2176 } 2177 2178 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) { 2179 (void) fprintf(stderr, gettext("mountpoint '%s' : " 2180 "%s\n"), buf, strerror(errno)); 2181 (void) fprintf(stderr, gettext("use '-m' " 2182 "option to provide a different default\n")); 2183 goto errout; 2184 } else if (dirp) { 2185 int count = 0; 2186 2187 while (count < 3 && readdir(dirp) != NULL) 2188 count++; 2189 (void) closedir(dirp); 2190 2191 if (count > 2) { 2192 (void) fprintf(stderr, gettext("mountpoint " 2193 "'%s' exists and is not empty\n"), buf); 2194 (void) fprintf(stderr, gettext("use '-m' " 2195 "option to provide a " 2196 "different default\n")); 2197 goto errout; 2198 } 2199 } 2200 } 2201 2202 /* 2203 * Now that the mountpoint's validity has been checked, ensure that 2204 * the property is set appropriately prior to creating the pool. 2205 */ 2206 if (mountpoint != NULL) { 2207 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 2208 mountpoint, &fsprops, B_FALSE); 2209 if (ret != 0) 2210 goto errout; 2211 } 2212 2213 ret = 1; 2214 if (dryrun) { 2215 /* 2216 * For a dry run invocation, print out a basic message and run 2217 * through all the vdevs in the list and print out in an 2218 * appropriate hierarchy. 2219 */ 2220 (void) printf(gettext("would create '%s' with the " 2221 "following layout:\n\n"), poolname); 2222 2223 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0); 2224 print_vdev_tree(NULL, "dedup", nvroot, 0, 2225 VDEV_ALLOC_BIAS_DEDUP, 0); 2226 print_vdev_tree(NULL, "special", nvroot, 0, 2227 VDEV_ALLOC_BIAS_SPECIAL, 0); 2228 print_vdev_tree(NULL, "logs", nvroot, 0, 2229 VDEV_ALLOC_BIAS_LOG, 0); 2230 print_cache_list(nvroot, 0); 2231 print_spare_list(nvroot, 0); 2232 2233 ret = 0; 2234 } else { 2235 /* 2236 * Load in feature set. 2237 * Note: if compatibility property not given, we'll have 2238 * NULL, which means 'all features'. 2239 */ 2240 boolean_t requested_features[SPA_FEATURES]; 2241 if (zpool_do_load_compat(compat, requested_features) != 2242 ZPOOL_COMPATIBILITY_OK) 2243 goto errout; 2244 2245 /* 2246 * props contains list of features to enable. 
2247 * For each feature: 2248 * - remove it if feature@name=disabled 2249 * - leave it there if feature@name=enabled 2250 * - add it if: 2251 * - enable_pool_features (ie: no '-d' or '-o version') 2252 * - it's supported by the kernel module 2253 * - it's in the requested feature set 2254 * - warn if it's enabled but not in compat 2255 */ 2256 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 2257 char propname[MAXPATHLEN]; 2258 const char *propval; 2259 zfeature_info_t *feat = &spa_feature_table[i]; 2260 2261 (void) snprintf(propname, sizeof (propname), 2262 "feature@%s", feat->fi_uname); 2263 2264 if (!nvlist_lookup_string(props, propname, &propval)) { 2265 if (strcmp(propval, 2266 ZFS_FEATURE_DISABLED) == 0) { 2267 (void) nvlist_remove_all(props, 2268 propname); 2269 } else if (strcmp(propval, 2270 ZFS_FEATURE_ENABLED) == 0 && 2271 !requested_features[i]) { 2272 (void) fprintf(stderr, gettext( 2273 "Warning: feature \"%s\" enabled " 2274 "but is not in specified " 2275 "'compatibility' feature set.\n"), 2276 feat->fi_uname); 2277 } 2278 } else if ( 2279 enable_pool_features && 2280 feat->fi_zfs_mod_supported && 2281 requested_features[i]) { 2282 ret = add_prop_list(propname, 2283 ZFS_FEATURE_ENABLED, &props, B_TRUE); 2284 if (ret != 0) 2285 goto errout; 2286 } 2287 } 2288 2289 ret = 1; 2290 if (zpool_create(g_zfs, poolname, 2291 nvroot, props, fsprops) == 0) { 2292 zfs_handle_t *pool = zfs_open(g_zfs, 2293 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM); 2294 if (pool != NULL) { 2295 if (zfs_mount(pool, NULL, 0) == 0) { 2296 ret = zfs_share(pool, NULL); 2297 zfs_commit_shares(NULL); 2298 } 2299 zfs_close(pool); 2300 } 2301 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) { 2302 (void) fprintf(stderr, gettext("pool name may have " 2303 "been omitted\n")); 2304 } 2305 } 2306 2307 errout: 2308 nvlist_free(nvroot); 2309 nvlist_free(fsprops); 2310 nvlist_free(props); 2311 return (ret); 2312 badusage: 2313 nvlist_free(fsprops); 2314 nvlist_free(props); 2315 usage(B_FALSE); 2316 return (2); 2317 } 2318 2319 /* 2320 * zpool destroy <pool> 2321 * 2322 * -f Forcefully unmount any datasets 2323 * 2324 * Destroy the given pool. Automatically unmounts any datasets in the pool. 2325 */ 2326 int 2327 zpool_do_destroy(int argc, char **argv) 2328 { 2329 boolean_t force = B_FALSE; 2330 int c; 2331 char *pool; 2332 zpool_handle_t *zhp; 2333 int ret; 2334 2335 /* check options */ 2336 while ((c = getopt(argc, argv, "f")) != -1) { 2337 switch (c) { 2338 case 'f': 2339 force = B_TRUE; 2340 break; 2341 case '?': 2342 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 2343 optopt); 2344 usage(B_FALSE); 2345 } 2346 } 2347 2348 argc -= optind; 2349 argv += optind; 2350 2351 /* check arguments */ 2352 if (argc < 1) { 2353 (void) fprintf(stderr, gettext("missing pool argument\n")); 2354 usage(B_FALSE); 2355 } 2356 if (argc > 1) { 2357 (void) fprintf(stderr, gettext("too many arguments\n")); 2358 usage(B_FALSE); 2359 } 2360 2361 pool = argv[0]; 2362 2363 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) { 2364 /* 2365 * As a special case, check for use of '/' in the name, and 2366 * direct the user to use 'zfs destroy' instead. 
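 * (e.g. "zpool destroy tank/home" fails here; "zfs destroy tank/home"
 * is the equivalent dataset operation.)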
2367 */ 2368 if (strchr(pool, '/') != NULL) 2369 (void) fprintf(stderr, gettext("use 'zfs destroy' to " 2370 "destroy a dataset\n")); 2371 return (1); 2372 } 2373 2374 if (zpool_disable_datasets(zhp, force) != 0) { 2375 (void) fprintf(stderr, gettext("could not destroy '%s': " 2376 "could not unmount datasets\n"), zpool_get_name(zhp)); 2377 zpool_close(zhp); 2378 return (1); 2379 } 2380 2381 /* The history must be logged as part of the export */ 2382 log_history = B_FALSE; 2383 2384 ret = (zpool_destroy(zhp, history_str) != 0); 2385 2386 zpool_close(zhp); 2387 2388 return (ret); 2389 } 2390 2391 typedef struct export_cbdata { 2392 tpool_t *tpool; 2393 pthread_mutex_t mnttab_lock; 2394 boolean_t force; 2395 boolean_t hardforce; 2396 int retval; 2397 } export_cbdata_t; 2398 2399 2400 typedef struct { 2401 char *aea_poolname; 2402 export_cbdata_t *aea_cbdata; 2403 } async_export_args_t; 2404 2405 /* 2406 * Export one pool 2407 */ 2408 static int 2409 zpool_export_one(zpool_handle_t *zhp, void *data) 2410 { 2411 export_cbdata_t *cb = data; 2412 2413 /* 2414 * zpool_disable_datasets() is not thread-safe for mnttab access. 2415 * So we serialize access here for 'zpool export -a' parallel case. 2416 */ 2417 if (cb->tpool != NULL) 2418 pthread_mutex_lock(&cb->mnttab_lock); 2419 2420 int retval = zpool_disable_datasets(zhp, cb->force); 2421 2422 if (cb->tpool != NULL) 2423 pthread_mutex_unlock(&cb->mnttab_lock); 2424 2425 if (retval) 2426 return (1); 2427 2428 if (cb->hardforce) { 2429 if (zpool_export_force(zhp, history_str) != 0) 2430 return (1); 2431 } else if (zpool_export(zhp, cb->force, history_str) != 0) { 2432 return (1); 2433 } 2434 2435 return (0); 2436 } 2437 2438 /* 2439 * Asynchronous export request 2440 */ 2441 static void 2442 zpool_export_task(void *arg) 2443 { 2444 async_export_args_t *aea = arg; 2445 2446 zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname); 2447 if (zhp != NULL) { 2448 int ret = zpool_export_one(zhp, aea->aea_cbdata); 2449 if (ret != 0) 2450 aea->aea_cbdata->retval = ret; 2451 zpool_close(zhp); 2452 } else { 2453 aea->aea_cbdata->retval = 1; 2454 } 2455 2456 free(aea->aea_poolname); 2457 free(aea); 2458 } 2459 2460 /* 2461 * Process an export request in parallel 2462 */ 2463 static int 2464 zpool_export_one_async(zpool_handle_t *zhp, void *data) 2465 { 2466 tpool_t *tpool = ((export_cbdata_t *)data)->tpool; 2467 async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t)); 2468 2469 /* save pool name since zhp will go out of scope */ 2470 aea->aea_poolname = strdup(zpool_get_name(zhp)); 2471 aea->aea_cbdata = data; 2472 2473 /* ship off actual export to another thread */ 2474 if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0) 2475 return (errno); /* unlikely */ 2476 else 2477 return (0); 2478 } 2479 2480 /* 2481 * zpool export [-f] <pool> ... 2482 * 2483 * -a Export all pools 2484 * -f Forcefully unmount datasets 2485 * 2486 * Export the given pools. By default, the command will attempt to cleanly 2487 * unmount any active datasets within the pool. If the '-f' flag is specified, 2488 * then the datasets will be forcefully unmounted. 
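 *
 * Illustrative examples (hypothetical pool name):
 *
 *	# zpool export tank
 *	# zpool export -a	(all pools are exported in parallel via a
 *				thread pool; see zpool_export_one_async())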
2489 */ 2490 int 2491 zpool_do_export(int argc, char **argv) 2492 { 2493 export_cbdata_t cb; 2494 boolean_t do_all = B_FALSE; 2495 boolean_t force = B_FALSE; 2496 boolean_t hardforce = B_FALSE; 2497 int c, ret; 2498 2499 /* check options */ 2500 while ((c = getopt(argc, argv, "afF")) != -1) { 2501 switch (c) { 2502 case 'a': 2503 do_all = B_TRUE; 2504 break; 2505 case 'f': 2506 force = B_TRUE; 2507 break; 2508 case 'F': 2509 hardforce = B_TRUE; 2510 break; 2511 case '?': 2512 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 2513 optopt); 2514 usage(B_FALSE); 2515 } 2516 } 2517 2518 cb.force = force; 2519 cb.hardforce = hardforce; 2520 cb.tpool = NULL; 2521 cb.retval = 0; 2522 argc -= optind; 2523 argv += optind; 2524 2525 /* The history will be logged as part of the export itself */ 2526 log_history = B_FALSE; 2527 2528 if (do_all) { 2529 if (argc != 0) { 2530 (void) fprintf(stderr, gettext("too many arguments\n")); 2531 usage(B_FALSE); 2532 } 2533 2534 cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 2535 0, NULL); 2536 pthread_mutex_init(&cb.mnttab_lock, NULL); 2537 2538 /* Asynchronously call zpool_export_one using thread pool */ 2539 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 2540 B_FALSE, zpool_export_one_async, &cb); 2541 2542 tpool_wait(cb.tpool); 2543 tpool_destroy(cb.tpool); 2544 (void) pthread_mutex_destroy(&cb.mnttab_lock); 2545 2546 return (ret | cb.retval); 2547 } 2548 2549 /* check arguments */ 2550 if (argc < 1) { 2551 (void) fprintf(stderr, gettext("missing pool argument\n")); 2552 usage(B_FALSE); 2553 } 2554 2555 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 2556 B_FALSE, zpool_export_one, &cb); 2557 2558 return (ret); 2559 } 2560 2561 /* 2562 * Given a vdev configuration, determine the maximum width needed for the device 2563 * name column. 2564 */ 2565 static int 2566 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max, 2567 int name_flags) 2568 { 2569 static const char *const subtypes[] = 2570 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN}; 2571 2572 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags); 2573 max = MAX(strlen(name) + depth, max); 2574 free(name); 2575 2576 nvlist_t **child; 2577 uint_t children; 2578 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i) 2579 if (nvlist_lookup_nvlist_array(nv, subtypes[i], 2580 &child, &children) == 0) 2581 for (uint_t c = 0; c < children; ++c) 2582 max = MAX(max_width(zhp, child[c], depth + 2, 2583 max, name_flags), max); 2584 2585 return (max); 2586 } 2587 2588 typedef struct status_cbdata { 2589 int cb_count; 2590 int cb_name_flags; 2591 int cb_namewidth; 2592 boolean_t cb_allpools; 2593 boolean_t cb_verbose; 2594 boolean_t cb_literal; 2595 boolean_t cb_explain; 2596 boolean_t cb_first; 2597 boolean_t cb_dedup_stats; 2598 boolean_t cb_print_unhealthy; 2599 boolean_t cb_print_status; 2600 boolean_t cb_print_slow_ios; 2601 boolean_t cb_print_dio_verify; 2602 boolean_t cb_print_vdev_init; 2603 boolean_t cb_print_vdev_trim; 2604 vdev_cmd_data_list_t *vcdl; 2605 boolean_t cb_print_power; 2606 boolean_t cb_json; 2607 boolean_t cb_flat_vdevs; 2608 nvlist_t *cb_jsobj; 2609 boolean_t cb_json_as_int; 2610 boolean_t cb_json_pool_key_guid; 2611 } status_cbdata_t; 2612 2613 /* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. 
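 * e.g. is_blank_str(NULL), is_blank_str("") and is_blank_str("  ") all
 * return B_TRUE.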
*/ 2614 static boolean_t 2615 is_blank_str(const char *str) 2616 { 2617 for (; str != NULL && *str != '\0'; ++str) 2618 if (!isblank(*str)) 2619 return (B_FALSE); 2620 return (B_TRUE); 2621 } 2622 2623 static void 2624 zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path, 2625 nvlist_t *item) 2626 { 2627 vdev_cmd_data_t *data; 2628 int i, j, k = 1; 2629 char tmp[256]; 2630 const char *val; 2631 2632 for (i = 0; i < vcdl->count; i++) { 2633 if ((strcmp(vcdl->data[i].path, path) != 0) || 2634 (strcmp(vcdl->data[i].pool, pool) != 0)) 2635 continue; 2636 2637 data = &vcdl->data[i]; 2638 for (j = 0; j < vcdl->uniq_cols_cnt; j++) { 2639 val = NULL; 2640 for (int k = 0; k < data->cols_cnt; k++) { 2641 if (strcmp(data->cols[k], 2642 vcdl->uniq_cols[j]) == 0) { 2643 val = data->lines[k]; 2644 break; 2645 } 2646 } 2647 if (val == NULL || is_blank_str(val)) 2648 val = "-"; 2649 fnvlist_add_string(item, vcdl->uniq_cols[j], val); 2650 } 2651 2652 for (j = data->cols_cnt; j < data->lines_cnt; j++) { 2653 if (data->lines[j]) { 2654 snprintf(tmp, 256, "extra_%d", k++); 2655 fnvlist_add_string(item, tmp, 2656 data->lines[j]); 2657 } 2658 } 2659 break; 2660 } 2661 } 2662 2663 /* Print command output lines for specific vdev in a specific pool */ 2664 static void 2665 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path) 2666 { 2667 vdev_cmd_data_t *data; 2668 int i, j; 2669 const char *val; 2670 2671 for (i = 0; i < vcdl->count; i++) { 2672 if ((strcmp(vcdl->data[i].path, path) != 0) || 2673 (strcmp(vcdl->data[i].pool, pool) != 0)) { 2674 /* Not the vdev we're looking for */ 2675 continue; 2676 } 2677 2678 data = &vcdl->data[i]; 2679 /* Print out all the output values for this vdev */ 2680 for (j = 0; j < vcdl->uniq_cols_cnt; j++) { 2681 val = NULL; 2682 /* Does this vdev have values for this column? */ 2683 for (int k = 0; k < data->cols_cnt; k++) { 2684 if (strcmp(data->cols[k], 2685 vcdl->uniq_cols[j]) == 0) { 2686 /* yes it does, record the value */ 2687 val = data->lines[k]; 2688 break; 2689 } 2690 } 2691 /* 2692 * Mark empty values with dashes to make output 2693 * awk-able. 2694 */ 2695 if (val == NULL || is_blank_str(val)) 2696 val = "-"; 2697 2698 printf("%*s", vcdl->uniq_cols_width[j], val); 2699 if (j < vcdl->uniq_cols_cnt - 1) 2700 fputs(" ", stdout); 2701 } 2702 2703 /* Print out any values that aren't in a column at the end */ 2704 for (j = data->cols_cnt; j < data->lines_cnt; j++) { 2705 /* Did we have any columns? If so print a spacer. 
*/ 2706 if (vcdl->uniq_cols_cnt > 0) 2707 fputs(" ", stdout); 2708 2709 val = data->lines[j]; 2710 fputs(val ?: "", stdout); 2711 } 2712 break; 2713 } 2714 } 2715 2716 /* 2717 * Print vdev initialization status for leaves 2718 */ 2719 static void 2720 print_status_initialize(vdev_stat_t *vs, boolean_t verbose) 2721 { 2722 if (verbose) { 2723 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE || 2724 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED || 2725 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) && 2726 !vs->vs_scan_removing) { 2727 char zbuf[1024]; 2728 char tbuf[256]; 2729 2730 time_t t = vs->vs_initialize_action_time; 2731 int initialize_pct = 100; 2732 if (vs->vs_initialize_state != 2733 VDEV_INITIALIZE_COMPLETE) { 2734 initialize_pct = (vs->vs_initialize_bytes_done * 2735 100 / (vs->vs_initialize_bytes_est + 1)); 2736 } 2737 2738 (void) ctime_r(&t, tbuf); 2739 tbuf[24] = 0; 2740 2741 switch (vs->vs_initialize_state) { 2742 case VDEV_INITIALIZE_SUSPENDED: 2743 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2744 gettext("suspended, started at"), tbuf); 2745 break; 2746 case VDEV_INITIALIZE_ACTIVE: 2747 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2748 gettext("started at"), tbuf); 2749 break; 2750 case VDEV_INITIALIZE_COMPLETE: 2751 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2752 gettext("completed at"), tbuf); 2753 break; 2754 } 2755 2756 (void) printf(gettext(" (%d%% initialized%s)"), 2757 initialize_pct, zbuf); 2758 } else { 2759 (void) printf(gettext(" (uninitialized)")); 2760 } 2761 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) { 2762 (void) printf(gettext(" (initializing)")); 2763 } 2764 } 2765 2766 /* 2767 * Print vdev TRIM status for leaves 2768 */ 2769 static void 2770 print_status_trim(vdev_stat_t *vs, boolean_t verbose) 2771 { 2772 if (verbose) { 2773 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE || 2774 vs->vs_trim_state == VDEV_TRIM_SUSPENDED || 2775 vs->vs_trim_state == VDEV_TRIM_COMPLETE) && 2776 !vs->vs_scan_removing) { 2777 char zbuf[1024]; 2778 char tbuf[256]; 2779 2780 time_t t = vs->vs_trim_action_time; 2781 int trim_pct = 100; 2782 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) { 2783 trim_pct = (vs->vs_trim_bytes_done * 2784 100 / (vs->vs_trim_bytes_est + 1)); 2785 } 2786 2787 (void) ctime_r(&t, tbuf); 2788 tbuf[24] = 0; 2789 2790 switch (vs->vs_trim_state) { 2791 case VDEV_TRIM_SUSPENDED: 2792 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2793 gettext("suspended, started at"), tbuf); 2794 break; 2795 case VDEV_TRIM_ACTIVE: 2796 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2797 gettext("started at"), tbuf); 2798 break; 2799 case VDEV_TRIM_COMPLETE: 2800 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2801 gettext("completed at"), tbuf); 2802 break; 2803 } 2804 2805 (void) printf(gettext(" (%d%% trimmed%s)"), 2806 trim_pct, zbuf); 2807 } else if (vs->vs_trim_notsup) { 2808 (void) printf(gettext(" (trim unsupported)")); 2809 } else { 2810 (void) printf(gettext(" (untrimmed)")); 2811 } 2812 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) { 2813 (void) printf(gettext(" (trimming)")); 2814 } 2815 } 2816 2817 /* 2818 * Return the color associated with a health string. This includes returning 2819 * NULL for no color change. 
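 * e.g. FAULTED, SUSPENDED and UNAVAIL map to ANSI_RED; OFFLINE, DEGRADED
 * and REMOVED map to ANSI_YELLOW; any other state (such as ONLINE) maps
 * to NULL.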
2820 */ 2821 static const char * 2822 health_str_to_color(const char *health) 2823 { 2824 if (strcmp(health, gettext("FAULTED")) == 0 || 2825 strcmp(health, gettext("SUSPENDED")) == 0 || 2826 strcmp(health, gettext("UNAVAIL")) == 0) { 2827 return (ANSI_RED); 2828 } 2829 2830 if (strcmp(health, gettext("OFFLINE")) == 0 || 2831 strcmp(health, gettext("DEGRADED")) == 0 || 2832 strcmp(health, gettext("REMOVED")) == 0) { 2833 return (ANSI_YELLOW); 2834 } 2835 2836 return (NULL); 2837 } 2838 2839 /* 2840 * Called for each leaf vdev. Returns 0 if the vdev is healthy. 2841 * A vdev is unhealthy if any of the following are true: 2842 * 1) there are read, write, or checksum errors, 2843 * 2) its state is not ONLINE, or 2844 * 3) slow IO reporting was requested (-s) and there are slow IOs. 2845 */ 2846 static int 2847 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data) 2848 { 2849 status_cbdata_t *cb = data; 2850 vdev_stat_t *vs; 2851 uint_t vsc; 2852 (void) hdl_data; 2853 2854 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2855 (uint64_t **)&vs, &vsc) != 0) 2856 return (1); 2857 2858 if (vs->vs_checksum_errors || vs->vs_read_errors || 2859 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY) 2860 return (1); 2861 2862 if (cb->cb_print_slow_ios && vs->vs_slow_ios) 2863 return (1); 2864 2865 return (0); 2866 } 2867 2868 /* 2869 * Print out configuration state as requested by status_callback. 2870 */ 2871 static void 2872 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name, 2873 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs) 2874 { 2875 nvlist_t **child, *root; 2876 uint_t c, i, vsc, children; 2877 pool_scan_stat_t *ps = NULL; 2878 vdev_stat_t *vs; 2879 char rbuf[6], wbuf[6], cbuf[6], dbuf[6]; 2880 char *vname; 2881 uint64_t notpresent; 2882 spare_cbdata_t spare_cb; 2883 const char *state; 2884 const char *type; 2885 const char *path = NULL; 2886 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL, 2887 *scolor = NULL; 2888 2889 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2890 &child, &children) != 0) 2891 children = 0; 2892 2893 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2894 (uint64_t **)&vs, &vsc) == 0); 2895 2896 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 2897 2898 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) 2899 return; 2900 2901 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 2902 2903 if (isspare) { 2904 /* 2905 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for 2906 * online drives. 2907 */ 2908 if (vs->vs_aux == VDEV_AUX_SPARED) 2909 state = gettext("INUSE"); 2910 else if (vs->vs_state == VDEV_STATE_HEALTHY) 2911 state = gettext("AVAIL"); 2912 } 2913 2914 /* 2915 * If '-e' is specified then top-level vdevs and their children 2916 * can be pruned if all of their leaves are healthy. 
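 * (Health here is what vdev_health_check_cb() above reports: error
 * counters, vdev state and, when -s was given, slow I/O counts.)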
2917 */ 2918 if (cb->cb_print_unhealthy && depth > 0 && 2919 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) { 2920 return; 2921 } 2922 2923 printf_color(health_str_to_color(state), 2924 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth, 2925 name, state); 2926 2927 if (!isspare) { 2928 if (vs->vs_read_errors) 2929 rcolor = ANSI_RED; 2930 2931 if (vs->vs_write_errors) 2932 wcolor = ANSI_RED; 2933 2934 if (vs->vs_checksum_errors) 2935 ccolor = ANSI_RED; 2936 2937 if (vs->vs_slow_ios) 2938 scolor = ANSI_BLUE; 2939 2940 if (cb->cb_literal) { 2941 fputc(' ', stdout); 2942 printf_color(rcolor, "%5llu", 2943 (u_longlong_t)vs->vs_read_errors); 2944 fputc(' ', stdout); 2945 printf_color(wcolor, "%5llu", 2946 (u_longlong_t)vs->vs_write_errors); 2947 fputc(' ', stdout); 2948 printf_color(ccolor, "%5llu", 2949 (u_longlong_t)vs->vs_checksum_errors); 2950 } else { 2951 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf)); 2952 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf)); 2953 zfs_nicenum(vs->vs_checksum_errors, cbuf, 2954 sizeof (cbuf)); 2955 fputc(' ', stdout); 2956 printf_color(rcolor, "%5s", rbuf); 2957 fputc(' ', stdout); 2958 printf_color(wcolor, "%5s", wbuf); 2959 fputc(' ', stdout); 2960 printf_color(ccolor, "%5s", cbuf); 2961 } 2962 if (cb->cb_print_slow_ios) { 2963 if (children == 0) { 2964 /* Only leaf vdevs have slow IOs */ 2965 zfs_nicenum(vs->vs_slow_ios, rbuf, 2966 sizeof (rbuf)); 2967 } else { 2968 snprintf(rbuf, sizeof (rbuf), "-"); 2969 } 2970 2971 if (cb->cb_literal) 2972 printf_color(scolor, " %5llu", 2973 (u_longlong_t)vs->vs_slow_ios); 2974 else 2975 printf_color(scolor, " %5s", rbuf); 2976 } 2977 if (cb->cb_print_power) { 2978 if (children == 0) { 2979 /* Only leaf vdevs have physical slots */ 2980 switch (zpool_power_current_state(zhp, (char *) 2981 fnvlist_lookup_string(nv, 2982 ZPOOL_CONFIG_PATH))) { 2983 case 0: 2984 printf_color(ANSI_RED, " %5s", 2985 gettext("off")); 2986 break; 2987 case 1: 2988 printf(" %5s", gettext("on")); 2989 break; 2990 default: 2991 printf(" %5s", "-"); 2992 } 2993 } else { 2994 printf(" %5s", "-"); 2995 } 2996 } 2997 if (VDEV_STAT_VALID(vs_dio_verify_errors, vsc) && 2998 cb->cb_print_dio_verify) { 2999 zfs_nicenum(vs->vs_dio_verify_errors, dbuf, 3000 sizeof (dbuf)); 3001 3002 if (cb->cb_literal) 3003 printf(" %5llu", 3004 (u_longlong_t)vs->vs_dio_verify_errors); 3005 else 3006 printf(" %5s", dbuf); 3007 } 3008 } 3009 3010 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 3011 &notpresent) == 0) { 3012 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); 3013 (void) printf(" %s %s", gettext("was"), path); 3014 } else if (vs->vs_aux != 0) { 3015 (void) printf(" "); 3016 color_start(ANSI_RED); 3017 switch (vs->vs_aux) { 3018 case VDEV_AUX_OPEN_FAILED: 3019 (void) printf(gettext("cannot open")); 3020 break; 3021 3022 case VDEV_AUX_BAD_GUID_SUM: 3023 (void) printf(gettext("missing device")); 3024 break; 3025 3026 case VDEV_AUX_NO_REPLICAS: 3027 (void) printf(gettext("insufficient replicas")); 3028 break; 3029 3030 case VDEV_AUX_VERSION_NEWER: 3031 (void) printf(gettext("newer version")); 3032 break; 3033 3034 case VDEV_AUX_UNSUP_FEAT: 3035 (void) printf(gettext("unsupported feature(s)")); 3036 break; 3037 3038 case VDEV_AUX_ASHIFT_TOO_BIG: 3039 (void) printf(gettext("unsupported minimum blocksize")); 3040 break; 3041 3042 case VDEV_AUX_SPARED: 3043 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3044 &spare_cb.cb_guid) == 0); 3045 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) { 3046 if
(strcmp(zpool_get_name(spare_cb.cb_zhp), 3047 zpool_get_name(zhp)) == 0) 3048 (void) printf(gettext("currently in " 3049 "use")); 3050 else 3051 (void) printf(gettext("in use by " 3052 "pool '%s'"), 3053 zpool_get_name(spare_cb.cb_zhp)); 3054 zpool_close(spare_cb.cb_zhp); 3055 } else { 3056 (void) printf(gettext("currently in use")); 3057 } 3058 break; 3059 3060 case VDEV_AUX_ERR_EXCEEDED: 3061 if (vs->vs_read_errors + vs->vs_write_errors + 3062 vs->vs_checksum_errors == 0 && children == 0 && 3063 vs->vs_slow_ios > 0) { 3064 (void) printf(gettext("too many slow I/Os")); 3065 } else { 3066 (void) printf(gettext("too many errors")); 3067 } 3068 break; 3069 3070 case VDEV_AUX_IO_FAILURE: 3071 (void) printf(gettext("experienced I/O failures")); 3072 break; 3073 3074 case VDEV_AUX_BAD_LOG: 3075 (void) printf(gettext("bad intent log")); 3076 break; 3077 3078 case VDEV_AUX_EXTERNAL: 3079 (void) printf(gettext("external device fault")); 3080 break; 3081 3082 case VDEV_AUX_SPLIT_POOL: 3083 (void) printf(gettext("split into new pool")); 3084 break; 3085 3086 case VDEV_AUX_ACTIVE: 3087 (void) printf(gettext("currently in use")); 3088 break; 3089 3090 case VDEV_AUX_CHILDREN_OFFLINE: 3091 (void) printf(gettext("all children offline")); 3092 break; 3093 3094 case VDEV_AUX_BAD_LABEL: 3095 (void) printf(gettext("invalid label")); 3096 break; 3097 3098 default: 3099 (void) printf(gettext("corrupted data")); 3100 break; 3101 } 3102 color_end(); 3103 } else if (children == 0 && !isspare && 3104 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL && 3105 VDEV_STAT_VALID(vs_physical_ashift, vsc) && 3106 vs->vs_configured_ashift < vs->vs_physical_ashift) { 3107 (void) printf( 3108 gettext(" block size: %dB configured, %dB native"), 3109 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift); 3110 } 3111 3112 if (vs->vs_scan_removing != 0) { 3113 (void) printf(gettext(" (removing)")); 3114 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) { 3115 (void) printf(gettext(" (non-allocating)")); 3116 } 3117 3118 /* The root vdev has the scrub/resilver stats */ 3119 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 3120 ZPOOL_CONFIG_VDEV_TREE); 3121 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS, 3122 (uint64_t **)&ps, &c); 3123 3124 /* 3125 * If you force fault a drive that's resilvering, its scan stats can 3126 * get frozen in time, giving the false impression that it's 3127 * being resilvered. That's why we check the state to see if the vdev 3128 * is healthy before reporting "resilvering" or "repairing". 3129 */ 3130 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 && 3131 vs->vs_state == VDEV_STATE_HEALTHY) { 3132 if (vs->vs_scan_processed != 0) { 3133 (void) printf(gettext(" (%s)"), 3134 (ps->pss_func == POOL_SCAN_RESILVER) ? 3135 "resilvering" : "repairing"); 3136 } else if (vs->vs_resilver_deferred) { 3137 (void) printf(gettext(" (awaiting resilver)")); 3138 } 3139 } 3140 3141 /* The top-level vdevs have the rebuild stats */ 3142 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE && 3143 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) { 3144 if (vs->vs_rebuild_processed != 0) { 3145 (void) printf(gettext(" (resilvering)")); 3146 } 3147 } 3148 3149 if (cb->vcdl != NULL) { 3150 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 3151 printf(" "); 3152 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 3153 } 3154 } 3155 3156 /* Display vdev initialization and trim status for leaves. 
*/ 3157 if (children == 0) { 3158 print_status_initialize(vs, cb->cb_print_vdev_init); 3159 print_status_trim(vs, cb->cb_print_vdev_trim); 3160 } 3161 3162 (void) printf("\n"); 3163 3164 for (c = 0; c < children; c++) { 3165 uint64_t islog = B_FALSE, ishole = B_FALSE; 3166 3167 /* Don't print logs or holes here */ 3168 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3169 &islog); 3170 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 3171 &ishole); 3172 if (islog || ishole) 3173 continue; 3174 /* Only print normal classes here */ 3175 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 3176 continue; 3177 3178 /* Provide vdev_rebuild_stats to children if available */ 3179 if (vrs == NULL) { 3180 (void) nvlist_lookup_uint64_array(nv, 3181 ZPOOL_CONFIG_REBUILD_STATS, 3182 (uint64_t **)&vrs, &i); 3183 } 3184 3185 vname = zpool_vdev_name(g_zfs, zhp, child[c], 3186 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 3187 print_status_config(zhp, cb, vname, child[c], depth + 2, 3188 isspare, vrs); 3189 free(vname); 3190 } 3191 } 3192 3193 /* 3194 * Print the configuration of an exported pool. Iterate over all vdevs in the 3195 * pool, printing out the name and status for each one. 3196 */ 3197 static void 3198 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv, 3199 int depth) 3200 { 3201 nvlist_t **child; 3202 uint_t c, children; 3203 vdev_stat_t *vs; 3204 const char *type; 3205 char *vname; 3206 3207 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 3208 if (strcmp(type, VDEV_TYPE_MISSING) == 0 || 3209 strcmp(type, VDEV_TYPE_HOLE) == 0) 3210 return; 3211 3212 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3213 (uint64_t **)&vs, &c) == 0); 3214 3215 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name); 3216 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux)); 3217 3218 if (vs->vs_aux != 0) { 3219 (void) printf(" "); 3220 3221 switch (vs->vs_aux) { 3222 case VDEV_AUX_OPEN_FAILED: 3223 (void) printf(gettext("cannot open")); 3224 break; 3225 3226 case VDEV_AUX_BAD_GUID_SUM: 3227 (void) printf(gettext("missing device")); 3228 break; 3229 3230 case VDEV_AUX_NO_REPLICAS: 3231 (void) printf(gettext("insufficient replicas")); 3232 break; 3233 3234 case VDEV_AUX_VERSION_NEWER: 3235 (void) printf(gettext("newer version")); 3236 break; 3237 3238 case VDEV_AUX_UNSUP_FEAT: 3239 (void) printf(gettext("unsupported feature(s)")); 3240 break; 3241 3242 case VDEV_AUX_ERR_EXCEEDED: 3243 (void) printf(gettext("too many errors")); 3244 break; 3245 3246 case VDEV_AUX_ACTIVE: 3247 (void) printf(gettext("currently in use")); 3248 break; 3249 3250 case VDEV_AUX_CHILDREN_OFFLINE: 3251 (void) printf(gettext("all children offline")); 3252 break; 3253 3254 case VDEV_AUX_BAD_LABEL: 3255 (void) printf(gettext("invalid label")); 3256 break; 3257 3258 default: 3259 (void) printf(gettext("corrupted data")); 3260 break; 3261 } 3262 } 3263 (void) printf("\n"); 3264 3265 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 3266 &child, &children) != 0) 3267 return; 3268 3269 for (c = 0; c < children; c++) { 3270 uint64_t is_log = B_FALSE; 3271 3272 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3273 &is_log); 3274 if (is_log) 3275 continue; 3276 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 3277 continue; 3278 3279 vname = zpool_vdev_name(g_zfs, NULL, child[c], 3280 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 3281 print_import_config(cb, vname, child[c], depth + 2); 3282 free(vname); 3283 } 3284 3285 if 
(nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 3286 &child, &children) == 0) { 3287 (void) printf(gettext("\tcache\n")); 3288 for (c = 0; c < children; c++) { 3289 vname = zpool_vdev_name(g_zfs, NULL, child[c], 3290 cb->cb_name_flags); 3291 (void) printf("\t %s\n", vname); 3292 free(vname); 3293 } 3294 } 3295 3296 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 3297 &child, &children) == 0) { 3298 (void) printf(gettext("\tspares\n")); 3299 for (c = 0; c < children; c++) { 3300 vname = zpool_vdev_name(g_zfs, NULL, child[c], 3301 cb->cb_name_flags); 3302 (void) printf("\t %s\n", vname); 3303 free(vname); 3304 } 3305 } 3306 } 3307 3308 /* 3309 * Print specialized class vdevs. 3310 * 3311 * These are recorded as top level vdevs in the main pool child array 3312 * but with "is_log" set to 1 or an "alloc_bias" string. We use either 3313 * print_status_config() or print_import_config() to print the top level 3314 * class vdevs then any of their children (eg mirrored slogs) are printed 3315 * recursively - which works because only the top level vdev is marked. 3316 */ 3317 static void 3318 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 3319 const char *class) 3320 { 3321 uint_t c, children; 3322 nvlist_t **child; 3323 boolean_t printed = B_FALSE; 3324 3325 assert(zhp != NULL || !cb->cb_verbose); 3326 3327 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, 3328 &children) != 0) 3329 return; 3330 3331 for (c = 0; c < children; c++) { 3332 uint64_t is_log = B_FALSE; 3333 const char *bias = NULL; 3334 const char *type = NULL; 3335 3336 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3337 &is_log); 3338 3339 if (is_log) { 3340 bias = (char *)VDEV_ALLOC_CLASS_LOGS; 3341 } else { 3342 (void) nvlist_lookup_string(child[c], 3343 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 3344 (void) nvlist_lookup_string(child[c], 3345 ZPOOL_CONFIG_TYPE, &type); 3346 } 3347 3348 if (bias == NULL || strcmp(bias, class) != 0) 3349 continue; 3350 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 3351 continue; 3352 3353 if (!printed) { 3354 (void) printf("\t%s\t\n", gettext(class)); 3355 printed = B_TRUE; 3356 } 3357 3358 char *name = zpool_vdev_name(g_zfs, zhp, child[c], 3359 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 3360 if (cb->cb_print_status) 3361 print_status_config(zhp, cb, name, child[c], 2, 3362 B_FALSE, NULL); 3363 else 3364 print_import_config(cb, name, child[c], 2); 3365 free(name); 3366 } 3367 } 3368 3369 /* 3370 * Display the status for the given pool. 
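 * Returns 0 once the state has been printed, or the non-OK
 * zpool_status_t reason when report_error is B_FALSE, which lets
 * cachefile-based imports defer error reporting to the scan phase.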
3371 */ 3372 static int 3373 show_import(nvlist_t *config, boolean_t report_error) 3374 { 3375 uint64_t pool_state; 3376 vdev_stat_t *vs; 3377 const char *name; 3378 uint64_t guid; 3379 uint64_t hostid = 0; 3380 const char *msgid; 3381 const char *hostname = "unknown"; 3382 nvlist_t *nvroot, *nvinfo; 3383 zpool_status_t reason; 3384 zpool_errata_t errata; 3385 const char *health; 3386 uint_t vsc; 3387 const char *comment; 3388 const char *indent; 3389 char buf[2048]; 3390 status_cbdata_t cb = { 0 }; 3391 3392 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3393 &name) == 0); 3394 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3395 &guid) == 0); 3396 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 3397 &pool_state) == 0); 3398 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3399 &nvroot) == 0); 3400 3401 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 3402 (uint64_t **)&vs, &vsc) == 0); 3403 health = zpool_state_to_name(vs->vs_state, vs->vs_aux); 3404 3405 reason = zpool_import_status(config, &msgid, &errata); 3406 3407 /* 3408 * If we're importing using a cachefile, then we won't report any 3409 * errors unless we are in the scan phase of the import. 3410 */ 3411 if (reason != ZPOOL_STATUS_OK && !report_error) 3412 return (reason); 3413 3414 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) { 3415 indent = " "; 3416 } else { 3417 comment = NULL; 3418 indent = ""; 3419 } 3420 3421 (void) printf(gettext("%s pool: %s\n"), indent, name); 3422 (void) printf(gettext("%s id: %llu\n"), indent, (u_longlong_t)guid); 3423 (void) printf(gettext("%s state: %s"), indent, health); 3424 if (pool_state == POOL_STATE_DESTROYED) 3425 (void) printf(gettext(" (DESTROYED)")); 3426 (void) printf("\n"); 3427 3428 if (reason != ZPOOL_STATUS_OK) { 3429 (void) printf("%s", indent); 3430 printf_color(ANSI_BOLD, gettext("status: ")); 3431 } 3432 switch (reason) { 3433 case ZPOOL_STATUS_MISSING_DEV_R: 3434 case ZPOOL_STATUS_MISSING_DEV_NR: 3435 case ZPOOL_STATUS_BAD_GUID_SUM: 3436 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3437 "missing from the system.\n")); 3438 break; 3439 3440 case ZPOOL_STATUS_CORRUPT_LABEL_R: 3441 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 3442 printf_color(ANSI_YELLOW, gettext("One or more devices " 3443 "contain corrupted data.\n")); 3444 break; 3445 3446 case ZPOOL_STATUS_CORRUPT_DATA: 3447 printf_color(ANSI_YELLOW, gettext("The pool data is " 3448 "corrupted.\n")); 3449 break; 3450 3451 case ZPOOL_STATUS_OFFLINE_DEV: 3452 printf_color(ANSI_YELLOW, gettext("One or more devices " 3453 "are offlined.\n")); 3454 break; 3455 3456 case ZPOOL_STATUS_CORRUPT_POOL: 3457 printf_color(ANSI_YELLOW, gettext("The pool metadata is " 3458 "corrupted.\n")); 3459 break; 3460 3461 case ZPOOL_STATUS_VERSION_OLDER: 3462 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 3463 "a legacy on-disk version.\n")); 3464 break; 3465 3466 case ZPOOL_STATUS_VERSION_NEWER: 3467 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 3468 "an incompatible version.\n")); 3469 break; 3470 3471 case ZPOOL_STATUS_FEAT_DISABLED: 3472 printf_color(ANSI_YELLOW, gettext("Some supported " 3473 "features are not enabled on the pool.\n" 3474 "\t%s(Note that they may be intentionally disabled if the\n" 3475 "\t%s'compatibility' property is set.)\n"), indent, indent); 3476 break; 3477 3478 case ZPOOL_STATUS_COMPATIBILITY_ERR: 3479 printf_color(ANSI_YELLOW, gettext("Error reading or parsing " 3480 "the file(s) 
indicated by the 'compatibility'\n" 3481 "\t%sproperty.\n"), indent); 3482 break; 3483 3484 case ZPOOL_STATUS_INCOMPATIBLE_FEAT: 3485 printf_color(ANSI_YELLOW, gettext("One or more features " 3486 "are enabled on the pool despite not being\n" 3487 "\t%srequested by the 'compatibility' property.\n"), 3488 indent); 3489 break; 3490 3491 case ZPOOL_STATUS_UNSUP_FEAT_READ: 3492 printf_color(ANSI_YELLOW, gettext("The pool uses the following " 3493 "feature(s) not supported on this system:\n")); 3494 color_start(ANSI_YELLOW); 3495 zpool_collect_unsup_feat(config, buf, 2048); 3496 (void) printf("%s", buf); 3497 color_end(); 3498 break; 3499 3500 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 3501 printf_color(ANSI_YELLOW, gettext("The pool can only be " 3502 "accessed in read-only mode on this system. It\n" 3503 "\t%scannot be accessed in read-write mode because it uses " 3504 "the following\n" 3505 "\t%sfeature(s) not supported on this system:\n"), 3506 indent, indent); 3507 color_start(ANSI_YELLOW); 3508 zpool_collect_unsup_feat(config, buf, 2048); 3509 (void) printf("%s", buf); 3510 color_end(); 3511 break; 3512 3513 case ZPOOL_STATUS_HOSTID_ACTIVE: 3514 printf_color(ANSI_YELLOW, gettext("The pool is currently " 3515 "imported by another system.\n")); 3516 break; 3517 3518 case ZPOOL_STATUS_HOSTID_REQUIRED: 3519 printf_color(ANSI_YELLOW, gettext("The pool has the " 3520 "multihost property on. It cannot\n" 3521 "\t%sbe safely imported when the system hostid is not " 3522 "set.\n"), indent); 3523 break; 3524 3525 case ZPOOL_STATUS_HOSTID_MISMATCH: 3526 printf_color(ANSI_YELLOW, gettext("The pool was last accessed " 3527 "by another system.\n")); 3528 break; 3529 3530 case ZPOOL_STATUS_FAULTED_DEV_R: 3531 case ZPOOL_STATUS_FAULTED_DEV_NR: 3532 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3533 "faulted.\n")); 3534 break; 3535 3536 case ZPOOL_STATUS_BAD_LOG: 3537 printf_color(ANSI_YELLOW, gettext("An intent log record cannot " 3538 "be read.\n")); 3539 break; 3540 3541 case ZPOOL_STATUS_RESILVERING: 3542 case ZPOOL_STATUS_REBUILDING: 3543 printf_color(ANSI_YELLOW, gettext("One or more devices were " 3544 "being resilvered.\n")); 3545 break; 3546 3547 case ZPOOL_STATUS_ERRATA: 3548 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"), 3549 errata); 3550 break; 3551 3552 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 3553 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3554 "configured to use a non-native block size.\n" 3555 "\t%sExpect reduced performance.\n"), indent); 3556 break; 3557 3558 default: 3559 /* 3560 * No other status can be seen when importing pools. 3561 */ 3562 assert(reason == ZPOOL_STATUS_OK); 3563 } 3564 3565 /* 3566 * Print out an action according to the overall state of the pool. 
3567 */ 3568 if (vs->vs_state != VDEV_STATE_HEALTHY || 3569 reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) { 3570 (void) printf("%s", indent); 3571 (void) printf(gettext("action: ")); 3572 } 3573 if (vs->vs_state == VDEV_STATE_HEALTHY) { 3574 if (reason == ZPOOL_STATUS_VERSION_OLDER || 3575 reason == ZPOOL_STATUS_FEAT_DISABLED) { 3576 (void) printf(gettext("The pool can be imported using " 3577 "its name or numeric identifier, though\n" 3578 "\t%ssome features will not be available without " 3579 "an explicit 'zpool upgrade'.\n"), indent); 3580 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) { 3581 (void) printf(gettext("The pool can be imported using " 3582 "its name or numeric\n" 3583 "\t%sidentifier, though the file(s) indicated by " 3584 "its 'compatibility'\n" 3585 "\t%sproperty cannot be parsed at this time.\n"), 3586 indent, indent); 3587 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) { 3588 (void) printf(gettext("The pool can be imported using " 3589 "its name or numeric identifier and\n" 3590 "\t%sthe '-f' flag.\n"), indent); 3591 } else if (reason == ZPOOL_STATUS_ERRATA) { 3592 switch (errata) { 3593 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 3594 (void) printf(gettext("The pool can be " 3595 "imported using its name or numeric " 3596 "identifier,\n" 3597 "\t%showever there is a compatibility " 3598 "issue which should be corrected\n" 3599 "\t%sby running 'zpool scrub'\n"), 3600 indent, indent); 3601 break; 3602 3603 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY: 3604 (void) printf(gettext("The pool cannot be " 3605 "imported with this version of ZFS due to\n" 3606 "\t%san active asynchronous destroy. " 3607 "Revert to an earlier version\n" 3608 "\t%sand allow the destroy to complete " 3609 "before updating.\n"), indent, indent); 3610 break; 3611 3612 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 3613 (void) printf(gettext("Existing encrypted " 3614 "datasets contain an on-disk " 3615 "incompatibility, which\n" 3616 "\t%sneeds to be corrected. Backup these " 3617 "datasets to new encrypted datasets\n" 3618 "\t%sand destroy the old ones.\n"), 3619 indent, indent); 3620 break; 3621 3622 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 3623 (void) printf(gettext("Existing encrypted " 3624 "snapshots and bookmarks contain an " 3625 "on-disk\n" 3626 "\t%sincompatibility. This may cause " 3627 "on-disk corruption if they are used\n" 3628 "\t%swith 'zfs recv'. To correct the " 3629 "issue, enable the bookmark_v2 feature.\n" 3630 "\t%sNo additional action is needed if " 3631 "there are no encrypted snapshots or\n" 3632 "\t%sbookmarks. If preserving the " 3633 "encrypted snapshots and bookmarks is\n" 3634 "\t%srequired, use a non-raw send to " 3635 "backup and restore them. Alternately,\n" 3636 "\t%sthey may be removed to resolve the " 3637 "incompatibility.\n"), indent, indent, 3638 indent, indent, indent, indent); 3639 break; 3640 default: 3641 /* 3642 * All errata must contain an action message. 3643 */ 3644 assert(errata == ZPOOL_ERRATA_NONE); 3645 } 3646 } else { 3647 (void) printf(gettext("The pool can be imported using " 3648 "its name or numeric identifier.\n")); 3649 } 3650 } else if (vs->vs_state == VDEV_STATE_DEGRADED) { 3651 (void) printf(gettext("The pool can be imported despite " 3652 "missing or damaged devices. The\n" 3653 "\t%sfault tolerance of the pool may be compromised if " 3654 "imported.\n"), indent); 3655 } else { 3656 switch (reason) { 3657 case ZPOOL_STATUS_VERSION_NEWER: 3658 (void) printf(gettext("The pool cannot be imported. 
" 3659 "Access the pool on a system running newer\n" 3660 "\t%ssoftware, or recreate the pool from " 3661 "backup.\n"), indent); 3662 break; 3663 case ZPOOL_STATUS_UNSUP_FEAT_READ: 3664 (void) printf(gettext("The pool cannot be imported. " 3665 "Access the pool on a system that supports\n" 3666 "\t%sthe required feature(s), or recreate the pool " 3667 "from backup.\n"), indent); 3668 break; 3669 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 3670 (void) printf(gettext("The pool cannot be imported in " 3671 "read-write mode. Import the pool with\n" 3672 "\t%s'-o readonly=on', access the pool on a system " 3673 "that supports the\n" 3674 "\t%srequired feature(s), or recreate the pool " 3675 "from backup.\n"), indent, indent); 3676 break; 3677 case ZPOOL_STATUS_MISSING_DEV_R: 3678 case ZPOOL_STATUS_MISSING_DEV_NR: 3679 case ZPOOL_STATUS_BAD_GUID_SUM: 3680 (void) printf(gettext("The pool cannot be imported. " 3681 "Attach the missing\n" 3682 "\t%sdevices and try again.\n"), indent); 3683 break; 3684 case ZPOOL_STATUS_HOSTID_ACTIVE: 3685 VERIFY0(nvlist_lookup_nvlist(config, 3686 ZPOOL_CONFIG_LOAD_INFO, &nvinfo)); 3687 3688 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 3689 hostname = fnvlist_lookup_string(nvinfo, 3690 ZPOOL_CONFIG_MMP_HOSTNAME); 3691 3692 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3693 hostid = fnvlist_lookup_uint64(nvinfo, 3694 ZPOOL_CONFIG_MMP_HOSTID); 3695 3696 (void) printf(gettext("The pool must be exported from " 3697 "%s (hostid=%"PRIx64")\n" 3698 "\t%sbefore it can be safely imported.\n"), 3699 hostname, hostid, indent); 3700 break; 3701 case ZPOOL_STATUS_HOSTID_REQUIRED: 3702 (void) printf(gettext("Set a unique system hostid with " 3703 "the zgenhostid(8) command.\n")); 3704 break; 3705 default: 3706 (void) printf(gettext("The pool cannot be imported due " 3707 "to damaged devices or data.\n")); 3708 } 3709 } 3710 3711 /* Print the comment attached to the pool. 
*/ 3712 if (comment != NULL) 3713 (void) printf(gettext("comment: %s\n"), comment); 3714 3715 /* 3716 * If the state is "closed" or "can't open", and the aux state 3717 * is "corrupt data": 3718 */ 3719 if ((vs->vs_state == VDEV_STATE_CLOSED || 3720 vs->vs_state == VDEV_STATE_CANT_OPEN) && 3721 vs->vs_aux == VDEV_AUX_CORRUPT_DATA) { 3722 if (pool_state == POOL_STATE_DESTROYED) 3723 (void) printf(gettext("\t%sThe pool was destroyed, " 3724 "but can be imported using the '-Df' flags.\n"), 3725 indent); 3726 else if (pool_state != POOL_STATE_EXPORTED) 3727 (void) printf(gettext("\t%sThe pool may be active on " 3728 "another system, but can be imported using\n" 3729 "\t%sthe '-f' flag.\n"), indent, indent); 3730 } 3731 3732 if (msgid != NULL) { 3733 (void) printf(gettext("%s see: " 3734 "https://openzfs.github.io/openzfs-docs/msg/%s\n"), 3735 indent, msgid); 3736 } 3737 3738 (void) printf(gettext("%sconfig:\n\n"), indent); 3739 3740 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name), 3741 VDEV_NAME_TYPE_ID); 3742 if (cb.cb_namewidth < 10) 3743 cb.cb_namewidth = 10; 3744 3745 print_import_config(&cb, name, nvroot, 0); 3746 3747 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP); 3748 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 3749 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS); 3750 3751 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) { 3752 (void) printf(gettext("\n\t%sAdditional devices are known to " 3753 "be part of this pool, though their\n" 3754 "\t%sexact configuration cannot be determined.\n"), 3755 indent, indent); 3756 } 3757 return (0); 3758 } 3759 3760 static boolean_t 3761 zfs_force_import_required(nvlist_t *config) 3762 { 3763 uint64_t state; 3764 uint64_t hostid = 0; 3765 nvlist_t *nvinfo; 3766 3767 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE); 3768 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3769 3770 /* 3771 * The hostid on LOAD_INFO comes from the MOS label via 3772 * spa_tryimport(). If it's not there then we're likely talking to an 3773 * older kernel, so use the top one, which will be from the label 3774 * discovered in zpool_find_import(), or if a cachefile is in use, the 3775 * local hostid. 3776 */ 3777 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0) 3778 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, 3779 &hostid); 3780 3781 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid()) 3782 return (B_TRUE); 3783 3784 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) { 3785 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo, 3786 ZPOOL_CONFIG_MMP_STATE); 3787 3788 if (mmp_state != MMP_STATE_INACTIVE) 3789 return (B_TRUE); 3790 } 3791 3792 return (B_FALSE); 3793 } 3794 3795 /* 3796 * Perform the import for the given configuration. This passes the heavy 3797 * lifting off to zpool_import_props(), and then mounts the datasets contained 3798 * within the pool.
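 * (When ZFS_IMPORT_LOAD_KEYS is set, loading encryption keys is a
 * best-effort step: a failure is reflected in the return value but does
 * not stop the datasets from being mounted.)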
3799 */ 3800 static int 3801 do_import(nvlist_t *config, const char *newname, const char *mntopts, 3802 nvlist_t *props, int flags, uint_t mntthreads) 3803 { 3804 int ret = 0; 3805 int ms_status = 0; 3806 zpool_handle_t *zhp; 3807 const char *name; 3808 uint64_t version; 3809 3810 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME); 3811 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION); 3812 3813 if (!SPA_VERSION_IS_SUPPORTED(version)) { 3814 (void) fprintf(stderr, gettext("cannot import '%s': pool " 3815 "is formatted using an unsupported ZFS version\n"), name); 3816 return (1); 3817 } else if (zfs_force_import_required(config) && 3818 !(flags & ZFS_IMPORT_ANY_HOST)) { 3819 mmp_state_t mmp_state = MMP_STATE_INACTIVE; 3820 nvlist_t *nvinfo; 3821 3822 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3823 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) 3824 mmp_state = fnvlist_lookup_uint64(nvinfo, 3825 ZPOOL_CONFIG_MMP_STATE); 3826 3827 if (mmp_state == MMP_STATE_ACTIVE) { 3828 const char *hostname = "<unknown>"; 3829 uint64_t hostid = 0; 3830 3831 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 3832 hostname = fnvlist_lookup_string(nvinfo, 3833 ZPOOL_CONFIG_MMP_HOSTNAME); 3834 3835 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3836 hostid = fnvlist_lookup_uint64(nvinfo, 3837 ZPOOL_CONFIG_MMP_HOSTID); 3838 3839 (void) fprintf(stderr, gettext("cannot import '%s': " 3840 "pool is imported on %s (hostid: " 3841 "0x%"PRIx64")\nExport the pool on the other " 3842 "system, then run 'zpool import'.\n"), 3843 name, hostname, hostid); 3844 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 3845 (void) fprintf(stderr, gettext("Cannot import '%s': " 3846 "pool has the multihost property on and the\n" 3847 "system's hostid is not set. Set a unique hostid " 3848 "with the zgenhostid(8) command.\n"), name); 3849 } else { 3850 const char *hostname = "<unknown>"; 3851 time_t timestamp = 0; 3852 uint64_t hostid = 0; 3853 3854 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME)) 3855 hostname = fnvlist_lookup_string(nvinfo, 3856 ZPOOL_CONFIG_HOSTNAME); 3857 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME)) 3858 hostname = fnvlist_lookup_string(config, 3859 ZPOOL_CONFIG_HOSTNAME); 3860 3861 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP)) 3862 timestamp = fnvlist_lookup_uint64(config, 3863 ZPOOL_CONFIG_TIMESTAMP); 3864 3865 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID)) 3866 hostid = fnvlist_lookup_uint64(nvinfo, 3867 ZPOOL_CONFIG_HOSTID); 3868 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID)) 3869 hostid = fnvlist_lookup_uint64(config, 3870 ZPOOL_CONFIG_HOSTID); 3871 3872 (void) fprintf(stderr, gettext("cannot import '%s': " 3873 "pool was previously in use from another system.\n" 3874 "Last accessed by %s (hostid=%"PRIx64") at %s" 3875 "The pool can be imported, use 'zpool import -f' " 3876 "to import the pool.\n"), name, hostname, 3877 hostid, ctime(&timestamp)); 3878 } 3879 3880 return (1); 3881 } 3882 3883 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0) 3884 return (1); 3885 3886 if (newname != NULL) 3887 name = newname; 3888 3889 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL) 3890 return (1); 3891 3892 /* 3893 * Loading keys is best effort. We don't want to return immediately 3894 * if it fails but we do want to give the error to the caller.
3895 */ 3896 if (flags & ZFS_IMPORT_LOAD_KEYS && 3897 zfs_crypto_attempt_load_keys(g_zfs, name) != 0) 3898 ret = 1; 3899 3900 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL && 3901 !(flags & ZFS_IMPORT_ONLY)) { 3902 ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads); 3903 if (ms_status == EZFS_SHAREFAILED) { 3904 (void) fprintf(stderr, gettext("Import was " 3905 "successful, but unable to share some datasets\n")); 3906 } else if (ms_status == EZFS_MOUNTFAILED) { 3907 (void) fprintf(stderr, gettext("Import was " 3908 "successful, but unable to mount some datasets\n")); 3909 } 3910 } 3911 3912 zpool_close(zhp); 3913 return (ret); 3914 } 3915 3916 typedef struct import_parameters { 3917 nvlist_t *ip_config; 3918 const char *ip_mntopts; 3919 nvlist_t *ip_props; 3920 int ip_flags; 3921 uint_t ip_mntthreads; 3922 int *ip_err; 3923 } import_parameters_t; 3924 3925 static void 3926 do_import_task(void *arg) 3927 { 3928 import_parameters_t *ip = arg; 3929 *ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts, 3930 ip->ip_props, ip->ip_flags, ip->ip_mntthreads); 3931 free(ip); 3932 } 3933 3934 3935 static int 3936 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags, 3937 char *orig_name, char *new_name, importargs_t *import) 3938 { 3939 nvlist_t *config = NULL; 3940 nvlist_t *found_config = NULL; 3941 uint64_t pool_state; 3942 boolean_t pool_specified = (import->poolname != NULL || 3943 import->guid != 0); 3944 uint_t npools = 0; 3945 3946 3947 tpool_t *tp = NULL; 3948 if (import->do_all) { 3949 tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 3950 0, NULL); 3951 } 3952 3953 /* 3954 * At this point we have a list of import candidate configs. Even if 3955 * we were searching by pool name or guid, we still need to 3956 * post-process the list to deal with pool state and possible 3957 * duplicate names. 3958 */ 3959 int err = 0; 3960 nvpair_t *elem = NULL; 3961 boolean_t first = B_TRUE; 3962 if (!pool_specified && import->do_all) { 3963 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) 3964 npools++; 3965 } 3966 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { 3967 3968 verify(nvpair_value_nvlist(elem, &config) == 0); 3969 3970 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 3971 &pool_state) == 0); 3972 if (!import->do_destroyed && 3973 pool_state == POOL_STATE_DESTROYED) 3974 continue; 3975 if (import->do_destroyed && 3976 pool_state != POOL_STATE_DESTROYED) 3977 continue; 3978 3979 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY, 3980 import->policy) == 0); 3981 3982 if (!pool_specified) { 3983 if (first) 3984 first = B_FALSE; 3985 else if (!import->do_all) 3986 (void) fputc('\n', stdout); 3987 3988 if (import->do_all) { 3989 import_parameters_t *ip = safe_malloc( 3990 sizeof (import_parameters_t)); 3991 3992 ip->ip_config = config; 3993 ip->ip_mntopts = mntopts; 3994 ip->ip_props = props; 3995 ip->ip_flags = flags; 3996 ip->ip_mntthreads = mount_tp_nthr / npools; 3997 ip->ip_err = &err; 3998 3999 (void) tpool_dispatch(tp, do_import_task, 4000 (void *)ip); 4001 } else { 4002 /* 4003 * If we're importing from cachefile, then 4004 * we don't want to report errors until we 4005 * are in the scan phase of the import. If 4006 * we get an error, then we return that error 4007 * to invoke the scan phase. 
4008 */ 4009 if (import->cachefile && !import->scan) 4010 err = show_import(config, B_FALSE); 4011 else 4012 (void) show_import(config, B_TRUE); 4013 } 4014 } else if (import->poolname != NULL) { 4015 const char *name; 4016 4017 /* 4018 * We are searching for a pool based on name. 4019 */ 4020 verify(nvlist_lookup_string(config, 4021 ZPOOL_CONFIG_POOL_NAME, &name) == 0); 4022 4023 if (strcmp(name, import->poolname) == 0) { 4024 if (found_config != NULL) { 4025 (void) fprintf(stderr, gettext( 4026 "cannot import '%s': more than " 4027 "one matching pool\n"), 4028 import->poolname); 4029 (void) fprintf(stderr, gettext( 4030 "import by numeric ID instead\n")); 4031 err = B_TRUE; 4032 } 4033 found_config = config; 4034 } 4035 } else { 4036 uint64_t guid; 4037 4038 /* 4039 * Search for a pool by guid. 4040 */ 4041 verify(nvlist_lookup_uint64(config, 4042 ZPOOL_CONFIG_POOL_GUID, &guid) == 0); 4043 4044 if (guid == import->guid) 4045 found_config = config; 4046 } 4047 } 4048 if (import->do_all) { 4049 tpool_wait(tp); 4050 tpool_destroy(tp); 4051 } 4052 4053 /* 4054 * If we were searching for a specific pool, verify that we found a 4055 * pool, and then do the import. 4056 */ 4057 if (pool_specified && err == 0) { 4058 if (found_config == NULL) { 4059 (void) fprintf(stderr, gettext("cannot import '%s': " 4060 "no such pool available\n"), orig_name); 4061 err = B_TRUE; 4062 } else { 4063 err |= do_import(found_config, new_name, 4064 mntopts, props, flags, mount_tp_nthr); 4065 } 4066 } 4067 4068 /* 4069 * If we were just looking for pools, report an error if none were 4070 * found. 4071 */ 4072 if (!pool_specified && first) 4073 (void) fprintf(stderr, 4074 gettext("no pools available to import\n")); 4075 return (err); 4076 } 4077 4078 typedef struct target_exists_args { 4079 const char *poolname; 4080 uint64_t poolguid; 4081 } target_exists_args_t; 4082 4083 static int 4084 name_or_guid_exists(zpool_handle_t *zhp, void *data) 4085 { 4086 target_exists_args_t *args = data; 4087 nvlist_t *config = zpool_get_config(zhp, NULL); 4088 int found = 0; 4089 4090 if (config == NULL) 4091 return (0); 4092 4093 if (args->poolname != NULL) { 4094 const char *pool_name; 4095 4096 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 4097 &pool_name) == 0); 4098 if (strcmp(pool_name, args->poolname) == 0) 4099 found = 1; 4100 } else { 4101 uint64_t pool_guid; 4102 4103 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 4104 &pool_guid) == 0); 4105 if (pool_guid == args->poolguid) 4106 found = 1; 4107 } 4108 zpool_close(zhp); 4109 4110 return (found); 4111 } 4112 /* 4113 * zpool checkpoint <pool> 4114 * checkpoint --discard <pool> 4115 * 4116 * -d Discard the checkpoint from a checkpointed 4117 * --discard pool. 4118 * 4119 * -w Wait for discarding a checkpoint to complete. 4120 * --wait 4121 * 4122 * Checkpoints the specified pool, by taking a "snapshot" of its 4123 * current state. A pool can only have one checkpoint at a time. 
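 *
 * Illustrative examples (pool name is hypothetical):
 *
 *	zpool checkpoint tank		# take a checkpoint of 'tank'
 *	zpool checkpoint -d -w tank	# discard it and wait for completion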
 */
int
zpool_do_checkpoint(int argc, char **argv)
{
	boolean_t discard, wait;
	char *pool;
	zpool_handle_t *zhp;
	int c, err;

	struct option long_options[] = {
		{"discard", no_argument, NULL, 'd'},
		{"wait", no_argument, NULL, 'w'},
		{0, 0, 0, 0}
	};

	discard = B_FALSE;
	wait = B_FALSE;
	while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
		switch (c) {
		case 'd':
			discard = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	if (wait && !discard) {
		(void) fprintf(stderr, gettext("--wait only valid when "
		    "--discard also specified\n"));
		usage(B_FALSE);
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}

	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	pool = argv[0];

	if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
		/* As a special case, check for use of '/' in the name */
		if (strchr(pool, '/') != NULL)
			(void) fprintf(stderr, gettext("'zpool checkpoint' "
			    "doesn't work on datasets. To save the state "
			    "of a dataset from a specific point in time "
			    "please use 'zfs snapshot'\n"));
		return (1);
	}

	if (discard) {
		err = (zpool_discard_checkpoint(zhp) != 0);
		if (err == 0 && wait)
			err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
	} else {
		err = (zpool_checkpoint(zhp) != 0);
	}

	zpool_close(zhp);

	return (err);
}

#define	CHECKPOINT_OPT	1024

/*
 * zpool prefetch <type> [<type opts>] <pool>
 *
 * Prefetches a particular type of data in the specified pool.
 */
int
zpool_do_prefetch(int argc, char **argv)
{
	int c;
	char *poolname;
	char *typestr = NULL;
	zpool_prefetch_type_t type;
	zpool_handle_t *zhp;
	int err = 0;

	while ((c = getopt(argc, argv, "t:")) != -1) {
		switch (c) {
		case 't':
			typestr = optarg;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	argc--;
	argv++;

	/* Reject a missing -t type up front rather than crash in strcmp() */
	if (typestr == NULL) {
		(void) fprintf(stderr, gettext("missing prefetch type\n"));
		usage(B_FALSE);
	}

	if (strcmp(typestr, "ddt") == 0) {
		type = ZPOOL_PREFETCH_DDT;
	} else {
		(void) fprintf(stderr, gettext("unsupported prefetch type\n"));
		usage(B_FALSE);
	}

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	err = zpool_prefetch(zhp, type);

	zpool_close(zhp);

	return (err);
}

/*
 * zpool import [-d dir] [-D]
 *        import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
 *               [-d dir | -c cachefile | -s] [-f] -a
 *        import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
 *               [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
 *               [newpool]
 *
 *	-c	Read pool information from a cachefile instead of searching
 *		devices. If importing from a cachefile config fails, then
 *		fall back to searching for devices only in the directories
 *		that exist in the cachefile.
 *
 *	-d	Scan in a specific directory, other than /dev/. More than
 *		one directory can be specified using multiple '-d' options.
 *
 *	-D	Scan for previously destroyed pools, or import all or only
 *		the specified destroyed pools.
 *
 *	-R	Temporarily import the pool, with all mountpoints relative to
 *		the given root. The pool will remain exported when the machine
 *		is rebooted.
 *
 *	-V	Import even in the presence of faulted vdevs. This is an
 *		intentionally undocumented option for testing purposes, and
 *		treats the pool configuration as complete, leaving any bad
 *		vdevs in the FAULTED state. In other words, it does a
 *		verbatim import.
 *
 *	-f	Force import, even if it appears that the pool is active.
 *
 *	-F	Attempt rewind if necessary.
 *
 *	-n	See if rewind would work, but don't actually rewind.
 *
 *	-N	Import the pool but don't mount datasets.
 *
 *	-T	Specify a starting txg to use for import. This is an
 *		intentionally undocumented option for testing purposes.
 *
 *	-a	Import all pools found.
 *
 *	-l	Load encryption keys while importing.
 *
 *	-o	Set property=value and/or temporary mount options (without
 *		'=').
 *
 *	-s	Scan using the default search path; the libblkid cache will
 *		not be consulted.
 *
 *	--rewind-to-checkpoint
 *		Import the pool and revert back to the checkpoint.
 *
 * The import command scans for pools to import, and imports pools based on
 * pool name and GUID. The pool can also be renamed as part of the import
 * process.
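 *
 * Illustrative examples (pool names are hypothetical):
 *
 *	zpool import				# list importable pools
 *	zpool import -d /dev/disk/by-id -a	# import all pools found there
 *	zpool import tank newtank		# import 'tank' under a new name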
4322 */ 4323 int 4324 zpool_do_import(int argc, char **argv) 4325 { 4326 char **searchdirs = NULL; 4327 char *env, *envdup = NULL; 4328 int nsearch = 0; 4329 int c; 4330 int err = 0; 4331 nvlist_t *pools = NULL; 4332 boolean_t do_all = B_FALSE; 4333 boolean_t do_destroyed = B_FALSE; 4334 char *mntopts = NULL; 4335 uint64_t searchguid = 0; 4336 char *searchname = NULL; 4337 char *propval; 4338 nvlist_t *policy = NULL; 4339 nvlist_t *props = NULL; 4340 int flags = ZFS_IMPORT_NORMAL; 4341 uint32_t rewind_policy = ZPOOL_NO_REWIND; 4342 boolean_t dryrun = B_FALSE; 4343 boolean_t do_rewind = B_FALSE; 4344 boolean_t xtreme_rewind = B_FALSE; 4345 boolean_t do_scan = B_FALSE; 4346 boolean_t pool_exists = B_FALSE; 4347 uint64_t txg = -1ULL; 4348 char *cachefile = NULL; 4349 importargs_t idata = { 0 }; 4350 char *endptr; 4351 4352 struct option long_options[] = { 4353 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT}, 4354 {0, 0, 0, 0} 4355 }; 4356 4357 /* check options */ 4358 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX", 4359 long_options, NULL)) != -1) { 4360 switch (c) { 4361 case 'a': 4362 do_all = B_TRUE; 4363 break; 4364 case 'c': 4365 cachefile = optarg; 4366 break; 4367 case 'd': 4368 searchdirs = safe_realloc(searchdirs, 4369 (nsearch + 1) * sizeof (char *)); 4370 searchdirs[nsearch++] = optarg; 4371 break; 4372 case 'D': 4373 do_destroyed = B_TRUE; 4374 break; 4375 case 'f': 4376 flags |= ZFS_IMPORT_ANY_HOST; 4377 break; 4378 case 'F': 4379 do_rewind = B_TRUE; 4380 break; 4381 case 'l': 4382 flags |= ZFS_IMPORT_LOAD_KEYS; 4383 break; 4384 case 'm': 4385 flags |= ZFS_IMPORT_MISSING_LOG; 4386 break; 4387 case 'n': 4388 dryrun = B_TRUE; 4389 break; 4390 case 'N': 4391 flags |= ZFS_IMPORT_ONLY; 4392 break; 4393 case 'o': 4394 if ((propval = strchr(optarg, '=')) != NULL) { 4395 *propval = '\0'; 4396 propval++; 4397 if (add_prop_list(optarg, propval, 4398 &props, B_TRUE)) 4399 goto error; 4400 } else { 4401 mntopts = optarg; 4402 } 4403 break; 4404 case 'R': 4405 if (add_prop_list(zpool_prop_to_name( 4406 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE)) 4407 goto error; 4408 if (add_prop_list_default(zpool_prop_to_name( 4409 ZPOOL_PROP_CACHEFILE), "none", &props)) 4410 goto error; 4411 break; 4412 case 's': 4413 do_scan = B_TRUE; 4414 break; 4415 case 't': 4416 flags |= ZFS_IMPORT_TEMP_NAME; 4417 if (add_prop_list_default(zpool_prop_to_name( 4418 ZPOOL_PROP_CACHEFILE), "none", &props)) 4419 goto error; 4420 break; 4421 4422 case 'T': 4423 errno = 0; 4424 txg = strtoull(optarg, &endptr, 0); 4425 if (errno != 0 || *endptr != '\0') { 4426 (void) fprintf(stderr, 4427 gettext("invalid txg value\n")); 4428 usage(B_FALSE); 4429 } 4430 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND; 4431 break; 4432 case 'V': 4433 flags |= ZFS_IMPORT_VERBATIM; 4434 break; 4435 case 'X': 4436 xtreme_rewind = B_TRUE; 4437 break; 4438 case CHECKPOINT_OPT: 4439 flags |= ZFS_IMPORT_CHECKPOINT; 4440 break; 4441 case ':': 4442 (void) fprintf(stderr, gettext("missing argument for " 4443 "'%c' option\n"), optopt); 4444 usage(B_FALSE); 4445 break; 4446 case '?': 4447 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 4448 optopt); 4449 usage(B_FALSE); 4450 } 4451 } 4452 4453 argc -= optind; 4454 argv += optind; 4455 4456 if (cachefile && nsearch != 0) { 4457 (void) fprintf(stderr, gettext("-c is incompatible with -d\n")); 4458 usage(B_FALSE); 4459 } 4460 4461 if (cachefile && do_scan) { 4462 (void) fprintf(stderr, gettext("-c is incompatible with -s\n")); 4463 usage(B_FALSE); 4464 } 4465 
	if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
		(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
		usage(B_FALSE);
	}

	if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
		(void) fprintf(stderr, gettext("-l is only meaningful during "
		    "an import\n"));
		usage(B_FALSE);
	}

	if ((dryrun || xtreme_rewind) && !do_rewind) {
		(void) fprintf(stderr,
		    gettext("-n or -X only meaningful with -F\n"));
		usage(B_FALSE);
	}
	if (dryrun)
		rewind_policy = ZPOOL_TRY_REWIND;
	else if (do_rewind)
		rewind_policy = ZPOOL_DO_REWIND;
	if (xtreme_rewind)
		rewind_policy |= ZPOOL_EXTREME_REWIND;

	/* In the future, we can capture further policy and include it here */
	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
	    nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
	    rewind_policy) != 0)
		goto error;

	/* check argument count */
	if (do_all) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc > 2) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
	}

	/*
	 * Check for the effective uid. We do this explicitly here because
	 * otherwise any attempt to discover pools will silently fail.
	 */
	if (argc == 0 && geteuid() != 0) {
		(void) fprintf(stderr, gettext("cannot "
		    "discover pools: permission denied\n"));

		free(searchdirs);
		nvlist_free(props);
		nvlist_free(policy);
		return (1);
	}

	/*
	 * Depending on the arguments given, we do one of the following:
	 *
	 *	<none>	Iterate through all pools and display information about
	 *		each one.
	 *
	 *	-a	Iterate through all pools and try to import each one.
	 *
	 *	<id>	Find the pool that corresponds to the given GUID/pool
	 *		name and import that one.
	 *
	 *	-D	Above options apply only to destroyed pools.
	 */
	if (argc != 0) {
		char *endptr;

		errno = 0;
		searchguid = strtoull(argv[0], &endptr, 10);
		if (errno != 0 || *endptr != '\0') {
			searchname = argv[0];
			searchguid = 0;
		}

		/*
		 * User specified a name or guid. Ensure it's unique.
		 */
		target_exists_args_t search = {searchname, searchguid};
		pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
	}

	/*
	 * Check the environment for the preferred search path.
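	 * For example (hypothetical value), a colon-separated setting such
	 * as ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:/dev/disk/by-id" is split
	 * into individual search directories below.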
4555 */ 4556 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) { 4557 char *dir, *tmp = NULL; 4558 4559 envdup = strdup(env); 4560 4561 for (dir = strtok_r(envdup, ":", &tmp); 4562 dir != NULL; 4563 dir = strtok_r(NULL, ":", &tmp)) { 4564 searchdirs = safe_realloc(searchdirs, 4565 (nsearch + 1) * sizeof (char *)); 4566 searchdirs[nsearch++] = dir; 4567 } 4568 } 4569 4570 idata.path = searchdirs; 4571 idata.paths = nsearch; 4572 idata.poolname = searchname; 4573 idata.guid = searchguid; 4574 idata.cachefile = cachefile; 4575 idata.scan = do_scan; 4576 idata.policy = policy; 4577 idata.do_destroyed = do_destroyed; 4578 idata.do_all = do_all; 4579 4580 libpc_handle_t lpch = { 4581 .lpc_lib_handle = g_zfs, 4582 .lpc_ops = &libzfs_config_ops, 4583 .lpc_printerr = B_TRUE 4584 }; 4585 pools = zpool_search_import(&lpch, &idata); 4586 4587 if (pools != NULL && pool_exists && 4588 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) { 4589 (void) fprintf(stderr, gettext("cannot import '%s': " 4590 "a pool with that name already exists\n"), 4591 argv[0]); 4592 (void) fprintf(stderr, gettext("use the form '%s " 4593 "<pool | id> <newpool>' to give it a new name\n"), 4594 "zpool import"); 4595 err = 1; 4596 } else if (pools == NULL && pool_exists) { 4597 (void) fprintf(stderr, gettext("cannot import '%s': " 4598 "a pool with that name is already created/imported,\n"), 4599 argv[0]); 4600 (void) fprintf(stderr, gettext("and no additional pools " 4601 "with that name were found\n")); 4602 err = 1; 4603 } else if (pools == NULL) { 4604 if (argc != 0) { 4605 (void) fprintf(stderr, gettext("cannot import '%s': " 4606 "no such pool available\n"), argv[0]); 4607 } 4608 err = 1; 4609 } 4610 4611 if (err == 1) { 4612 free(searchdirs); 4613 free(envdup); 4614 nvlist_free(policy); 4615 nvlist_free(pools); 4616 nvlist_free(props); 4617 return (1); 4618 } 4619 4620 err = import_pools(pools, props, mntopts, flags, 4621 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata); 4622 4623 /* 4624 * If we're using the cachefile and we failed to import, then 4625 * fallback to scanning the directory for pools that match 4626 * those in the cachefile. 4627 */ 4628 if (err != 0 && cachefile != NULL) { 4629 (void) printf(gettext("cachefile import failed, retrying\n")); 4630 4631 /* 4632 * We use the scan flag to gather the directories that exist 4633 * in the cachefile. If we need to fallback to searching for 4634 * the pool config, we will only search devices in these 4635 * directories. 4636 */ 4637 idata.scan = B_TRUE; 4638 nvlist_free(pools); 4639 pools = zpool_search_import(&lpch, &idata); 4640 4641 err = import_pools(pools, props, mntopts, flags, 4642 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, 4643 &idata); 4644 } 4645 4646 error: 4647 nvlist_free(props); 4648 nvlist_free(pools); 4649 nvlist_free(policy); 4650 free(searchdirs); 4651 free(envdup); 4652 4653 return (err ? 1 : 0); 4654 } 4655 4656 /* 4657 * zpool sync [-f] [pool] ... 4658 * 4659 * -f (undocumented) force uberblock (and config including zpool cache file) 4660 * update. 4661 * 4662 * Sync the specified pool(s). 4663 * Without arguments "zpool sync" will sync all pools. 4664 * This command initiates TXG sync(s) and will return after the TXG(s) commit. 
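 *
 * For example, "zpool sync" syncs every imported pool, while a
 * hypothetical "zpool sync tank" syncs only the pool named tank.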
4665 * 4666 */ 4667 static int 4668 zpool_do_sync(int argc, char **argv) 4669 { 4670 int ret; 4671 boolean_t force = B_FALSE; 4672 4673 /* check options */ 4674 while ((ret = getopt(argc, argv, "f")) != -1) { 4675 switch (ret) { 4676 case 'f': 4677 force = B_TRUE; 4678 break; 4679 case '?': 4680 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 4681 optopt); 4682 usage(B_FALSE); 4683 } 4684 } 4685 4686 argc -= optind; 4687 argv += optind; 4688 4689 /* if argc == 0 we will execute zpool_sync_one on all pools */ 4690 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 4691 B_FALSE, zpool_sync_one, &force); 4692 4693 return (ret); 4694 } 4695 4696 typedef struct iostat_cbdata { 4697 uint64_t cb_flags; 4698 int cb_namewidth; 4699 int cb_iteration; 4700 boolean_t cb_verbose; 4701 boolean_t cb_literal; 4702 boolean_t cb_scripted; 4703 zpool_list_t *cb_list; 4704 vdev_cmd_data_list_t *vcdl; 4705 vdev_cbdata_t cb_vdevs; 4706 } iostat_cbdata_t; 4707 4708 /* iostat labels */ 4709 typedef struct name_and_columns { 4710 const char *name; /* Column name */ 4711 unsigned int columns; /* Center name to this number of columns */ 4712 } name_and_columns_t; 4713 4714 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */ 4715 4716 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] = 4717 { 4718 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2}, 4719 {NULL}}, 4720 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 4721 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1}, 4722 {NULL}}, 4723 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2}, 4724 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2}, 4725 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}}, 4726 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 4727 {"asyncq_wait", 2}, {NULL}}, 4728 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2}, 4729 {"async_read", 2}, {"async_write", 2}, {"scrub", 2}, 4730 {"trim", 2}, {"rebuild", 2}, {NULL}}, 4731 }; 4732 4733 /* Shorthand - if "columns" field not set, default to 1 column */ 4734 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] = 4735 { 4736 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"}, 4737 {"write"}, {NULL}}, 4738 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 4739 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"}, 4740 {NULL}}, 4741 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"}, 4742 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"}, 4743 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}}, 4744 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 4745 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"}, 4746 {NULL}}, 4747 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 4748 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 4749 {"ind"}, {"agg"}, {NULL}}, 4750 }; 4751 4752 static const char *histo_to_title[] = { 4753 [IOS_L_HISTO] = "latency", 4754 [IOS_RQ_HISTO] = "req_size", 4755 }; 4756 4757 /* 4758 * Return the number of labels in a null-terminated name_and_columns_t 4759 * array. 4760 * 4761 */ 4762 static unsigned int 4763 label_array_len(const name_and_columns_t *labels) 4764 { 4765 int i = 0; 4766 4767 while (labels[i].name) 4768 i++; 4769 4770 return (i); 4771 } 4772 4773 /* 4774 * Return the number of strings in a null-terminated string array. 
 * For example:
 *
 *	const char *foo[] = {"bar", "baz", NULL};
 *
 * returns 2
 */
static uint64_t
str_array_len(const char *array[])
{
	uint64_t i = 0;
	while (array[i])
		i++;

	return (i);
}


/*
 * Return a default column width for default/latency/queue columns. This does
 * not include histograms, which have their columns autosized.
 */
static unsigned int
default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
{
	unsigned long column_width = 5; /* Normal niceprint */
	static unsigned long widths[] = {
		/*
		 * Choose some sane default column sizes for printing the
		 * raw numbers.
		 */
		[IOS_DEFAULT] = 15, /* 1PB capacity */
		[IOS_LATENCY] = 10, /* 10 digits, i.e. up to ~10sec in ns */
		[IOS_QUEUES] = 6, /* 1M queue entries */
		[IOS_L_HISTO] = 10, /* 10 digits, i.e. up to ~10sec in ns */
		[IOS_RQ_HISTO] = 6, /* 1M queue entries */
	};

	if (cb->cb_literal)
		column_width = widths[type];

	return (column_width);
}

/*
 * Print the column labels, i.e:
 *
 *   capacity     operations     bandwidth
 * alloc   free   read  write   read  write  ...
 *
 * If force_column_width is set, use it for the column width. If not set, use
 * the default column width.
 */
static void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
    const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
	int i, idx, s;
	int text_start, rw_column_width, spaces_to_end;
	uint64_t flags = cb->cb_flags;
	uint64_t f;
	unsigned int column_width = force_column_width;

	/* For each bit set in flags */
	for (f = flags; f; f &= ~(1ULL << idx)) {
		idx = lowbit64(f) - 1;
		if (!force_column_width)
			column_width = default_column_width(cb, idx);
		/* Print our top labels centered over "read write" label. */
		for (i = 0; i < label_array_len(labels[idx]); i++) {
			const char *name = labels[idx][i].name;
			/*
			 * We treat labels[][].columns == 0 as shorthand
			 * for one column. It makes writing out the label
			 * tables more concise.
			 */
			unsigned int columns = MAX(1, labels[idx][i].columns);
			unsigned int slen = strlen(name);

			rw_column_width = (column_width * columns) +
			    (2 * (columns - 1));

			text_start = (int)((rw_column_width) / columns -
			    slen / columns);
			if (text_start < 0)
				text_start = 0;

			printf("  ");	/* Two spaces between columns */

			/* Space from beginning of column to label */
			for (s = 0; s < text_start; s++)
				printf(" ");

			printf("%s", name);

			/* Print space after label to end of column */
			spaces_to_end = rw_column_width - text_start - slen;
			if (spaces_to_end < 0)
				spaces_to_end = 0;

			for (s = 0; s < spaces_to_end; s++)
				printf(" ");
		}
	}
}


/*
 * print_cmd_columns - Print custom column titles from -c
 *
 * If the user specified the "zpool status|iostat -c" then print their custom
 * column titles in the header. For example, print_cmd_columns() would print
 * the "  col1  col2" part of this:
 *
 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
 * ...
4890 * capacity operations bandwidth 4891 * pool alloc free read write read write col1 col2 4892 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 4893 * mypool 269K 1008M 0 0 107 946 4894 * mirror 269K 1008M 0 0 107 946 4895 * sdb - - 0 0 102 473 val1 val2 4896 * sdc - - 0 0 5 473 val1 val2 4897 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 4898 */ 4899 static void 4900 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes) 4901 { 4902 int i, j; 4903 vdev_cmd_data_t *data = &vcdl->data[0]; 4904 4905 if (vcdl->count == 0 || data == NULL) 4906 return; 4907 4908 /* 4909 * Each vdev cmd should have the same column names unless the user did 4910 * something weird with their cmd. Just take the column names from the 4911 * first vdev and assume it works for all of them. 4912 */ 4913 for (i = 0; i < vcdl->uniq_cols_cnt; i++) { 4914 printf(" "); 4915 if (use_dashes) { 4916 for (j = 0; j < vcdl->uniq_cols_width[i]; j++) 4917 printf("-"); 4918 } else { 4919 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i], 4920 vcdl->uniq_cols[i]); 4921 } 4922 } 4923 } 4924 4925 4926 /* 4927 * Utility function to print out a line of dashes like: 4928 * 4929 * -------------------------------- ----- ----- ----- ----- ----- 4930 * 4931 * ...or a dashed named-row line like: 4932 * 4933 * logs - - - - - 4934 * 4935 * @cb: iostat data 4936 * 4937 * @force_column_width If non-zero, use the value as the column width. 4938 * Otherwise use the default column widths. 4939 * 4940 * @name: Print a dashed named-row line starting 4941 * with @name. Otherwise, print a regular 4942 * dashed line. 4943 */ 4944 static void 4945 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width, 4946 const char *name) 4947 { 4948 int i; 4949 unsigned int namewidth; 4950 uint64_t flags = cb->cb_flags; 4951 uint64_t f; 4952 int idx; 4953 const name_and_columns_t *labels; 4954 const char *title; 4955 4956 4957 if (cb->cb_flags & IOS_ANYHISTO_M) { 4958 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 4959 } else if (cb->cb_vdevs.cb_names_count) { 4960 title = "vdev"; 4961 } else { 4962 title = "pool"; 4963 } 4964 4965 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 4966 name ? 
strlen(name) : 0); 4967 4968 4969 if (name) { 4970 printf("%-*s", namewidth, name); 4971 } else { 4972 for (i = 0; i < namewidth; i++) 4973 (void) printf("-"); 4974 } 4975 4976 /* For each bit in flags */ 4977 for (f = flags; f; f &= ~(1ULL << idx)) { 4978 unsigned int column_width; 4979 idx = lowbit64(f) - 1; 4980 if (force_column_width) 4981 column_width = force_column_width; 4982 else 4983 column_width = default_column_width(cb, idx); 4984 4985 labels = iostat_bottom_labels[idx]; 4986 for (i = 0; i < label_array_len(labels); i++) { 4987 if (name) 4988 printf(" %*s-", column_width - 1, " "); 4989 else 4990 printf(" %.*s", column_width, 4991 "--------------------"); 4992 } 4993 } 4994 } 4995 4996 4997 static void 4998 print_iostat_separator_impl(iostat_cbdata_t *cb, 4999 unsigned int force_column_width) 5000 { 5001 print_iostat_dashes(cb, force_column_width, NULL); 5002 } 5003 5004 static void 5005 print_iostat_separator(iostat_cbdata_t *cb) 5006 { 5007 print_iostat_separator_impl(cb, 0); 5008 } 5009 5010 static void 5011 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width, 5012 const char *histo_vdev_name) 5013 { 5014 unsigned int namewidth; 5015 const char *title; 5016 5017 color_start(ANSI_BOLD); 5018 5019 if (cb->cb_flags & IOS_ANYHISTO_M) { 5020 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 5021 } else if (cb->cb_vdevs.cb_names_count) { 5022 title = "vdev"; 5023 } else { 5024 title = "pool"; 5025 } 5026 5027 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 5028 histo_vdev_name ? strlen(histo_vdev_name) : 0); 5029 5030 if (histo_vdev_name) 5031 printf("%-*s", namewidth, histo_vdev_name); 5032 else 5033 printf("%*s", namewidth, ""); 5034 5035 5036 print_iostat_labels(cb, force_column_width, iostat_top_labels); 5037 printf("\n"); 5038 5039 printf("%-*s", namewidth, title); 5040 5041 print_iostat_labels(cb, force_column_width, iostat_bottom_labels); 5042 if (cb->vcdl != NULL) 5043 print_cmd_columns(cb->vcdl, 0); 5044 5045 printf("\n"); 5046 5047 print_iostat_separator_impl(cb, force_column_width); 5048 5049 if (cb->vcdl != NULL) 5050 print_cmd_columns(cb->vcdl, 1); 5051 5052 color_end(); 5053 5054 printf("\n"); 5055 } 5056 5057 static void 5058 print_iostat_header(iostat_cbdata_t *cb) 5059 { 5060 print_iostat_header_impl(cb, 0, NULL); 5061 } 5062 5063 /* 5064 * Prints a size string (i.e. 120M) with the suffix ("M") colored 5065 * by order of magnitude. Uses column_size to add padding. 5066 */ 5067 static void 5068 print_stat_color(const char *statbuf, unsigned int column_size) 5069 { 5070 fputs(" ", stdout); 5071 size_t len = strlen(statbuf); 5072 while (len < column_size) { 5073 fputc(' ', stdout); 5074 column_size--; 5075 } 5076 if (*statbuf == '0') { 5077 color_start(ANSI_GRAY); 5078 fputc('0', stdout); 5079 } else { 5080 for (; *statbuf; statbuf++) { 5081 if (*statbuf == 'K') color_start(ANSI_GREEN); 5082 else if (*statbuf == 'M') color_start(ANSI_YELLOW); 5083 else if (*statbuf == 'G') color_start(ANSI_RED); 5084 else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE); 5085 else if (*statbuf == 'P') color_start(ANSI_MAGENTA); 5086 else if (*statbuf == 'E') color_start(ANSI_CYAN); 5087 fputc(*statbuf, stdout); 5088 if (--column_size <= 0) 5089 break; 5090 } 5091 } 5092 color_end(); 5093 } 5094 5095 /* 5096 * Display a single statistic. 
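 *
 * For example, a value of 1536 with format ZFS_NICENUM_1024 comes out as a
 * humanized string such as "1.50K", while ZFS_NICENUM_RAW prints the raw
 * 1536 (illustrative values).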
5097 */ 5098 static void 5099 print_one_stat(uint64_t value, enum zfs_nicenum_format format, 5100 unsigned int column_size, boolean_t scripted) 5101 { 5102 char buf[64]; 5103 5104 zfs_nicenum_format(value, buf, sizeof (buf), format); 5105 5106 if (scripted) 5107 printf("\t%s", buf); 5108 else 5109 print_stat_color(buf, column_size); 5110 } 5111 5112 /* 5113 * Calculate the default vdev stats 5114 * 5115 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting 5116 * stats into calcvs. 5117 */ 5118 static void 5119 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs, 5120 vdev_stat_t *calcvs) 5121 { 5122 int i; 5123 5124 memcpy(calcvs, newvs, sizeof (*calcvs)); 5125 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++) 5126 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]); 5127 5128 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++) 5129 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]); 5130 } 5131 5132 /* 5133 * Internal representation of the extended iostats data. 5134 * 5135 * The extended iostat stats are exported in nvlists as either uint64_t arrays 5136 * or single uint64_t's. We make both look like arrays to make them easier 5137 * to process. In order to make single uint64_t's look like arrays, we set 5138 * __data to the stat data, and then set *data = &__data with count = 1. Then, 5139 * we can just use *data and count. 5140 */ 5141 struct stat_array { 5142 uint64_t *data; 5143 uint_t count; /* Number of entries in data[] */ 5144 uint64_t __data; /* Only used when data is a single uint64_t */ 5145 }; 5146 5147 static uint64_t 5148 stat_histo_max(struct stat_array *nva, unsigned int len) 5149 { 5150 uint64_t max = 0; 5151 int i; 5152 for (i = 0; i < len; i++) 5153 max = MAX(max, array64_max(nva[i].data, nva[i].count)); 5154 5155 return (max); 5156 } 5157 5158 /* 5159 * Helper function to lookup a uint64_t array or uint64_t value and store its 5160 * data as a stat_array. If the nvpair is a single uint64_t value, then we make 5161 * it look like a one element array to make it easier to process. 5162 */ 5163 static int 5164 nvpair64_to_stat_array(nvlist_t *nvl, const char *name, 5165 struct stat_array *nva) 5166 { 5167 nvpair_t *tmp; 5168 int ret; 5169 5170 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0); 5171 switch (nvpair_type(tmp)) { 5172 case DATA_TYPE_UINT64_ARRAY: 5173 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count); 5174 break; 5175 case DATA_TYPE_UINT64: 5176 ret = nvpair_value_uint64(tmp, &nva->__data); 5177 nva->data = &nva->__data; 5178 nva->count = 1; 5179 break; 5180 default: 5181 /* Not a uint64_t */ 5182 ret = EINVAL; 5183 break; 5184 } 5185 5186 return (ret); 5187 } 5188 5189 /* 5190 * Given a list of nvlist names, look up the extended stats in newnv and oldnv, 5191 * subtract them, and return the results in a newly allocated stat_array. 5192 * You must free the returned array after you are done with it with 5193 * free_calc_stats(). 5194 * 5195 * Additionally, you can set "oldnv" to NULL if you simply want the newnv 5196 * values. 
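 *
 * Sketch of the expected calling pattern (use() is a stand-in for whatever
 * the caller does with each stat):
 *
 *	struct stat_array *nva;
 *
 *	nva = calc_and_alloc_stats_ex(names, len, oldnv, newnv);
 *	for (i = 0; i < len; i++)
 *		use(nva[i].data, nva[i].count);
 *	free_calc_stats(nva, len);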
5197 */ 5198 static struct stat_array * 5199 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv, 5200 nvlist_t *newnv) 5201 { 5202 nvlist_t *oldnvx = NULL, *newnvx; 5203 struct stat_array *oldnva, *newnva, *calcnva; 5204 int i, j; 5205 unsigned int alloc_size = (sizeof (struct stat_array)) * len; 5206 5207 /* Extract our extended stats nvlist from the main list */ 5208 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX, 5209 &newnvx) == 0); 5210 if (oldnv) { 5211 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX, 5212 &oldnvx) == 0); 5213 } 5214 5215 newnva = safe_malloc(alloc_size); 5216 oldnva = safe_malloc(alloc_size); 5217 calcnva = safe_malloc(alloc_size); 5218 5219 for (j = 0; j < len; j++) { 5220 verify(nvpair64_to_stat_array(newnvx, names[j], 5221 &newnva[j]) == 0); 5222 calcnva[j].count = newnva[j].count; 5223 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]); 5224 calcnva[j].data = safe_malloc(alloc_size); 5225 memcpy(calcnva[j].data, newnva[j].data, alloc_size); 5226 5227 if (oldnvx) { 5228 verify(nvpair64_to_stat_array(oldnvx, names[j], 5229 &oldnva[j]) == 0); 5230 for (i = 0; i < oldnva[j].count; i++) 5231 calcnva[j].data[i] -= oldnva[j].data[i]; 5232 } 5233 } 5234 free(newnva); 5235 free(oldnva); 5236 return (calcnva); 5237 } 5238 5239 static void 5240 free_calc_stats(struct stat_array *nva, unsigned int len) 5241 { 5242 int i; 5243 for (i = 0; i < len; i++) 5244 free(nva[i].data); 5245 5246 free(nva); 5247 } 5248 5249 static void 5250 print_iostat_histo(struct stat_array *nva, unsigned int len, 5251 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth, 5252 double scale) 5253 { 5254 int i, j; 5255 char buf[6]; 5256 uint64_t val; 5257 enum zfs_nicenum_format format; 5258 unsigned int buckets; 5259 unsigned int start_bucket; 5260 5261 if (cb->cb_literal) 5262 format = ZFS_NICENUM_RAW; 5263 else 5264 format = ZFS_NICENUM_1024; 5265 5266 /* All these histos are the same size, so just use nva[0].count */ 5267 buckets = nva[0].count; 5268 5269 if (cb->cb_flags & IOS_RQ_HISTO_M) { 5270 /* Start at 512 - req size should never be lower than this */ 5271 start_bucket = 9; 5272 } else { 5273 start_bucket = 0; 5274 } 5275 5276 for (j = start_bucket; j < buckets; j++) { 5277 /* Print histogram bucket label */ 5278 if (cb->cb_flags & IOS_L_HISTO_M) { 5279 /* Ending range of this bucket */ 5280 val = (1UL << (j + 1)) - 1; 5281 zfs_nicetime(val, buf, sizeof (buf)); 5282 } else { 5283 /* Request size (starting range of bucket) */ 5284 val = (1UL << j); 5285 zfs_nicenum(val, buf, sizeof (buf)); 5286 } 5287 5288 if (cb->cb_scripted) 5289 printf("%llu", (u_longlong_t)val); 5290 else 5291 printf("%-*s", namewidth, buf); 5292 5293 /* Print the values on the line */ 5294 for (i = 0; i < len; i++) { 5295 print_one_stat(nva[i].data[j] * scale, format, 5296 column_width, cb->cb_scripted); 5297 } 5298 printf("\n"); 5299 } 5300 } 5301 5302 static void 5303 print_solid_separator(unsigned int length) 5304 { 5305 while (length--) 5306 printf("-"); 5307 printf("\n"); 5308 } 5309 5310 static void 5311 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv, 5312 nvlist_t *newnv, double scale, const char *name) 5313 { 5314 unsigned int column_width; 5315 unsigned int namewidth; 5316 unsigned int entire_width; 5317 enum iostat_type type; 5318 struct stat_array *nva; 5319 const char **names; 5320 unsigned int names_len; 5321 5322 /* What type of histo are we? 
*/ 5323 type = IOS_HISTO_IDX(cb->cb_flags); 5324 5325 /* Get NULL-terminated array of nvlist names for our histo */ 5326 names = vsx_type_to_nvlist[type]; 5327 names_len = str_array_len(names); /* num of names */ 5328 5329 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv); 5330 5331 if (cb->cb_literal) { 5332 column_width = MAX(5, 5333 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1); 5334 } else { 5335 column_width = 5; 5336 } 5337 5338 namewidth = MAX(cb->cb_namewidth, 5339 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)])); 5340 5341 /* 5342 * Calculate the entire line width of what we're printing. The 5343 * +2 is for the two spaces between columns: 5344 */ 5345 /* read write */ 5346 /* ----- ----- */ 5347 /* |___| <---------- column_width */ 5348 /* */ 5349 /* |__________| <--- entire_width */ 5350 /* */ 5351 entire_width = namewidth + (column_width + 2) * 5352 label_array_len(iostat_bottom_labels[type]); 5353 5354 if (cb->cb_scripted) 5355 printf("%s\n", name); 5356 else 5357 print_iostat_header_impl(cb, column_width, name); 5358 5359 print_iostat_histo(nva, names_len, cb, column_width, 5360 namewidth, scale); 5361 5362 free_calc_stats(nva, names_len); 5363 if (!cb->cb_scripted) 5364 print_solid_separator(entire_width); 5365 } 5366 5367 /* 5368 * Calculate the average latency of a power-of-two latency histogram 5369 */ 5370 static uint64_t 5371 single_histo_average(uint64_t *histo, unsigned int buckets) 5372 { 5373 int i; 5374 uint64_t count = 0, total = 0; 5375 5376 for (i = 0; i < buckets; i++) { 5377 /* 5378 * Our buckets are power-of-two latency ranges. Use the 5379 * midpoint latency of each bucket to calculate the average. 5380 * For example: 5381 * 5382 * Bucket Midpoint 5383 * 8ns-15ns: 12ns 5384 * 16ns-31ns: 24ns 5385 * ... 5386 */ 5387 if (histo[i] != 0) { 5388 total += histo[i] * (((1UL << i) + ((1UL << i)/2))); 5389 count += histo[i]; 5390 } 5391 } 5392 5393 /* Prevent divide by zero */ 5394 return (count == 0 ? 
0 : total / count); 5395 } 5396 5397 static void 5398 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv) 5399 { 5400 const char *names[] = { 5401 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, 5402 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, 5403 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, 5404 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, 5405 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, 5406 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, 5407 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, 5408 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, 5409 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, 5410 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, 5411 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE, 5412 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE, 5413 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE, 5414 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE, 5415 }; 5416 5417 struct stat_array *nva; 5418 5419 unsigned int column_width = default_column_width(cb, IOS_QUEUES); 5420 enum zfs_nicenum_format format; 5421 5422 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv); 5423 5424 if (cb->cb_literal) 5425 format = ZFS_NICENUM_RAW; 5426 else 5427 format = ZFS_NICENUM_1024; 5428 5429 for (int i = 0; i < ARRAY_SIZE(names); i++) { 5430 uint64_t val = nva[i].data[0]; 5431 print_one_stat(val, format, column_width, cb->cb_scripted); 5432 } 5433 5434 free_calc_stats(nva, ARRAY_SIZE(names)); 5435 } 5436 5437 static void 5438 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv, 5439 nvlist_t *newnv) 5440 { 5441 int i; 5442 uint64_t val; 5443 const char *names[] = { 5444 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, 5445 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, 5446 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, 5447 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, 5448 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO, 5449 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO, 5450 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO, 5451 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO, 5452 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO, 5453 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, 5454 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO, 5455 }; 5456 struct stat_array *nva; 5457 5458 unsigned int column_width = default_column_width(cb, IOS_LATENCY); 5459 enum zfs_nicenum_format format; 5460 5461 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv); 5462 5463 if (cb->cb_literal) 5464 format = ZFS_NICENUM_RAWTIME; 5465 else 5466 format = ZFS_NICENUM_TIME; 5467 5468 /* Print our avg latencies on the line */ 5469 for (i = 0; i < ARRAY_SIZE(names); i++) { 5470 /* Compute average latency for a latency histo */ 5471 val = single_histo_average(nva[i].data, nva[i].count); 5472 print_one_stat(val, format, column_width, cb->cb_scripted); 5473 } 5474 free_calc_stats(nva, ARRAY_SIZE(names)); 5475 } 5476 5477 /* 5478 * Print default statistics (capacity/operations/bandwidth) 5479 */ 5480 static void 5481 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale) 5482 { 5483 unsigned int column_width = default_column_width(cb, IOS_DEFAULT); 5484 enum zfs_nicenum_format format; 5485 char na; /* char to print for "not applicable" values */ 5486 5487 if (cb->cb_literal) { 5488 format = ZFS_NICENUM_RAW; 5489 na = '0'; 5490 } else { 5491 format = ZFS_NICENUM_1024; 5492 na = '-'; 5493 } 5494 5495 /* only toplevel vdevs have capacity stats */ 5496 if (vs->vs_space == 0) { 5497 if (cb->cb_scripted) 5498 printf("\t%c\t%c", na, na); 5499 else 5500 printf(" %*c %*c", column_width, na, column_width, 5501 na); 5502 } else { 5503 print_one_stat(vs->vs_alloc, format, column_width, 5504 cb->cb_scripted); 5505 print_one_stat(vs->vs_space - vs->vs_alloc, format, 5506 column_width, cb->cb_scripted); 5507 } 5508 5509 
	print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
	    format, column_width, cb->cb_scripted);
	print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
	    format, column_width, cb->cb_scripted);
	print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
	    format, column_width, cb->cb_scripted);
	print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
	    format, column_width, cb->cb_scripted);
}

static const char *const class_name[] = {
	VDEV_ALLOC_BIAS_DEDUP,
	VDEV_ALLOC_BIAS_SPECIAL,
	VDEV_ALLOC_CLASS_LOGS
};

/*
 * Print out all the statistics for the given vdev. This can either be the
 * toplevel configuration, or called recursively. If 'name' is NULL, then this
 * is a verbose output, and we don't want to display the toplevel pool stats.
 *
 * Returns the number of stat lines printed.
 */
static unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
    nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
	nvlist_t **oldchild, **newchild;
	uint_t c, children, oldchildren;
	vdev_stat_t *oldvs, *newvs, *calcvs;
	vdev_stat_t zerovs = { 0 };
	char *vname;
	int i;
	int ret = 0;
	uint64_t tdelta;
	double scale;

	if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
		return (ret);

	calcvs = safe_malloc(sizeof (*calcvs));

	if (oldnv != NULL) {
		verify(nvlist_lookup_uint64_array(oldnv,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
	} else {
		oldvs = &zerovs;
	}

	/* Do we only want to see a specific vdev? */
	for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
		/* Yes we do. Is this the vdev? */
		if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
			/*
			 * This is our vdev. Since it is the only vdev we
			 * will be displaying, make depth = 0 so that it
			 * doesn't get indented.
			 */
			depth = 0;
			break;
		}
	}

	if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
		/* Couldn't match the name */
		goto children;
	}


	verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&newvs, &c) == 0);

	/*
	 * Print the vdev name unless it is a histogram. Histograms
	 * display the vdev name in the header itself.
	 */
	if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
		if (cb->cb_scripted) {
			printf("%s", name);
		} else {
			if (strlen(name) + depth > cb->cb_namewidth)
				(void) printf("%*s%s", depth, "", name);
			else
				(void) printf("%*s%s%*s", depth, "", name,
				    (int)(cb->cb_namewidth - strlen(name) -
				    depth), "");
		}
	}

	/* Calculate our scaling factor */
	tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
	if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
		/*
		 * If we specify printing histograms with no time interval,
		 * then print the histogram numbers over the entire lifetime
		 * of the vdev.
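		 *
		 * (Otherwise, an illustrative 5-second interval gives
		 * tdelta ~= 5e9 ns, so scale = NANOSEC / tdelta ~= 0.2,
		 * normalizing the per-interval deltas to per-second rates.)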
5605 */ 5606 scale = 1; 5607 } else { 5608 if (tdelta == 0) 5609 scale = 1.0; 5610 else 5611 scale = (double)NANOSEC / tdelta; 5612 } 5613 5614 if (cb->cb_flags & IOS_DEFAULT_M) { 5615 calc_default_iostats(oldvs, newvs, calcvs); 5616 print_iostat_default(calcvs, cb, scale); 5617 } 5618 if (cb->cb_flags & IOS_LATENCY_M) 5619 print_iostat_latency(cb, oldnv, newnv); 5620 if (cb->cb_flags & IOS_QUEUES_M) 5621 print_iostat_queues(cb, newnv); 5622 if (cb->cb_flags & IOS_ANYHISTO_M) { 5623 printf("\n"); 5624 print_iostat_histos(cb, oldnv, newnv, scale, name); 5625 } 5626 5627 if (cb->vcdl != NULL) { 5628 const char *path; 5629 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH, 5630 &path) == 0) { 5631 printf(" "); 5632 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 5633 } 5634 } 5635 5636 if (!(cb->cb_flags & IOS_ANYHISTO_M)) 5637 printf("\n"); 5638 5639 ret++; 5640 5641 children: 5642 5643 free(calcvs); 5644 5645 if (!cb->cb_verbose) 5646 return (ret); 5647 5648 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN, 5649 &newchild, &children) != 0) 5650 return (ret); 5651 5652 if (oldnv) { 5653 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN, 5654 &oldchild, &oldchildren) != 0) 5655 return (ret); 5656 5657 children = MIN(oldchildren, children); 5658 } 5659 5660 /* 5661 * print normal top-level devices 5662 */ 5663 for (c = 0; c < children; c++) { 5664 uint64_t ishole = B_FALSE, islog = B_FALSE; 5665 5666 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE, 5667 &ishole); 5668 5669 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG, 5670 &islog); 5671 5672 if (ishole || islog) 5673 continue; 5674 5675 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 5676 continue; 5677 5678 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5679 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID); 5680 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, 5681 newchild[c], cb, depth + 2); 5682 free(vname); 5683 } 5684 5685 /* 5686 * print all other top-level devices 5687 */ 5688 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { 5689 boolean_t printed = B_FALSE; 5690 5691 for (c = 0; c < children; c++) { 5692 uint64_t islog = B_FALSE; 5693 const char *bias = NULL; 5694 const char *type = NULL; 5695 5696 (void) nvlist_lookup_uint64(newchild[c], 5697 ZPOOL_CONFIG_IS_LOG, &islog); 5698 if (islog) { 5699 bias = VDEV_ALLOC_CLASS_LOGS; 5700 } else { 5701 (void) nvlist_lookup_string(newchild[c], 5702 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 5703 (void) nvlist_lookup_string(newchild[c], 5704 ZPOOL_CONFIG_TYPE, &type); 5705 } 5706 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 5707 continue; 5708 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 5709 continue; 5710 5711 if (!printed) { 5712 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && 5713 !cb->cb_scripted && 5714 !cb->cb_vdevs.cb_names) { 5715 print_iostat_dashes(cb, 0, 5716 class_name[n]); 5717 } 5718 printf("\n"); 5719 printed = B_TRUE; 5720 } 5721 5722 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5723 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID); 5724 ret += print_vdev_stats(zhp, vname, oldnv ? 
5725 oldchild[c] : NULL, newchild[c], cb, depth + 2); 5726 free(vname); 5727 } 5728 } 5729 5730 /* 5731 * Include level 2 ARC devices in iostat output 5732 */ 5733 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE, 5734 &newchild, &children) != 0) 5735 return (ret); 5736 5737 if (oldnv) { 5738 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE, 5739 &oldchild, &oldchildren) != 0) 5740 return (ret); 5741 5742 children = MIN(oldchildren, children); 5743 } 5744 5745 if (children > 0) { 5746 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted && 5747 !cb->cb_vdevs.cb_names) { 5748 print_iostat_dashes(cb, 0, "cache"); 5749 } 5750 printf("\n"); 5751 5752 for (c = 0; c < children; c++) { 5753 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5754 cb->cb_vdevs.cb_name_flags); 5755 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] 5756 : NULL, newchild[c], cb, depth + 2); 5757 free(vname); 5758 } 5759 } 5760 5761 return (ret); 5762 } 5763 5764 /* 5765 * Callback to print out the iostats for the given pool. 5766 */ 5767 static int 5768 print_iostat(zpool_handle_t *zhp, void *data) 5769 { 5770 iostat_cbdata_t *cb = data; 5771 nvlist_t *oldconfig, *newconfig; 5772 nvlist_t *oldnvroot, *newnvroot; 5773 int ret; 5774 5775 newconfig = zpool_get_config(zhp, &oldconfig); 5776 5777 if (cb->cb_iteration == 1) 5778 oldconfig = NULL; 5779 5780 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE, 5781 &newnvroot) == 0); 5782 5783 if (oldconfig == NULL) 5784 oldnvroot = NULL; 5785 else 5786 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE, 5787 &oldnvroot) == 0); 5788 5789 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, 5790 cb, 0); 5791 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) && 5792 !cb->cb_scripted && cb->cb_verbose && 5793 !cb->cb_vdevs.cb_names_count) { 5794 print_iostat_separator(cb); 5795 if (cb->vcdl != NULL) { 5796 print_cmd_columns(cb->vcdl, 1); 5797 } 5798 printf("\n"); 5799 } 5800 5801 return (ret); 5802 } 5803 5804 static int 5805 get_columns(void) 5806 { 5807 struct winsize ws; 5808 int columns = 80; 5809 int error; 5810 5811 if (isatty(STDOUT_FILENO)) { 5812 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws); 5813 if (error == 0) 5814 columns = ws.ws_col; 5815 } else { 5816 columns = 999; 5817 } 5818 5819 return (columns); 5820 } 5821 5822 /* 5823 * Return the required length of the pool/vdev name column. The minimum 5824 * allowed width and output formatting flags must be provided. 5825 */ 5826 static int 5827 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose) 5828 { 5829 nvlist_t *config, *nvroot; 5830 int width = min_width; 5831 5832 if ((config = zpool_get_config(zhp, NULL)) != NULL) { 5833 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5834 &nvroot) == 0); 5835 size_t poolname_len = strlen(zpool_get_name(zhp)); 5836 if (verbose == B_FALSE) { 5837 width = MAX(poolname_len, min_width); 5838 } else { 5839 width = MAX(poolname_len, 5840 max_width(zhp, nvroot, 0, min_width, flags)); 5841 } 5842 } 5843 5844 return (width); 5845 } 5846 5847 /* 5848 * Parse the input string, get the 'interval' and 'count' value if there is one. 
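 *
 * For example, a trailing "2 5" (as in a hypothetical
 * "zpool iostat tank 2 5") yields interval = 2 seconds and count = 5.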
5849 */ 5850 static void 5851 get_interval_count(int *argcp, char **argv, float *iv, 5852 unsigned long *cnt) 5853 { 5854 float interval = 0; 5855 unsigned long count = 0; 5856 int argc = *argcp; 5857 5858 /* 5859 * Determine if the last argument is an integer or a pool name 5860 */ 5861 if (argc > 0 && zfs_isnumber(argv[argc - 1])) { 5862 char *end; 5863 5864 errno = 0; 5865 interval = strtof(argv[argc - 1], &end); 5866 5867 if (*end == '\0' && errno == 0) { 5868 if (interval == 0) { 5869 (void) fprintf(stderr, gettext( 5870 "interval cannot be zero\n")); 5871 usage(B_FALSE); 5872 } 5873 /* 5874 * Ignore the last parameter 5875 */ 5876 argc--; 5877 } else { 5878 /* 5879 * If this is not a valid number, just plow on. The 5880 * user will get a more informative error message later 5881 * on. 5882 */ 5883 interval = 0; 5884 } 5885 } 5886 5887 /* 5888 * If the last argument is also an integer, then we have both a count 5889 * and an interval. 5890 */ 5891 if (argc > 0 && zfs_isnumber(argv[argc - 1])) { 5892 char *end; 5893 5894 errno = 0; 5895 count = interval; 5896 interval = strtof(argv[argc - 1], &end); 5897 5898 if (*end == '\0' && errno == 0) { 5899 if (interval == 0) { 5900 (void) fprintf(stderr, gettext( 5901 "interval cannot be zero\n")); 5902 usage(B_FALSE); 5903 } 5904 5905 /* 5906 * Ignore the last parameter 5907 */ 5908 argc--; 5909 } else { 5910 interval = 0; 5911 } 5912 } 5913 5914 *iv = interval; 5915 *cnt = count; 5916 *argcp = argc; 5917 } 5918 5919 static void 5920 get_timestamp_arg(char c) 5921 { 5922 if (c == 'u') 5923 timestamp_fmt = UDATE; 5924 else if (c == 'd') 5925 timestamp_fmt = DDATE; 5926 else 5927 usage(B_FALSE); 5928 } 5929 5930 /* 5931 * Return stat flags that are supported by all pools by both the module and 5932 * zpool iostat. "*data" should be initialized to all 0xFFs before running. 5933 * It will get ANDed down until only the flags that are supported on all pools 5934 * remain. 5935 */ 5936 static int 5937 get_stat_flags_cb(zpool_handle_t *zhp, void *data) 5938 { 5939 uint64_t *mask = data; 5940 nvlist_t *config, *nvroot, *nvx; 5941 uint64_t flags = 0; 5942 int i, j; 5943 5944 config = zpool_get_config(zhp, NULL); 5945 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5946 &nvroot) == 0); 5947 5948 /* Default stats are always supported, but for completeness.. */ 5949 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS)) 5950 flags |= IOS_DEFAULT_M; 5951 5952 /* Get our extended stats nvlist from the main list */ 5953 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX, 5954 &nvx) != 0) { 5955 /* 5956 * No extended stats; they're probably running an older 5957 * module. No big deal, we support that too. 5958 */ 5959 goto end; 5960 } 5961 5962 /* For each extended stat, make sure all its nvpairs are supported */ 5963 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) { 5964 if (!vsx_type_to_nvlist[j][0]) 5965 continue; 5966 5967 /* Start off by assuming the flag is supported, then check */ 5968 flags |= (1ULL << j); 5969 for (i = 0; vsx_type_to_nvlist[j][i]; i++) { 5970 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) { 5971 /* flag isn't supported */ 5972 flags = flags & ~(1ULL << j); 5973 break; 5974 } 5975 } 5976 } 5977 end: 5978 *mask = *mask & flags; 5979 return (0); 5980 } 5981 5982 /* 5983 * Return a bitmask of stats that are supported on all pools by both the module 5984 * and zpool iostat. 
 */
static uint64_t
get_stat_flags(zpool_list_t *list)
{
	uint64_t mask = -1;

	/*
	 * get_stat_flags_cb() will lop off bits from "mask" until only the
	 * flags that are supported on all pools remain.
	 */
	pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
	return (mask);
}

/*
 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
 */
static int
is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
{
	uint64_t guid;
	vdev_cbdata_t *cb = cb_data;
	zpool_handle_t *zhp = zhp_data;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
		return (0);

	return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
}

/*
 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
 */
static int
is_vdev(zpool_handle_t *zhp, void *cb_data)
{
	return (for_each_vdev(zhp, is_vdev_cb, cb_data));
}

/*
 * Check if vdevs are in a pool
 *
 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
 * return 0. If pool_name is NULL, then search all pools.
 */
static int
are_vdevs_in_pool(int argc, char **argv, char *pool_name,
    vdev_cbdata_t *cb)
{
	char **tmp_name;
	int ret = 0;
	int i;
	int pool_count = 0;

	if ((argc == 0) || !*argv)
		return (0);

	if (pool_name)
		pool_count = 1;

	/* Temporarily hijack cb_names for a second... */
	tmp_name = cb->cb_names;

	/* Go through our list of prospective vdev names */
	for (i = 0; i < argc; i++) {
		cb->cb_names = argv + i;

		/* Is this name a vdev in our pools? */
		ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
		    ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
		if (!ret) {
			/* No match */
			break;
		}
	}

	cb->cb_names = tmp_name;

	return (ret);
}

static int
is_pool_cb(zpool_handle_t *zhp, void *data)
{
	char *name = data;
	if (strcmp(name, zpool_get_name(zhp)) == 0)
		return (1);

	return (0);
}

/*
 * Do we have a pool named *name? If so, return 1, otherwise 0.
 */
static int
is_pool(char *name)
{
	return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
	    is_pool_cb, name));
}

/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
static int
are_all_pools(int argc, char **argv)
{
	if ((argc == 0) || !*argv)
		return (0);

	while (--argc >= 0)
		if (!is_pool(argv[argc]))
			return (0);

	return (1);
}

/*
 * Helper function to print out vdev/pool names we can't resolve. Used for an
 * error message.
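 *
 * Illustrative output (hypothetical names):
 *
 *	sda (vdev in this pool)
 *	tank2 (pool)
 *	bogus (unknown)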
6103 	 */
6104 static void
6105 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
6106     vdev_cbdata_t *cb)
6107 {
6108 	int i;
6109 	char *name;
6110 	char *str;
6111 	for (i = 0; i < argc; i++) {
6112 		name = argv[i];
6113 
6114 		if (is_pool(name))
6115 			str = gettext("pool");
6116 		else if (are_vdevs_in_pool(1, &name, pool_name, cb))
6117 			str = gettext("vdev in this pool");
6118 		else if (are_vdevs_in_pool(1, &name, NULL, cb))
6119 			str = gettext("vdev in another pool");
6120 		else
6121 			str = gettext("unknown");
6122 
6123 		fprintf(stderr, "\t%s (%s)\n", name, str);
6124 	}
6125 }
6126 
6127 /*
6128  * Same as get_interval_count(), but with additional checks to not misinterpret
6129  * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
6130  * cb.cb_vdevs.cb_name_flags.
6131  */
6132 static void
6133 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
6134     unsigned long *count, iostat_cbdata_t *cb)
6135 {
6136 	int argc_for_interval = 0;
6137 
6138 	/* Is the last arg an interval value? Or a guid? */
6139 	if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
6140 	    &cb->cb_vdevs)) {
6141 		/*
6142 		 * The last arg is not a guid, so it's probably an
6143 		 * interval value.
6144 		 */
6145 		argc_for_interval++;
6146 
6147 		if (*argc >= 2 &&
6148 		    !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
6149 		    &cb->cb_vdevs)) {
6150 			/*
6151 			 * The 2nd to last arg is not a guid, so it's probably
6152 			 * an interval value.
6153 			 */
6154 			argc_for_interval++;
6155 		}
6156 	}
6157 
6158 	/* Point to our list of possible intervals */
6159 	char **tmpargv = &argv[*argc - argc_for_interval];
6160 
6161 	*argc = *argc - argc_for_interval;
6162 	get_interval_count(&argc_for_interval, tmpargv,
6163 	    interval, count);
6164 }
6165 
6166 /*
6167  * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
6168  * if we were unable to determine its size.
6169  */
6170 static int
6171 terminal_height(void)
6172 {
6173 	struct winsize win;
6174 
6175 	if (isatty(STDOUT_FILENO) == 0)
6176 		return (-1);
6177 
6178 	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
6179 		return (win.ws_row);
6180 
6181 	return (-1);
6182 }
6183 
6184 /*
6185  * Run one of the zpool status/iostat -c scripts with the help (-h) option and
6186  * print the result.
6187  *
6188  * name: Short name of the script ('iostat').
6189  * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
6190  */
6191 static void
6192 print_zpool_script_help(char *name, char *path)
6193 {
6194 	char *argv[] = {path, (char *)"-h", NULL};
6195 	char **lines = NULL;
6196 	int lines_cnt = 0;
6197 	int rc;
6198 
6199 	rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
6200 	    &lines_cnt);
6201 	if (rc != 0 || lines == NULL || lines_cnt <= 0) {
6202 		if (lines != NULL)
6203 			libzfs_free_str_array(lines, lines_cnt);
6204 		return;
6205 	}
6206 
6207 	for (int i = 0; i < lines_cnt; i++)
6208 		if (!is_blank_str(lines[i]))
6209 			printf(" %-14s %s\n", name, lines[i]);
6210 
6211 	libzfs_free_str_array(lines, lines_cnt);
6212 }
6213 
6214 /*
6215  * Go through the zpool status/iostat -c scripts in the user's path, run their
6216  * help option (-h), and print out the results.
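 *
 * For example, a hypothetical script named "smart" in one of the
 * search-path directories whose "-h" output is "Show SMART attributes"
 * would contribute a line like:
 *
 *	smart          Show SMART attributes
 *
 * to the listing produced by print_zpool_script_list() below.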
6217 */ 6218 static void 6219 print_zpool_dir_scripts(char *dirpath) 6220 { 6221 DIR *dir; 6222 struct dirent *ent; 6223 char fullpath[MAXPATHLEN]; 6224 struct stat dir_stat; 6225 6226 if ((dir = opendir(dirpath)) != NULL) { 6227 /* print all the files and directories within directory */ 6228 while ((ent = readdir(dir)) != NULL) { 6229 if (snprintf(fullpath, sizeof (fullpath), "%s/%s", 6230 dirpath, ent->d_name) >= sizeof (fullpath)) { 6231 (void) fprintf(stderr, 6232 gettext("internal error: " 6233 "ZPOOL_SCRIPTS_PATH too large.\n")); 6234 exit(1); 6235 } 6236 6237 /* Print the scripts */ 6238 if (stat(fullpath, &dir_stat) == 0) 6239 if (dir_stat.st_mode & S_IXUSR && 6240 S_ISREG(dir_stat.st_mode)) 6241 print_zpool_script_help(ent->d_name, 6242 fullpath); 6243 } 6244 closedir(dir); 6245 } 6246 } 6247 6248 /* 6249 * Print out help text for all zpool status/iostat -c scripts. 6250 */ 6251 static void 6252 print_zpool_script_list(const char *subcommand) 6253 { 6254 char *dir, *sp, *tmp; 6255 6256 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand); 6257 6258 sp = zpool_get_cmd_search_path(); 6259 if (sp == NULL) 6260 return; 6261 6262 for (dir = strtok_r(sp, ":", &tmp); 6263 dir != NULL; 6264 dir = strtok_r(NULL, ":", &tmp)) 6265 print_zpool_dir_scripts(dir); 6266 6267 free(sp); 6268 } 6269 6270 /* 6271 * Set the minimum pool/vdev name column width. The width must be at least 10, 6272 * but may be as large as the column width - 42 so it still fits on one line. 6273 * NOTE: 42 is the width of the default capacity/operations/bandwidth output 6274 */ 6275 static int 6276 get_namewidth_iostat(zpool_handle_t *zhp, void *data) 6277 { 6278 iostat_cbdata_t *cb = data; 6279 int width, available_width; 6280 6281 /* 6282 * get_namewidth() returns the maximum width of any name in that column 6283 * for any pool/vdev/device line that will be output. 6284 */ 6285 width = get_namewidth(zhp, cb->cb_namewidth, 6286 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose); 6287 6288 /* 6289 * The width we are calculating is the width of the header and also the 6290 * padding width for names that are less than maximum width. The stats 6291 * take up 42 characters, so the width available for names is: 6292 */ 6293 available_width = get_columns() - 42; 6294 6295 /* 6296 * If the maximum width fits on a screen, then great! Make everything 6297 * line up by justifying all lines to the same width. If that max 6298 * width is larger than what's available, the name plus stats won't fit 6299 * on one line, and justifying to that width would cause every line to 6300 * wrap on the screen. We only want lines with long names to wrap. 6301 * Limit the padding to what won't wrap. 6302 */ 6303 if (width > available_width) 6304 width = available_width; 6305 6306 /* 6307 * And regardless of whatever the screen width is (get_columns can 6308 * return 0 if the width is not known or less than 42 for a narrow 6309 * terminal) have the width be a minimum of 10. 6310 */ 6311 if (width < 10) 6312 width = 10; 6313 6314 /* Save the calculated width */ 6315 cb->cb_namewidth = width; 6316 6317 return (0); 6318 } 6319 6320 /* 6321 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name] 6322 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]] 6323 * [interval [count]] 6324 * 6325 * -c CMD For each vdev, run command CMD 6326 * -g Display guid for individual vdev name. 6327 * -L Follow links when resolving vdev path name. 6328 * -P Display full path for vdev name. 
6329 * -v Display statistics for individual vdevs 6330 * -h Display help 6331 * -p Display values in parsable (exact) format. 6332 * -H Scripted mode. Don't display headers, and separate properties 6333 * by a single tab. 6334 * -l Display average latency 6335 * -q Display queue depths 6336 * -w Display latency histograms 6337 * -r Display request size histogram 6338 * -T Display a timestamp in date(1) or Unix format 6339 * -n Only print headers once 6340 * 6341 * This command can be tricky because we want to be able to deal with pool 6342 * creation/destruction as well as vdev configuration changes. The bulk of this 6343 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely 6344 * on pool_list_refresh() to detect the addition and removal of pools. 6345 * Configuration changes are all handled within libzfs. 6346 */ 6347 int 6348 zpool_do_iostat(int argc, char **argv) 6349 { 6350 int c; 6351 int ret; 6352 float interval = 0; 6353 unsigned long count = 0; 6354 zpool_list_t *list; 6355 boolean_t verbose = B_FALSE; 6356 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE; 6357 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE; 6358 boolean_t omit_since_boot = B_FALSE; 6359 boolean_t guid = B_FALSE; 6360 boolean_t follow_links = B_FALSE; 6361 boolean_t full_name = B_FALSE; 6362 boolean_t headers_once = B_FALSE; 6363 iostat_cbdata_t cb = { 0 }; 6364 char *cmd = NULL; 6365 6366 /* Used for printing error message */ 6367 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q', 6368 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'}; 6369 6370 uint64_t unsupported_flags; 6371 6372 /* check options */ 6373 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) { 6374 switch (c) { 6375 case 'c': 6376 if (cmd != NULL) { 6377 fprintf(stderr, 6378 gettext("Can't set -c flag twice\n")); 6379 exit(1); 6380 } 6381 6382 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 6383 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 6384 fprintf(stderr, gettext( 6385 "Can't run -c, disabled by " 6386 "ZPOOL_SCRIPTS_ENABLED.\n")); 6387 exit(1); 6388 } 6389 6390 if ((getuid() <= 0 || geteuid() <= 0) && 6391 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 6392 fprintf(stderr, gettext( 6393 "Can't run -c with root privileges " 6394 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 6395 exit(1); 6396 } 6397 cmd = optarg; 6398 verbose = B_TRUE; 6399 break; 6400 case 'g': 6401 guid = B_TRUE; 6402 break; 6403 case 'L': 6404 follow_links = B_TRUE; 6405 break; 6406 case 'P': 6407 full_name = B_TRUE; 6408 break; 6409 case 'T': 6410 get_timestamp_arg(*optarg); 6411 break; 6412 case 'v': 6413 verbose = B_TRUE; 6414 break; 6415 case 'p': 6416 parsable = B_TRUE; 6417 break; 6418 case 'l': 6419 latency = B_TRUE; 6420 break; 6421 case 'q': 6422 queues = B_TRUE; 6423 break; 6424 case 'H': 6425 scripted = B_TRUE; 6426 break; 6427 case 'w': 6428 l_histo = B_TRUE; 6429 break; 6430 case 'r': 6431 rq_histo = B_TRUE; 6432 break; 6433 case 'y': 6434 omit_since_boot = B_TRUE; 6435 break; 6436 case 'n': 6437 headers_once = B_TRUE; 6438 break; 6439 case 'h': 6440 usage(B_FALSE); 6441 break; 6442 case '?': 6443 if (optopt == 'c') { 6444 print_zpool_script_list("iostat"); 6445 exit(0); 6446 } else { 6447 fprintf(stderr, 6448 gettext("invalid option '%c'\n"), optopt); 6449 } 6450 usage(B_FALSE); 6451 } 6452 } 6453 6454 argc -= optind; 6455 argv += optind; 6456 6457 cb.cb_literal = parsable; 6458 cb.cb_scripted = scripted; 6459 6460 if (guid) 6461 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID; 
6462 if (follow_links) 6463 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 6464 if (full_name) 6465 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH; 6466 cb.cb_iteration = 0; 6467 cb.cb_namewidth = 0; 6468 cb.cb_verbose = verbose; 6469 6470 /* Get our interval and count values (if any) */ 6471 if (guid) { 6472 get_interval_count_filter_guids(&argc, argv, &interval, 6473 &count, &cb); 6474 } else { 6475 get_interval_count(&argc, argv, &interval, &count); 6476 } 6477 6478 if (argc == 0) { 6479 /* No args, so just print the defaults. */ 6480 } else if (are_all_pools(argc, argv)) { 6481 /* All the args are pool names */ 6482 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) { 6483 /* All the args are vdevs */ 6484 cb.cb_vdevs.cb_names = argv; 6485 cb.cb_vdevs.cb_names_count = argc; 6486 argc = 0; /* No pools to process */ 6487 } else if (are_all_pools(1, argv)) { 6488 /* The first arg is a pool name */ 6489 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], 6490 &cb.cb_vdevs)) { 6491 /* ...and the rest are vdev names */ 6492 cb.cb_vdevs.cb_names = argv + 1; 6493 cb.cb_vdevs.cb_names_count = argc - 1; 6494 argc = 1; /* One pool to process */ 6495 } else { 6496 fprintf(stderr, gettext("Expected either a list of ")); 6497 fprintf(stderr, gettext("pools, or list of vdevs in")); 6498 fprintf(stderr, " \"%s\", ", argv[0]); 6499 fprintf(stderr, gettext("but got:\n")); 6500 error_list_unresolved_vdevs(argc - 1, argv + 1, 6501 argv[0], &cb.cb_vdevs); 6502 fprintf(stderr, "\n"); 6503 usage(B_FALSE); 6504 return (1); 6505 } 6506 } else { 6507 /* 6508 * The args don't make sense. The first arg isn't a pool name, 6509 * nor are all the args vdevs. 6510 */ 6511 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n")); 6512 fprintf(stderr, "\n"); 6513 return (1); 6514 } 6515 6516 if (cb.cb_vdevs.cb_names_count != 0) { 6517 /* 6518 * If user specified vdevs, it implies verbose. 6519 */ 6520 cb.cb_verbose = B_TRUE; 6521 } 6522 6523 /* 6524 * Construct the list of all interesting pools. 6525 */ 6526 ret = 0; 6527 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable, 6528 &ret)) == NULL) 6529 return (1); 6530 6531 if (pool_list_count(list) == 0 && argc != 0) { 6532 pool_list_free(list); 6533 return (1); 6534 } 6535 6536 if (pool_list_count(list) == 0 && interval == 0) { 6537 pool_list_free(list); 6538 (void) fprintf(stderr, gettext("no pools available\n")); 6539 return (1); 6540 } 6541 6542 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) { 6543 pool_list_free(list); 6544 (void) fprintf(stderr, 6545 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n")); 6546 usage(B_FALSE); 6547 return (1); 6548 } 6549 6550 if (l_histo && rq_histo) { 6551 pool_list_free(list); 6552 (void) fprintf(stderr, 6553 gettext("Only one of [-r|-w] can be passed at a time\n")); 6554 usage(B_FALSE); 6555 return (1); 6556 } 6557 6558 /* 6559 * Enter the main iostat loop. 6560 */ 6561 cb.cb_list = list; 6562 6563 if (l_histo) { 6564 /* 6565 * Histograms tables look out of place when you try to display 6566 * them with the other stats, so make a rule that you can only 6567 * print histograms by themselves. 6568 */ 6569 cb.cb_flags = IOS_L_HISTO_M; 6570 } else if (rq_histo) { 6571 cb.cb_flags = IOS_RQ_HISTO_M; 6572 } else { 6573 cb.cb_flags = IOS_DEFAULT_M; 6574 if (latency) 6575 cb.cb_flags |= IOS_LATENCY_M; 6576 if (queues) 6577 cb.cb_flags |= IOS_QUEUES_M; 6578 } 6579 6580 /* 6581 * See if the module supports all the stats we want to display. 
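 *
 * For example (hypothetical), requesting "zpool iostat -l -q" against a
 * module that only exports the default vdev stats would fail the check
 * below and print:
 *
 *	The loaded zfs module doesn't support: -l -q. Try running a
 *	newer module.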
6582 	 */
6583 	unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
6584 	if (unsupported_flags) {
6585 		uint64_t f;
6586 		int idx;
6587 		fprintf(stderr,
6588 		    gettext("The loaded zfs module doesn't support:"));
6589 
6590 		/* for each bit set in unsupported_flags */
6591 		for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
6592 			idx = lowbit64(f) - 1;
6593 			fprintf(stderr, " -%c", flag_to_arg[idx]);
6594 		}
6595 
6596 		fprintf(stderr, ". Try running a newer module.\n");
6597 		pool_list_free(list);
6598 
6599 		return (1);
6600 	}
6601 
6602 	int last_npools = 0;
6603 	for (;;) {
6604 		/*
6605 		 * Refresh all pools in list, adding or removing pools as
6606 		 * necessary.
6607 		 */
6608 		int npools = pool_list_refresh(list);
6609 		if (npools == 0) {
6610 			(void) fprintf(stderr, gettext("no pools available\n"));
6611 		} else {
6612 			/*
6613 			 * If the list of pools has changed since last time
6614 			 * around, reset the iteration count to force the
6615 			 * header to be redisplayed.
6616 			 */
6617 			if (last_npools != npools)
6618 				cb.cb_iteration = 0;
6619 
6620 			/*
6621 			 * If this is the first iteration and -y was supplied
6622 			 * we skip any printing.
6623 			 */
6624 			boolean_t skip = (omit_since_boot &&
6625 			    cb.cb_iteration == 0);
6626 
6627 			/*
6628 			 * Iterate over all pools to determine the maximum width
6629 			 * for the pool / device name column across all pools.
6630 			 */
6631 			cb.cb_namewidth = 0;
6632 			(void) pool_list_iter(list, B_FALSE,
6633 			    get_namewidth_iostat, &cb);
6634 
6635 			if (timestamp_fmt != NODATE)
6636 				print_timestamp(timestamp_fmt);
6637 
6638 			if (cmd != NULL && cb.cb_verbose &&
6639 			    !(cb.cb_flags & IOS_ANYHISTO_M)) {
6640 				cb.vcdl = all_pools_for_each_vdev_run(argc,
6641 				    argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
6642 				    cb.cb_vdevs.cb_names_count,
6643 				    cb.cb_vdevs.cb_name_flags);
6644 			} else {
6645 				cb.vcdl = NULL;
6646 			}
6647 
6648 
6649 			/*
6650 			 * Check terminal size so we can print headers
6651 			 * even when terminal window has its height
6652 			 * changed.
6653 			 */
6654 			int winheight = terminal_height();
6655 			/*
6656 			 * Are we connected to a TTY? If not, headers_once
6657 			 * should be true, to avoid breaking scripts.
6658 			 */
6659 			if (winheight < 0)
6660 				headers_once = B_TRUE;
6661 
6662 			/*
6663 			 * Print the header on the first non-skipped iteration,
6664 			 * when exactly one of skip and verbose is set, or
6665 			 * periodically as old headers scroll off screen
6666 			 * (unless headers_once is set). The histogram code
6667 			 * explicitly prints its header on every vdev, so skip
6668 			 * this for histograms.
6669 			 */
6670 			if (((++cb.cb_iteration == 1 && !skip) ||
6671 			    (skip != verbose) ||
6672 			    (!headers_once &&
6673 			    (cb.cb_iteration % winheight) == 0)) &&
6674 			    (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
6675 			    !cb.cb_scripted)
6676 				print_iostat_header(&cb);
6677 
6678 			if (skip) {
6679 				(void) fflush(stdout);
6680 				(void) fsleep(interval);
6681 				last_npools = npools;
6682 				continue;
6683 			}
6684 
6685 			pool_list_iter(list, B_FALSE, print_iostat, &cb);
6686 
6687 			/*
6688 			 * If there's more than one pool, and we're not in
6689 			 * verbose mode (which prints a separator for us),
6690 			 * then print a separator.
6691 			 *
6692 			 * In addition, if we're printing specific vdevs then
6693 			 * we also want an ending separator.
6693 */ 6694 if (((npools > 1 && !verbose && 6695 !(cb.cb_flags & IOS_ANYHISTO_M)) || 6696 (!(cb.cb_flags & IOS_ANYHISTO_M) && 6697 cb.cb_vdevs.cb_names_count)) && 6698 !cb.cb_scripted) { 6699 print_iostat_separator(&cb); 6700 if (cb.vcdl != NULL) 6701 print_cmd_columns(cb.vcdl, 1); 6702 printf("\n"); 6703 } 6704 6705 if (cb.vcdl != NULL) 6706 free_vdev_cmd_data_list(cb.vcdl); 6707 6708 } 6709 6710 if (interval == 0) 6711 break; 6712 6713 if (count != 0 && --count == 0) 6714 break; 6715 6716 (void) fflush(stdout); 6717 (void) fsleep(interval); 6718 6719 last_npools = npools; 6720 } 6721 6722 pool_list_free(list); 6723 6724 return (ret); 6725 } 6726 6727 typedef struct list_cbdata { 6728 boolean_t cb_verbose; 6729 int cb_name_flags; 6730 int cb_namewidth; 6731 boolean_t cb_json; 6732 boolean_t cb_scripted; 6733 zprop_list_t *cb_proplist; 6734 boolean_t cb_literal; 6735 nvlist_t *cb_jsobj; 6736 boolean_t cb_json_as_int; 6737 boolean_t cb_json_pool_key_guid; 6738 } list_cbdata_t; 6739 6740 6741 /* 6742 * Given a list of columns to display, output appropriate headers for each one. 6743 */ 6744 static void 6745 print_header(list_cbdata_t *cb) 6746 { 6747 zprop_list_t *pl = cb->cb_proplist; 6748 char headerbuf[ZPOOL_MAXPROPLEN]; 6749 const char *header; 6750 boolean_t first = B_TRUE; 6751 boolean_t right_justify; 6752 size_t width = 0; 6753 6754 for (; pl != NULL; pl = pl->pl_next) { 6755 width = pl->pl_width; 6756 if (first && cb->cb_verbose) { 6757 /* 6758 * Reset the width to accommodate the verbose listing 6759 * of devices. 6760 */ 6761 width = cb->cb_namewidth; 6762 } 6763 6764 if (!first) 6765 (void) fputs(" ", stdout); 6766 else 6767 first = B_FALSE; 6768 6769 right_justify = B_FALSE; 6770 if (pl->pl_prop != ZPROP_USERPROP) { 6771 header = zpool_prop_column_name(pl->pl_prop); 6772 right_justify = zpool_prop_align_right(pl->pl_prop); 6773 } else { 6774 int i; 6775 6776 for (i = 0; pl->pl_user_prop[i] != '\0'; i++) 6777 headerbuf[i] = toupper(pl->pl_user_prop[i]); 6778 headerbuf[i] = '\0'; 6779 header = headerbuf; 6780 } 6781 6782 if (pl->pl_next == NULL && !right_justify) 6783 (void) fputs(header, stdout); 6784 else if (right_justify) 6785 (void) printf("%*s", (int)width, header); 6786 else 6787 (void) printf("%-*s", (int)width, header); 6788 } 6789 6790 (void) fputc('\n', stdout); 6791 } 6792 6793 /* 6794 * Given a pool and a list of properties, print out all the properties according 6795 * to the described layout. Used by zpool_do_list(). 6796 */ 6797 static void 6798 collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb) 6799 { 6800 zprop_list_t *pl = cb->cb_proplist; 6801 boolean_t first = B_TRUE; 6802 char property[ZPOOL_MAXPROPLEN]; 6803 const char *propstr; 6804 boolean_t right_justify; 6805 size_t width; 6806 zprop_source_t sourcetype = ZPROP_SRC_NONE; 6807 nvlist_t *item, *d, *props; 6808 item = d = props = NULL; 6809 6810 if (cb->cb_json) { 6811 item = fnvlist_alloc(); 6812 props = fnvlist_alloc(); 6813 d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools"); 6814 if (d == NULL) { 6815 fprintf(stderr, "pools obj not found.\n"); 6816 exit(1); 6817 } 6818 fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int); 6819 } 6820 6821 for (; pl != NULL; pl = pl->pl_next) { 6822 6823 width = pl->pl_width; 6824 if (first && cb->cb_verbose) { 6825 /* 6826 * Reset the width to accommodate the verbose listing 6827 * of devices. 
6828 */ 6829 width = cb->cb_namewidth; 6830 } 6831 6832 if (!cb->cb_json && !first) { 6833 if (cb->cb_scripted) 6834 (void) fputc('\t', stdout); 6835 else 6836 (void) fputs(" ", stdout); 6837 } else { 6838 first = B_FALSE; 6839 } 6840 6841 right_justify = B_FALSE; 6842 if (pl->pl_prop != ZPROP_USERPROP) { 6843 if (zpool_get_prop(zhp, pl->pl_prop, property, 6844 sizeof (property), &sourcetype, 6845 cb->cb_literal) != 0) 6846 propstr = "-"; 6847 else 6848 propstr = property; 6849 6850 right_justify = zpool_prop_align_right(pl->pl_prop); 6851 } else if ((zpool_prop_feature(pl->pl_user_prop) || 6852 zpool_prop_unsupported(pl->pl_user_prop)) && 6853 zpool_prop_get_feature(zhp, pl->pl_user_prop, property, 6854 sizeof (property)) == 0) { 6855 propstr = property; 6856 sourcetype = ZPROP_SRC_LOCAL; 6857 } else if (zfs_prop_user(pl->pl_user_prop) && 6858 zpool_get_userprop(zhp, pl->pl_user_prop, property, 6859 sizeof (property), &sourcetype) == 0) { 6860 propstr = property; 6861 } else { 6862 propstr = "-"; 6863 } 6864 6865 if (cb->cb_json) { 6866 if (pl->pl_prop == ZPOOL_PROP_NAME) 6867 continue; 6868 const char *prop_name; 6869 if (pl->pl_prop != ZPROP_USERPROP) 6870 prop_name = zpool_prop_to_name(pl->pl_prop); 6871 else 6872 prop_name = pl->pl_user_prop; 6873 (void) zprop_nvlist_one_property( 6874 prop_name, propstr, 6875 sourcetype, NULL, NULL, props, cb->cb_json_as_int); 6876 } else { 6877 /* 6878 * If this is being called in scripted mode, or if this 6879 * is the last column and it is left-justified, don't 6880 * include a width format specifier. 6881 */ 6882 if (cb->cb_scripted || (pl->pl_next == NULL && 6883 !right_justify)) 6884 (void) fputs(propstr, stdout); 6885 else if (right_justify) 6886 (void) printf("%*s", (int)width, propstr); 6887 else 6888 (void) printf("%-*s", (int)width, propstr); 6889 } 6890 } 6891 6892 if (cb->cb_json) { 6893 fnvlist_add_nvlist(item, "properties", props); 6894 if (cb->cb_json_pool_key_guid) { 6895 char pool_guid[256]; 6896 uint64_t guid = fnvlist_lookup_uint64( 6897 zpool_get_config(zhp, NULL), 6898 ZPOOL_CONFIG_POOL_GUID); 6899 snprintf(pool_guid, 256, "%llu", 6900 (u_longlong_t)guid); 6901 fnvlist_add_nvlist(d, pool_guid, item); 6902 } else { 6903 fnvlist_add_nvlist(d, zpool_get_name(zhp), 6904 item); 6905 } 6906 fnvlist_free(props); 6907 fnvlist_free(item); 6908 } else 6909 (void) fputc('\n', stdout); 6910 } 6911 6912 static void 6913 collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str, 6914 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format, 6915 boolean_t json, nvlist_t *nvl, boolean_t as_int) 6916 { 6917 char propval[64]; 6918 boolean_t fixed; 6919 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL); 6920 6921 switch (prop) { 6922 case ZPOOL_PROP_SIZE: 6923 case ZPOOL_PROP_EXPANDSZ: 6924 case ZPOOL_PROP_CHECKPOINT: 6925 case ZPOOL_PROP_DEDUPRATIO: 6926 case ZPOOL_PROP_DEDUPCACHED: 6927 if (value == 0) 6928 (void) strlcpy(propval, "-", sizeof (propval)); 6929 else 6930 zfs_nicenum_format(value, propval, sizeof (propval), 6931 format); 6932 break; 6933 case ZPOOL_PROP_FRAGMENTATION: 6934 if (value == ZFS_FRAG_INVALID) { 6935 (void) strlcpy(propval, "-", sizeof (propval)); 6936 } else if (format == ZFS_NICENUM_RAW) { 6937 (void) snprintf(propval, sizeof (propval), "%llu", 6938 (unsigned long long)value); 6939 } else { 6940 (void) snprintf(propval, sizeof (propval), "%llu%%", 6941 (unsigned long long)value); 6942 } 6943 break; 6944 case ZPOOL_PROP_CAPACITY: 6945 /* capacity value is in parts-per-10,000 (aka permyriad) 
		 */
6946 		if (format == ZFS_NICENUM_RAW)
6947 			(void) snprintf(propval, sizeof (propval), "%llu",
6948 			    (unsigned long long)value / 100);
6949 		else
6950 			(void) snprintf(propval, sizeof (propval),
6951 			    value < 1000 ? "%1.2f%%" : value < 10000 ?
6952 			    "%2.1f%%" : "%3.0f%%", value / 100.0);
6953 		break;
6954 	case ZPOOL_PROP_HEALTH:
6955 		width = 8;
6956 		(void) strlcpy(propval, str, sizeof (propval));
6957 		break;
6958 	default:
6959 		zfs_nicenum_format(value, propval, sizeof (propval), format);
6960 	}
6961 
6962 	if (!valid)
6963 		(void) strlcpy(propval, "-", sizeof (propval));
6964 
6965 	if (json) {
6966 		zprop_nvlist_one_property(zpool_prop_to_name(prop), propval,
6967 		    ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);
6968 	} else {
6969 		if (scripted)
6970 			(void) printf("\t%s", propval);
6971 		else
6972 			(void) printf(" %*s", (int)width, propval);
6973 	}
6974 }
6975 
6976 /*
6977  * Print one default stats line per vdev.
6978  */
6979 static void
6980 collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
6981     list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)
6982 {
6983 	nvlist_t **child;
6984 	vdev_stat_t *vs;
6985 	uint_t c, children = 0;
6986 	char *vname;
6987 	boolean_t scripted = cb->cb_scripted;
6988 	uint64_t islog = B_FALSE;
6989 	nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;
6990 	props = ent = ch = obj = sp = l2c = NULL;
6991 	const char *dashes = "%-*s - - - - "
6992 	    "- - - - -\n";
6993 
6994 	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
6995 	    (uint64_t **)&vs, &c) == 0);
6996 
6997 	if (name != NULL) {
6998 		boolean_t toplevel = (vs->vs_space != 0);
6999 		uint64_t cap;
7000 		enum zfs_nicenum_format format;
7001 		const char *state;
7002 
7003 		if (cb->cb_literal)
7004 			format = ZFS_NICENUM_RAW;
7005 		else
7006 			format = ZFS_NICENUM_1024;
7007 
7008 		if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
7009 			return;
7010 
7011 		if (cb->cb_json) {
7012 			props = fnvlist_alloc();
7013 			ent = fnvlist_alloc();
7014 			fill_vdev_info(ent, zhp, (char *)name, B_FALSE,
7015 			    cb->cb_json_as_int);
7016 		} else {
7017 			if (scripted)
7018 				(void) printf("\t%s", name);
7019 			else if (strlen(name) + depth > cb->cb_namewidth)
7020 				(void) printf("%*s%s", depth, "", name);
7021 			else
7022 				(void) printf("%*s%s%*s", depth, "", name,
7023 				    (int)(cb->cb_namewidth - strlen(name) -
7024 				    depth), "");
7025 		}
7026 
7027 		/*
7028 		 * Print the properties for the individual vdevs. Some
7029 		 * properties are only applicable to toplevel vdevs. The
7030 		 * 'toplevel' boolean value is passed to collect_vdev_prop()
7031 		 * to indicate that the value is valid.
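		 *
		 * E.g. a leaf disk inside a raidz vdev has vs_space == 0, so
		 * columns like ALLOC, FREE, CAP and FRAG print as "-" for it
		 * rather than a misleading zero (SIZE may still be shown from
		 * vs_pspace when the module reports it).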
7032 */ 7033 for (zprop_list_t *pl = cb->cb_proplist; pl != NULL; 7034 pl = pl->pl_next) { 7035 switch (pl->pl_prop) { 7036 case ZPOOL_PROP_SIZE: 7037 if (VDEV_STAT_VALID(vs_pspace, c) && 7038 vs->vs_pspace) { 7039 collect_vdev_prop( 7040 ZPOOL_PROP_SIZE, vs->vs_pspace, 7041 NULL, scripted, B_TRUE, format, 7042 cb->cb_json, props, 7043 cb->cb_json_as_int); 7044 } else { 7045 collect_vdev_prop( 7046 ZPOOL_PROP_SIZE, vs->vs_space, NULL, 7047 scripted, toplevel, format, 7048 cb->cb_json, props, 7049 cb->cb_json_as_int); 7050 } 7051 break; 7052 case ZPOOL_PROP_ALLOCATED: 7053 collect_vdev_prop(ZPOOL_PROP_ALLOCATED, 7054 vs->vs_alloc, NULL, scripted, toplevel, 7055 format, cb->cb_json, props, 7056 cb->cb_json_as_int); 7057 break; 7058 7059 case ZPOOL_PROP_FREE: 7060 collect_vdev_prop(ZPOOL_PROP_FREE, 7061 vs->vs_space - vs->vs_alloc, NULL, scripted, 7062 toplevel, format, cb->cb_json, props, 7063 cb->cb_json_as_int); 7064 break; 7065 7066 case ZPOOL_PROP_CHECKPOINT: 7067 collect_vdev_prop(ZPOOL_PROP_CHECKPOINT, 7068 vs->vs_checkpoint_space, NULL, scripted, 7069 toplevel, format, cb->cb_json, props, 7070 cb->cb_json_as_int); 7071 break; 7072 7073 case ZPOOL_PROP_EXPANDSZ: 7074 collect_vdev_prop(ZPOOL_PROP_EXPANDSZ, 7075 vs->vs_esize, NULL, scripted, B_TRUE, 7076 format, cb->cb_json, props, 7077 cb->cb_json_as_int); 7078 break; 7079 7080 case ZPOOL_PROP_FRAGMENTATION: 7081 collect_vdev_prop( 7082 ZPOOL_PROP_FRAGMENTATION, 7083 vs->vs_fragmentation, NULL, scripted, 7084 (vs->vs_fragmentation != ZFS_FRAG_INVALID && 7085 toplevel), 7086 format, cb->cb_json, props, 7087 cb->cb_json_as_int); 7088 break; 7089 7090 case ZPOOL_PROP_CAPACITY: 7091 cap = (vs->vs_space == 0) ? 7092 0 : (vs->vs_alloc * 10000 / vs->vs_space); 7093 collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap, 7094 NULL, scripted, toplevel, format, 7095 cb->cb_json, props, cb->cb_json_as_int); 7096 break; 7097 7098 case ZPOOL_PROP_HEALTH: 7099 state = zpool_state_to_name(vs->vs_state, 7100 vs->vs_aux); 7101 if (isspare) { 7102 if (vs->vs_aux == VDEV_AUX_SPARED) 7103 state = "INUSE"; 7104 else if (vs->vs_state == 7105 VDEV_STATE_HEALTHY) 7106 state = "AVAIL"; 7107 } 7108 collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state, 7109 scripted, B_TRUE, format, cb->cb_json, 7110 props, cb->cb_json_as_int); 7111 break; 7112 7113 case ZPOOL_PROP_NAME: 7114 break; 7115 7116 default: 7117 collect_vdev_prop(pl->pl_prop, 0, 7118 NULL, scripted, B_FALSE, format, 7119 cb->cb_json, props, cb->cb_json_as_int); 7120 7121 } 7122 7123 7124 } 7125 7126 if (cb->cb_json) { 7127 fnvlist_add_nvlist(ent, "properties", props); 7128 fnvlist_free(props); 7129 } else 7130 (void) fputc('\n', stdout); 7131 } 7132 7133 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 7134 &child, &children) != 0) { 7135 if (cb->cb_json) { 7136 fnvlist_add_nvlist(item, name, ent); 7137 fnvlist_free(ent); 7138 } 7139 return; 7140 } 7141 7142 if (cb->cb_json) { 7143 ch = fnvlist_alloc(); 7144 } 7145 7146 /* list the normal vdevs first */ 7147 for (c = 0; c < children; c++) { 7148 uint64_t ishole = B_FALSE; 7149 7150 if (nvlist_lookup_uint64(child[c], 7151 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole) 7152 continue; 7153 7154 if (nvlist_lookup_uint64(child[c], 7155 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog) 7156 continue; 7157 7158 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 7159 continue; 7160 7161 vname = zpool_vdev_name(g_zfs, zhp, child[c], 7162 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 7163 7164 if (name == NULL || cb->cb_json != B_TRUE) 7165 collect_list_stats(zhp, vname, 
child[c], cb, depth + 2, 7166 B_FALSE, item); 7167 else if (cb->cb_json) { 7168 collect_list_stats(zhp, vname, child[c], cb, depth + 2, 7169 B_FALSE, ch); 7170 } 7171 free(vname); 7172 } 7173 7174 if (cb->cb_json) { 7175 if (!nvlist_empty(ch)) 7176 fnvlist_add_nvlist(ent, "vdevs", ch); 7177 fnvlist_free(ch); 7178 } 7179 7180 /* list the classes: 'logs', 'dedup', and 'special' */ 7181 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { 7182 boolean_t printed = B_FALSE; 7183 if (cb->cb_json) 7184 obj = fnvlist_alloc(); 7185 for (c = 0; c < children; c++) { 7186 const char *bias = NULL; 7187 const char *type = NULL; 7188 7189 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 7190 &islog) == 0 && islog) { 7191 bias = VDEV_ALLOC_CLASS_LOGS; 7192 } else { 7193 (void) nvlist_lookup_string(child[c], 7194 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 7195 (void) nvlist_lookup_string(child[c], 7196 ZPOOL_CONFIG_TYPE, &type); 7197 } 7198 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 7199 continue; 7200 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 7201 continue; 7202 7203 if (!printed && !cb->cb_json) { 7204 /* LINTED E_SEC_PRINTF_VAR_FMT */ 7205 (void) printf(dashes, cb->cb_namewidth, 7206 class_name[n]); 7207 printed = B_TRUE; 7208 } 7209 vname = zpool_vdev_name(g_zfs, zhp, child[c], 7210 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 7211 collect_list_stats(zhp, vname, child[c], cb, depth + 2, 7212 B_FALSE, obj); 7213 free(vname); 7214 } 7215 if (cb->cb_json) { 7216 if (!nvlist_empty(obj)) 7217 fnvlist_add_nvlist(item, class_name[n], obj); 7218 fnvlist_free(obj); 7219 } 7220 } 7221 7222 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 7223 &child, &children) == 0 && children > 0) { 7224 if (cb->cb_json) { 7225 l2c = fnvlist_alloc(); 7226 } else { 7227 /* LINTED E_SEC_PRINTF_VAR_FMT */ 7228 (void) printf(dashes, cb->cb_namewidth, "cache"); 7229 } 7230 for (c = 0; c < children; c++) { 7231 vname = zpool_vdev_name(g_zfs, zhp, child[c], 7232 cb->cb_name_flags); 7233 collect_list_stats(zhp, vname, child[c], cb, depth + 2, 7234 B_FALSE, l2c); 7235 free(vname); 7236 } 7237 if (cb->cb_json) { 7238 if (!nvlist_empty(l2c)) 7239 fnvlist_add_nvlist(item, "l2cache", l2c); 7240 fnvlist_free(l2c); 7241 } 7242 } 7243 7244 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child, 7245 &children) == 0 && children > 0) { 7246 if (cb->cb_json) { 7247 sp = fnvlist_alloc(); 7248 } else { 7249 /* LINTED E_SEC_PRINTF_VAR_FMT */ 7250 (void) printf(dashes, cb->cb_namewidth, "spare"); 7251 } 7252 for (c = 0; c < children; c++) { 7253 vname = zpool_vdev_name(g_zfs, zhp, child[c], 7254 cb->cb_name_flags); 7255 collect_list_stats(zhp, vname, child[c], cb, depth + 2, 7256 B_TRUE, sp); 7257 free(vname); 7258 } 7259 if (cb->cb_json) { 7260 if (!nvlist_empty(sp)) 7261 fnvlist_add_nvlist(item, "spares", sp); 7262 fnvlist_free(sp); 7263 } 7264 } 7265 7266 if (name != NULL && cb->cb_json) { 7267 fnvlist_add_nvlist(item, name, ent); 7268 fnvlist_free(ent); 7269 } 7270 } 7271 7272 /* 7273 * Generic callback function to list a pool. 
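 *
 * In JSON mode the per-pool object assembled here is attached under the
 * top-level "pools" object, keyed by pool name (or by GUID when
 * --json-pool-key-guid was given), with the vdev tree nested under a
 * "vdevs" key.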
7274 	 */
7275 static int
7276 list_callback(zpool_handle_t *zhp, void *data)
7277 {
7278 	nvlist_t *p, *d, *nvdevs;
7279 	uint64_t guid;
7280 	char pool_guid[256];
7281 	const char *pool_name = zpool_get_name(zhp);
7282 	list_cbdata_t *cbp = data;
7283 	p = d = nvdevs = NULL;
7284 
7285 	collect_pool(zhp, cbp);
7286 
7287 	if (cbp->cb_verbose) {
7288 		nvlist_t *config, *nvroot;
7289 		config = zpool_get_config(zhp, NULL);
7290 		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
7291 		    &nvroot) == 0);
7292 		if (cbp->cb_json) {
7293 			d = fnvlist_lookup_nvlist(cbp->cb_jsobj,
7294 			    "pools");
7295 			if (cbp->cb_json_pool_key_guid) {
7296 				guid = fnvlist_lookup_uint64(config,
7297 				    ZPOOL_CONFIG_POOL_GUID);
7298 				snprintf(pool_guid, 256, "%llu",
7299 				    (u_longlong_t)guid);
7300 				p = fnvlist_lookup_nvlist(d, pool_guid);
7301 			} else {
7302 				p = fnvlist_lookup_nvlist(d, pool_name);
7303 			}
7304 			nvdevs = fnvlist_alloc();
7305 		}
7306 		collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs);
7307 		if (cbp->cb_json) {
7308 			fnvlist_add_nvlist(p, "vdevs", nvdevs);
7309 			if (cbp->cb_json_pool_key_guid)
7310 				fnvlist_add_nvlist(d, pool_guid, p);
7311 			else
7312 				fnvlist_add_nvlist(d, pool_name, p);
7313 			fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
7314 			fnvlist_free(nvdevs);
7315 		}
7316 	}
7317 
7318 	return (0);
7319 }
7320 
7321 /*
7322  * Set the minimum pool/vdev name column width. The width must be at least 9,
7323  * but may be as large as needed.
7324  */
7325 static int
7326 get_namewidth_list(zpool_handle_t *zhp, void *data)
7327 {
7328 	list_cbdata_t *cb = data;
7329 	int width;
7330 
7331 	width = get_namewidth(zhp, cb->cb_namewidth,
7332 	    cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
7333 
7334 	if (width < 9)
7335 		width = 9;
7336 
7337 	cb->cb_namewidth = width;
7338 
7339 	return (0);
7340 }
7341 
7342 /*
7343  * zpool list [-gHjLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
7344  *
7345  *	-g	Display guid for individual vdev name.
7346  *	-H	Scripted mode. Don't display headers, and separate properties
7347  *		by a single tab.
7348  *	-L	Follow links when resolving vdev path name.
7349  *	-o	List of properties to display. Defaults to
7350  *		"name,size,allocated,free,checkpoint,expandsize,fragmentation,"
7351  *		"capacity,dedupratio,health,altroot"
7352  *	-p	Display values in parsable (exact) format.
7353  *	-P	Display full path for vdev name.
7354  *	-T	Display a timestamp in date(1) or Unix format
7355  *	-j	Display the output in JSON format
7356  *	--json-int	Display the numbers as integers instead of strings.
7357  *	--json-pool-key-guid	Set pool GUID as key for pool objects.
7358  *
7359  * List all pools in the system, whether or not they're healthy. Output space
7360  * statistics for each one, as well as a health status summary.
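 *
 * Example default output (hypothetical pool):
 *
 *	$ zpool list tank
 *	NAME   SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
 *	tank  7.25T  1.41T  5.84T        -         -    4%  19%  1.00x  ONLINE  -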
7361 	 */
7362 int
7363 zpool_do_list(int argc, char **argv)
7364 {
7365 	int c;
7366 	int ret = 0;
7367 	list_cbdata_t cb = { 0 };
7368 	static char default_props[] =
7369 	    "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
7370 	    "capacity,dedupratio,health,altroot";
7371 	char *props = default_props;
7372 	float interval = 0;
7373 	unsigned long count = 0;
7374 	zpool_list_t *list;
7375 	boolean_t first = B_TRUE;
7376 	nvlist_t *data = NULL;
7377 	current_prop_type = ZFS_TYPE_POOL;
7378 
7379 	struct option long_options[] = {
7380 		{"json", no_argument, NULL, 'j'},
7381 		{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
7382 		{"json-pool-key-guid", no_argument, NULL,
7383 		    ZPOOL_OPTION_POOL_KEY_GUID},
7384 		{0, 0, 0, 0}
7385 	};
7386 
7387 	/* check options */
7388 	while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options,
7389 	    NULL)) != -1) {
7390 		switch (c) {
7391 		case 'g':
7392 			cb.cb_name_flags |= VDEV_NAME_GUID;
7393 			break;
7394 		case 'H':
7395 			cb.cb_scripted = B_TRUE;
7396 			break;
7397 		case 'L':
7398 			cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
7399 			break;
7400 		case 'o':
7401 			props = optarg;
7402 			break;
7403 		case 'P':
7404 			cb.cb_name_flags |= VDEV_NAME_PATH;
7405 			break;
7406 		case 'p':
7407 			cb.cb_literal = B_TRUE;
7408 			break;
7409 		case 'j':
7410 			cb.cb_json = B_TRUE;
7411 			break;
7412 		case ZPOOL_OPTION_JSON_NUMS_AS_INT:
7413 			cb.cb_json_as_int = B_TRUE;
7414 			cb.cb_literal = B_TRUE;
7415 			break;
7416 		case ZPOOL_OPTION_POOL_KEY_GUID:
7417 			cb.cb_json_pool_key_guid = B_TRUE;
7418 			break;
7419 		case 'T':
7420 			get_timestamp_arg(*optarg);
7421 			break;
7422 		case 'v':
7423 			cb.cb_verbose = B_TRUE;
7424 			cb.cb_namewidth = 8;	/* 8 until precalc is avail */
7425 			break;
7426 		case ':':
7427 			(void) fprintf(stderr, gettext("missing argument for "
7428 			    "'%c' option\n"), optopt);
7429 			usage(B_FALSE);
7430 			break;
7431 		case '?':
7432 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7433 			    optopt);
7434 			usage(B_FALSE);
7435 		}
7436 	}
7437 
7438 	argc -= optind;
7439 	argv += optind;
7440 
7441 	if (!cb.cb_json && cb.cb_json_as_int) {
7442 		(void) fprintf(stderr, gettext("'--json-int' only works with"
7443 		    " '-j' option\n"));
7444 		usage(B_FALSE);
7445 	}
7446 
7447 	if (!cb.cb_json && cb.cb_json_pool_key_guid) {
7448 		(void) fprintf(stderr, gettext("'--json-pool-key-guid' only"
7449 		    " works with '-j' option\n"));
7450 		usage(B_FALSE);
7451 	}
7452 
7453 	get_interval_count(&argc, argv, &interval, &count);
7454 
7455 	if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
7456 		usage(B_FALSE);
7457 
7458 	for (;;) {
7459 		if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
7460 		    ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
7461 			return (1);
7462 
7463 		if (pool_list_count(list) == 0)
7464 			break;
7465 
7466 		if (cb.cb_json) {
7467 			cb.cb_jsobj = zpool_json_schema(0, 1);
7468 			data = fnvlist_alloc();
7469 			fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
7470 			fnvlist_free(data);
7471 		}
7472 
7473 		cb.cb_namewidth = 0;
7474 		(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
7475 
7476 		if (timestamp_fmt != NODATE) {
7477 			if (cb.cb_json) {
7478 				if (cb.cb_json_as_int) {
7479 					fnvlist_add_uint64(cb.cb_jsobj, "time",
7480 					    time(NULL));
7481 				} else {
7482 					char ts[128];
7483 					get_timestamp(timestamp_fmt, ts, 128);
7484 					fnvlist_add_string(cb.cb_jsobj, "time",
7485 					    ts);
7486 				}
7487 			} else
7488 				print_timestamp(timestamp_fmt);
7489 		}
7490 
7491 		if (!cb.cb_scripted && (first || cb.cb_verbose) &&
7492 		    !cb.cb_json) {
7493 			print_header(&cb);
7494 			first = B_FALSE;
7495 		}
7496 		ret = pool_list_iter(list, B_TRUE, list_callback,
&cb); 7497 7498 if (ret == 0 && cb.cb_json) 7499 zcmd_print_json(cb.cb_jsobj); 7500 else if (ret != 0 && cb.cb_json) 7501 nvlist_free(cb.cb_jsobj); 7502 7503 if (interval == 0) 7504 break; 7505 7506 if (count != 0 && --count == 0) 7507 break; 7508 7509 pool_list_free(list); 7510 7511 (void) fflush(stdout); 7512 (void) fsleep(interval); 7513 } 7514 7515 if (argc == 0 && !cb.cb_scripted && !cb.cb_json && 7516 pool_list_count(list) == 0) { 7517 (void) printf(gettext("no pools available\n")); 7518 ret = 0; 7519 } 7520 7521 pool_list_free(list); 7522 zprop_free_list(cb.cb_proplist); 7523 return (ret); 7524 } 7525 7526 static int 7527 zpool_do_attach_or_replace(int argc, char **argv, int replacing) 7528 { 7529 boolean_t force = B_FALSE; 7530 boolean_t rebuild = B_FALSE; 7531 boolean_t wait = B_FALSE; 7532 int c; 7533 nvlist_t *nvroot; 7534 char *poolname, *old_disk, *new_disk; 7535 zpool_handle_t *zhp; 7536 nvlist_t *props = NULL; 7537 char *propval; 7538 int ret; 7539 7540 /* check options */ 7541 while ((c = getopt(argc, argv, "fo:sw")) != -1) { 7542 switch (c) { 7543 case 'f': 7544 force = B_TRUE; 7545 break; 7546 case 'o': 7547 if ((propval = strchr(optarg, '=')) == NULL) { 7548 (void) fprintf(stderr, gettext("missing " 7549 "'=' for -o option\n")); 7550 usage(B_FALSE); 7551 } 7552 *propval = '\0'; 7553 propval++; 7554 7555 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) || 7556 (add_prop_list(optarg, propval, &props, B_TRUE))) 7557 usage(B_FALSE); 7558 break; 7559 case 's': 7560 rebuild = B_TRUE; 7561 break; 7562 case 'w': 7563 wait = B_TRUE; 7564 break; 7565 case '?': 7566 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7567 optopt); 7568 usage(B_FALSE); 7569 } 7570 } 7571 7572 argc -= optind; 7573 argv += optind; 7574 7575 /* get pool name and check number of arguments */ 7576 if (argc < 1) { 7577 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7578 usage(B_FALSE); 7579 } 7580 7581 poolname = argv[0]; 7582 7583 if (argc < 2) { 7584 (void) fprintf(stderr, 7585 gettext("missing <device> specification\n")); 7586 usage(B_FALSE); 7587 } 7588 7589 old_disk = argv[1]; 7590 7591 if (argc < 3) { 7592 if (!replacing) { 7593 (void) fprintf(stderr, 7594 gettext("missing <new_device> specification\n")); 7595 usage(B_FALSE); 7596 } 7597 new_disk = old_disk; 7598 argc -= 1; 7599 argv += 1; 7600 } else { 7601 new_disk = argv[2]; 7602 argc -= 2; 7603 argv += 2; 7604 } 7605 7606 if (argc > 1) { 7607 (void) fprintf(stderr, gettext("too many arguments\n")); 7608 usage(B_FALSE); 7609 } 7610 7611 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) { 7612 nvlist_free(props); 7613 return (1); 7614 } 7615 7616 if (zpool_get_config(zhp, NULL) == NULL) { 7617 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"), 7618 poolname); 7619 zpool_close(zhp); 7620 nvlist_free(props); 7621 return (1); 7622 } 7623 7624 /* unless manually specified use "ashift" pool property (if set) */ 7625 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) { 7626 int intval; 7627 zprop_source_t src; 7628 char strval[ZPOOL_MAXPROPLEN]; 7629 7630 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src); 7631 if (src != ZPROP_SRC_DEFAULT) { 7632 (void) sprintf(strval, "%" PRId32, intval); 7633 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval, 7634 &props, B_TRUE) == 0); 7635 } 7636 } 7637 7638 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE, 7639 argc, argv); 7640 if (nvroot == NULL) { 7641 zpool_close(zhp); 7642 nvlist_free(props); 7643 return (1); 7644 } 7645 7646 ret = zpool_vdev_attach(zhp, 
old_disk, new_disk, nvroot, replacing,
7647 	    rebuild);
7648 
7649 	if (ret == 0 && wait) {
7650 		zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
7651 		char raidz_prefix[] = "raidz";
7652 		if (replacing) {
7653 			activity = ZPOOL_WAIT_REPLACE;
7654 		} else if (strncmp(old_disk,
7655 		    raidz_prefix, strlen(raidz_prefix)) == 0) {
7656 			activity = ZPOOL_WAIT_RAIDZ_EXPAND;
7657 		}
7658 		ret = zpool_wait(zhp, activity);
7659 	}
7660 
7661 	nvlist_free(props);
7662 	nvlist_free(nvroot);
7663 	zpool_close(zhp);
7664 
7665 	return (ret);
7666 }
7667 
7668 /*
7669  * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
7670  *
7671  *	-f	Force attach, even if <new_device> appears to be in use.
7672  *	-s	Use sequential instead of healing reconstruction for resilver.
7673  *	-o	Set property=value.
7674  *	-w	Wait for replacing to complete before returning.
7675  *
7676  * Replace <device> with <new_device>.
7677  */
7678 int
7679 zpool_do_replace(int argc, char **argv)
7680 {
7681 	return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
7682 }
7683 
7684 /*
7685  * zpool attach [-fsw] [-o property=value] <pool> <vdev> <new_device>
7686  *
7687  *	-f	Force attach, even if <new_device> appears to be in use.
7688  *	-s	Use sequential instead of healing reconstruction for resilver.
7689  *	-o	Set property=value.
7690  *	-w	Wait for resilvering (mirror) or expansion (raidz) to complete
7691  *		before returning.
7692  *
7693  * Attach <new_device> to a <vdev>, where the vdev can be of type
7694  * device, mirror or raidz. If <vdev> is not part of a mirror, then <vdev> will
7695  * be transformed into a mirror of <vdev> and <new_device>. When a mirror
7696  * is involved, <new_device> will begin life with a DTL of [0, now], and will
7697  * immediately begin to resilver itself. For the raidz case, an expansion will
7698  * commence and reflow the raidz data across all the disks including the
7699  * <new_device>.
7700  */
7701 int
7702 zpool_do_attach(int argc, char **argv)
7703 {
7704 	return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
7705 }
7706 
7707 /*
7708  * zpool detach [-f] <pool> <device>
7709  *
7710  *	-f	Force detach of <device>, even if DTLs argue against it
7711  *		(not supported yet)
7712  *
7713  * Detach a device from a mirror. The operation will be refused if <device>
7714  * is the last device in the mirror, or if the DTLs indicate that this device
7715  * has the only valid copy of some data.
7716  */
7717 int
7718 zpool_do_detach(int argc, char **argv)
7719 {
7720 	int c;
7721 	char *poolname, *path;
7722 	zpool_handle_t *zhp;
7723 	int ret;
7724 
7725 	/* check options */
7726 	while ((c = getopt(argc, argv, "")) != -1) {
7727 		switch (c) {
7728 		case '?':
7729 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7730 			    optopt);
7731 			usage(B_FALSE);
7732 		}
7733 	}
7734 
7735 	argc -= optind;
7736 	argv += optind;
7737 
7738 	/* get pool name and check number of arguments */
7739 	if (argc < 1) {
7740 		(void) fprintf(stderr, gettext("missing pool name argument\n"));
7741 		usage(B_FALSE);
7742 	}
7743 
7744 	if (argc < 2) {
7745 		(void) fprintf(stderr,
7746 		    gettext("missing <device> specification\n"));
7747 		usage(B_FALSE);
7748 	}
7749 
7750 	poolname = argv[0];
7751 	path = argv[1];
7752 
7753 	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7754 		return (1);
7755 
7756 	ret = zpool_vdev_detach(zhp, path);
7757 
7758 	zpool_close(zhp);
7759 
7760 	return (ret);
7761 }
7762 
7763 /*
7764  * zpool split [-glLnP] [-o prop=val] ...
7765  *		[-o mntopt] ...
7766  *		[-R altroot] <pool> <newpool> [<device> ...]
7767  *
7768  *	-g	Display guid for individual vdev name.
7769  *	-L	Follow links when resolving vdev path name.
7770  *	-n	Do not split the pool, but display the resulting layout if
7771  *		it were to be split.
7772  *	-o	Set property=value, or set mount options.
7773  *	-P	Display full path for vdev name.
7774  *	-R	Mount the split-off pool under an alternate root.
7775  *	-l	Load encryption keys while importing.
7776  *
7777  * Splits the named pool and gives it the new pool name. Devices to be split
7778  * off may be listed, provided that no more than one device is specified
7779  * per top-level vdev mirror. The newly split pool is left in an exported
7780  * state unless -R is specified.
7781  *
7782  * Restrictions: the top-level of the pool must only be made up of
7783  * mirrors; all devices in the pool must be healthy; no device may be
7784  * undergoing a resilvering operation.
7785  */
7786 int
7787 zpool_do_split(int argc, char **argv)
7788 {
7789 	char *srcpool, *newpool, *propval;
7790 	char *mntopts = NULL;
7791 	splitflags_t flags;
7792 	int c, ret = 0;
7793 	int ms_status = 0;
7794 	boolean_t loadkeys = B_FALSE;
7795 	zpool_handle_t *zhp;
7796 	nvlist_t *config, *props = NULL;
7797 
7798 	flags.dryrun = B_FALSE;
7799 	flags.import = B_FALSE;
7800 	flags.name_flags = 0;
7801 
7802 	/* check options */
7803 	while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
7804 		switch (c) {
7805 		case 'g':
7806 			flags.name_flags |= VDEV_NAME_GUID;
7807 			break;
7808 		case 'L':
7809 			flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
7810 			break;
7811 		case 'R':
7812 			flags.import = B_TRUE;
7813 			if (add_prop_list(
7814 			    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
7815 			    &props, B_TRUE) != 0) {
7816 				nvlist_free(props);
7817 				usage(B_FALSE);
7818 			}
7819 			break;
7820 		case 'l':
7821 			loadkeys = B_TRUE;
7822 			break;
7823 		case 'n':
7824 			flags.dryrun = B_TRUE;
7825 			break;
7826 		case 'o':
7827 			if ((propval = strchr(optarg, '=')) != NULL) {
7828 				*propval = '\0';
7829 				propval++;
7830 				if (add_prop_list(optarg, propval,
7831 				    &props, B_TRUE) != 0) {
7832 					nvlist_free(props);
7833 					usage(B_FALSE);
7834 				}
7835 			} else {
7836 				mntopts = optarg;
7837 			}
7838 			break;
7839 		case 'P':
7840 			flags.name_flags |= VDEV_NAME_PATH;
7841 			break;
7842 		case ':':
7843 			(void) fprintf(stderr, gettext("missing argument for "
7844 			    "'%c' option\n"), optopt);
7845 			usage(B_FALSE);
7846 			break;
7847 		case '?':
7848 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7849 			    optopt);
7850 			usage(B_FALSE);
7851 			break;
7852 		}
7853 	}
7854 
7855 	if (!flags.import && mntopts != NULL) {
7856 		(void) fprintf(stderr, gettext("setting mntopts is only "
7857 		    "valid when importing the pool\n"));
7858 		usage(B_FALSE);
7859 	}
7860 
7861 	if (!flags.import && loadkeys) {
7862 		(void) fprintf(stderr, gettext("loading keys is only "
7863 		    "valid when importing the pool\n"));
7864 		usage(B_FALSE);
7865 	}
7866 
7867 	argc -= optind;
7868 	argv += optind;
7869 
7870 	if (argc < 1) {
7871 		(void) fprintf(stderr, gettext("Missing pool name\n"));
7872 		usage(B_FALSE);
7873 	}
7874 	if (argc < 2) {
7875 		(void) fprintf(stderr, gettext("Missing new pool name\n"));
7876 		usage(B_FALSE);
7877 	}
7878 
7879 	srcpool = argv[0];
7880 	newpool = argv[1];
7881 
7882 	argc -= 2;
7883 	argv += 2;
7884 
7885 	if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
7886 		nvlist_free(props);
7887 		return (1);
7888 	}
7889 
7890 	config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
7891 	if (config == NULL) {
7892 		ret = 1;
7893 	} else {
7894 		if (flags.dryrun) {
7895 			(void) printf(gettext("would create '%s' with the "
7896 			    "following layout:\n\n"), newpool);
7897 			print_vdev_tree(NULL, newpool, config, 0, "",
7898 			    flags.name_flags);
7899 			print_vdev_tree(NULL, "dedup", config, 0,
7900 			    VDEV_ALLOC_BIAS_DEDUP, 0);
7901 			print_vdev_tree(NULL, "special", config, 0,
7902 			    VDEV_ALLOC_BIAS_SPECIAL, 0);
7903 		}
7904 	}
7905 
7906 	zpool_close(zhp);
7907 
7908 	if (ret != 0 || flags.dryrun || !flags.import) {
7909 		nvlist_free(config);
7910 		nvlist_free(props);
7911 		return (ret);
7912 	}
7913 
7914 	/*
7915 	 * The split was successful. Now we need to open the new
7916 	 * pool and import it.
7917 	 */
7918 	if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
7919 		nvlist_free(config);
7920 		nvlist_free(props);
7921 		return (1);
7922 	}
7923 
7924 	if (loadkeys) {
7925 		ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
7926 		if (ret != 0)
7927 			ret = 1;
7928 	}
7929 
7930 	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
7931 		ms_status = zpool_enable_datasets(zhp, mntopts, 0,
7932 		    mount_tp_nthr);
7933 		if (ms_status == EZFS_SHAREFAILED) {
7934 			(void) fprintf(stderr, gettext("Split was successful, "
7935 			    "datasets are mounted but sharing of some datasets "
7936 			    "has failed\n"));
7937 		} else if (ms_status == EZFS_MOUNTFAILED) {
7938 			(void) fprintf(stderr, gettext("Split was successful"
7939 			    ", but some datasets could not be mounted\n"));
7940 			(void) fprintf(stderr, gettext("Try doing '%s' with a "
7941 			    "different altroot\n"), "zpool import");
7942 		}
7943 	}
7944 	zpool_close(zhp);
7945 	nvlist_free(config);
7946 	nvlist_free(props);
7947 
7948 	return (ret);
7949 }
7950 
7951 
7952 /*
7953  * zpool online [-e] [--power] <pool> <device> ...
7954  *
7955  *	-e	Expand the device to use all available space.
7956  *	--power	Power on the enclosure slot to the drive (if possible)
7957  */
7958 int
7959 zpool_do_online(int argc, char **argv)
7960 {
7961 	int c, i;
7962 	char *poolname;
7963 	zpool_handle_t *zhp;
7964 	int ret = 0;
7965 	vdev_state_t newstate;
7966 	int flags = 0;
7967 	boolean_t is_power_on = B_FALSE;
7968 	struct option long_options[] = {
7969 		{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7970 		{0, 0, 0, 0}
7971 	};
7972 
7973 	/* check options */
7974 	while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
7975 		switch (c) {
7976 		case 'e':
7977 			flags |= ZFS_ONLINE_EXPAND;
7978 			break;
7979 		case ZPOOL_OPTION_POWER:
7980 			is_power_on = B_TRUE;
7981 			break;
7982 		case '?':
7983 			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
7984 			    optopt);
7985 			usage(B_FALSE);
7986 		}
7987 	}
7988 
7989 	if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7990 		is_power_on = B_TRUE;
7991 
7992 	argc -= optind;
7993 	argv += optind;
7994 
7995 	/* get pool name and check number of arguments */
7996 	if (argc < 1) {
7997 		(void) fprintf(stderr, gettext("missing pool name\n"));
7998 		usage(B_FALSE);
7999 	}
8000 	if (argc < 2) {
8001 		(void) fprintf(stderr, gettext("missing device name\n"));
8002 		usage(B_FALSE);
8003 	}
8004 
8005 	poolname = argv[0];
8006 
8007 	if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
8008 		(void) fprintf(stderr, gettext("failed to open pool "
8009 		    "\"%s\"\n"), poolname);
8010 		return (1);
8011 	}
8012 
8013 	for (i = 1; i < argc; i++) {
8014 		vdev_state_t oldstate;
8015 		boolean_t avail_spare, l2cache;
8016 		int rc;
8017 
8018 		if (is_power_on) {
8019 			rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
8020 			if (rc == ENOTSUP) {
8021 				(void) fprintf(stderr,
8022 				    gettext("Power control not supported\n"));
8023 			}
8024 			if (rc != 0)
8025 				return (rc);
8026 		}
8027 
8028 		nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
8029 		    &l2cache, NULL);
8030 		if (tgt == NULL) {
8031 			ret = 1;
8032 			(void) fprintf(stderr, gettext("couldn't find device "
device " 8032 "\"%s\" in pool \"%s\"\n"), argv[i], poolname); 8033 continue; 8034 } 8035 uint_t vsc; 8036 oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt, 8037 ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state; 8038 if ((rc = zpool_vdev_online(zhp, argv[i], flags, 8039 &newstate)) == 0) { 8040 if (newstate != VDEV_STATE_HEALTHY) { 8041 (void) printf(gettext("warning: device '%s' " 8042 "onlined, but remains in faulted state\n"), 8043 argv[i]); 8044 if (newstate == VDEV_STATE_FAULTED) 8045 (void) printf(gettext("use 'zpool " 8046 "clear' to restore a faulted " 8047 "device\n")); 8048 else 8049 (void) printf(gettext("use 'zpool " 8050 "replace' to replace devices " 8051 "that are no longer present\n")); 8052 if ((flags & ZFS_ONLINE_EXPAND)) { 8053 (void) printf(gettext("%s: failed " 8054 "to expand usable space on " 8055 "unhealthy device '%s'\n"), 8056 (oldstate >= VDEV_STATE_DEGRADED ? 8057 "error" : "warning"), argv[i]); 8058 if (oldstate >= VDEV_STATE_DEGRADED) { 8059 ret = 1; 8060 break; 8061 } 8062 } 8063 } 8064 } else { 8065 (void) fprintf(stderr, gettext("Failed to online " 8066 "\"%s\" in pool \"%s\": %d\n"), 8067 argv[i], poolname, rc); 8068 ret = 1; 8069 } 8070 } 8071 8072 zpool_close(zhp); 8073 8074 return (ret); 8075 } 8076 8077 /* 8078 * zpool offline [-ft]|[--power] <pool> <device> ... 8079 * 8080 * 8081 * -f Force the device into a faulted state. 8082 * 8083 * -t Only take the device off-line temporarily. The offline/faulted 8084 * state will not be persistent across reboots. 8085 * 8086 * --power Power off the enclosure slot to the drive (if possible) 8087 */ 8088 int 8089 zpool_do_offline(int argc, char **argv) 8090 { 8091 int c, i; 8092 char *poolname; 8093 zpool_handle_t *zhp; 8094 int ret = 0; 8095 boolean_t istmp = B_FALSE; 8096 boolean_t fault = B_FALSE; 8097 boolean_t is_power_off = B_FALSE; 8098 8099 struct option long_options[] = { 8100 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 8101 {0, 0, 0, 0} 8102 }; 8103 8104 /* check options */ 8105 while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) { 8106 switch (c) { 8107 case 'f': 8108 fault = B_TRUE; 8109 break; 8110 case 't': 8111 istmp = B_TRUE; 8112 break; 8113 case ZPOOL_OPTION_POWER: 8114 is_power_off = B_TRUE; 8115 break; 8116 case '?': 8117 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8118 optopt); 8119 usage(B_FALSE); 8120 } 8121 } 8122 8123 if (is_power_off && fault) { 8124 (void) fprintf(stderr, 8125 gettext("-0 and -f cannot be used together\n")); 8126 usage(B_FALSE); 8127 return (1); 8128 } 8129 8130 if (is_power_off && istmp) { 8131 (void) fprintf(stderr, 8132 gettext("-0 and -t cannot be used together\n")); 8133 usage(B_FALSE); 8134 return (1); 8135 } 8136 8137 argc -= optind; 8138 argv += optind; 8139 8140 /* get pool name and check number of arguments */ 8141 if (argc < 1) { 8142 (void) fprintf(stderr, gettext("missing pool name\n")); 8143 usage(B_FALSE); 8144 } 8145 if (argc < 2) { 8146 (void) fprintf(stderr, gettext("missing device name\n")); 8147 usage(B_FALSE); 8148 } 8149 8150 poolname = argv[0]; 8151 8152 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) { 8153 (void) fprintf(stderr, gettext("failed to open pool " 8154 "\"%s\""), poolname); 8155 return (1); 8156 } 8157 8158 for (i = 1; i < argc; i++) { 8159 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]); 8160 if (is_power_off) { 8161 /* 8162 * Note: we have to power off first, then set REMOVED, 8163 * or else zpool_vdev_set_removed_state() returns 8164 * EAGAIN. 
8165 */ 8166 ret = zpool_power_off(zhp, argv[i]); 8167 if (ret != 0) { 8168 (void) fprintf(stderr, "%s %s %d\n", 8169 gettext("unable to power off slot for"), 8170 argv[i], ret); 8171 } 8172 zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE); 8173 8174 } else if (fault) { 8175 vdev_aux_t aux; 8176 if (istmp == B_FALSE) { 8177 /* Force the fault to persist across imports */ 8178 aux = VDEV_AUX_EXTERNAL_PERSIST; 8179 } else { 8180 aux = VDEV_AUX_EXTERNAL; 8181 } 8182 8183 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0) 8184 ret = 1; 8185 } else { 8186 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0) 8187 ret = 1; 8188 } 8189 } 8190 8191 zpool_close(zhp); 8192 8193 return (ret); 8194 } 8195 8196 /* 8197 * zpool clear [-nF]|[--power] <pool> [device] 8198 * 8199 * Clear all errors associated with a pool or a particular device. 8200 */ 8201 int 8202 zpool_do_clear(int argc, char **argv) 8203 { 8204 int c; 8205 int ret = 0; 8206 boolean_t dryrun = B_FALSE; 8207 boolean_t do_rewind = B_FALSE; 8208 boolean_t xtreme_rewind = B_FALSE; 8209 boolean_t is_power_on = B_FALSE; 8210 uint32_t rewind_policy = ZPOOL_NO_REWIND; 8211 nvlist_t *policy = NULL; 8212 zpool_handle_t *zhp; 8213 char *pool, *device; 8214 8215 struct option long_options[] = { 8216 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 8217 {0, 0, 0, 0} 8218 }; 8219 8220 /* check options */ 8221 while ((c = getopt_long(argc, argv, "FnX", long_options, 8222 NULL)) != -1) { 8223 switch (c) { 8224 case 'F': 8225 do_rewind = B_TRUE; 8226 break; 8227 case 'n': 8228 dryrun = B_TRUE; 8229 break; 8230 case 'X': 8231 xtreme_rewind = B_TRUE; 8232 break; 8233 case ZPOOL_OPTION_POWER: 8234 is_power_on = B_TRUE; 8235 break; 8236 case '?': 8237 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8238 optopt); 8239 usage(B_FALSE); 8240 } 8241 } 8242 8243 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT")) 8244 is_power_on = B_TRUE; 8245 8246 argc -= optind; 8247 argv += optind; 8248 8249 if (argc < 1) { 8250 (void) fprintf(stderr, gettext("missing pool name\n")); 8251 usage(B_FALSE); 8252 } 8253 8254 if (argc > 2) { 8255 (void) fprintf(stderr, gettext("too many arguments\n")); 8256 usage(B_FALSE); 8257 } 8258 8259 if ((dryrun || xtreme_rewind) && !do_rewind) { 8260 (void) fprintf(stderr, 8261 gettext("-n or -X only meaningful with -F\n")); 8262 usage(B_FALSE); 8263 } 8264 if (dryrun) 8265 rewind_policy = ZPOOL_TRY_REWIND; 8266 else if (do_rewind) 8267 rewind_policy = ZPOOL_DO_REWIND; 8268 if (xtreme_rewind) 8269 rewind_policy |= ZPOOL_EXTREME_REWIND; 8270 8271 /* In future, further rewind policy choices can be passed along here */ 8272 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 8273 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, 8274 rewind_policy) != 0) { 8275 return (1); 8276 } 8277 8278 pool = argv[0]; 8279 device = argc == 2 ? 
argv[1] : NULL; 8280 8281 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) { 8282 nvlist_free(policy); 8283 return (1); 8284 } 8285 8286 if (is_power_on) { 8287 if (device == NULL) { 8288 zpool_power_on_pool_and_wait_for_devices(zhp); 8289 } else { 8290 zpool_power_on_and_disk_wait(zhp, device); 8291 } 8292 } 8293 8294 if (zpool_clear(zhp, device, policy) != 0) 8295 ret = 1; 8296 8297 zpool_close(zhp); 8298 8299 nvlist_free(policy); 8300 8301 return (ret); 8302 } 8303 8304 /* 8305 * zpool reguid [-g <guid>] <pool> 8306 */ 8307 int 8308 zpool_do_reguid(int argc, char **argv) 8309 { 8310 uint64_t guid; 8311 uint64_t *guidp = NULL; 8312 int c; 8313 char *endptr; 8314 char *poolname; 8315 zpool_handle_t *zhp; 8316 int ret = 0; 8317 8318 /* check options */ 8319 while ((c = getopt(argc, argv, "g:")) != -1) { 8320 switch (c) { 8321 case 'g': 8322 errno = 0; 8323 guid = strtoull(optarg, &endptr, 10); 8324 if (errno != 0 || *endptr != '\0') { 8325 (void) fprintf(stderr, 8326 gettext("invalid GUID: %s\n"), optarg); 8327 usage(B_FALSE); 8328 } 8329 guidp = &guid; 8330 break; 8331 case '?': 8332 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8333 optopt); 8334 usage(B_FALSE); 8335 } 8336 } 8337 8338 argc -= optind; 8339 argv += optind; 8340 8341 /* get pool name and check number of arguments */ 8342 if (argc < 1) { 8343 (void) fprintf(stderr, gettext("missing pool name\n")); 8344 usage(B_FALSE); 8345 } 8346 8347 if (argc > 1) { 8348 (void) fprintf(stderr, gettext("too many arguments\n")); 8349 usage(B_FALSE); 8350 } 8351 8352 poolname = argv[0]; 8353 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 8354 return (1); 8355 8356 ret = zpool_set_guid(zhp, guidp); 8357 8358 zpool_close(zhp); 8359 return (ret); 8360 } 8361 8362 8363 /* 8364 * zpool reopen [-n] <pool> 8365 * 8366 * Reopen the pool so that the kernel can update the sizes of all vdevs.
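 *
 * -n  Do not restart an in-progress scrub when the pool is reopened.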
8367 */ 8368 int 8369 zpool_do_reopen(int argc, char **argv) 8370 { 8371 int c; 8372 int ret = 0; 8373 boolean_t scrub_restart = B_TRUE; 8374 8375 /* check options */ 8376 while ((c = getopt(argc, argv, "n")) != -1) { 8377 switch (c) { 8378 case 'n': 8379 scrub_restart = B_FALSE; 8380 break; 8381 case '?': 8382 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8383 optopt); 8384 usage(B_FALSE); 8385 } 8386 } 8387 8388 argc -= optind; 8389 argv += optind; 8390 8391 /* if argc == 0 we will execute zpool_reopen_one on all pools */ 8392 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 8393 B_FALSE, zpool_reopen_one, &scrub_restart); 8394 8395 return (ret); 8396 } 8397 8398 typedef struct scrub_cbdata { 8399 int cb_type; 8400 pool_scrub_cmd_t cb_scrub_cmd; 8401 time_t cb_date_start; 8402 time_t cb_date_end; 8403 } scrub_cbdata_t; 8404 8405 static boolean_t 8406 zpool_has_checkpoint(zpool_handle_t *zhp) 8407 { 8408 nvlist_t *config, *nvroot; 8409 8410 config = zpool_get_config(zhp, NULL); 8411 8412 if (config != NULL) { 8413 pool_checkpoint_stat_t *pcs = NULL; 8414 uint_t c; 8415 8416 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 8417 (void) nvlist_lookup_uint64_array(nvroot, 8418 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 8419 8420 if (pcs == NULL || pcs->pcs_state == CS_NONE) 8421 return (B_FALSE); 8422 8423 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS || 8424 pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 8425 return (B_TRUE); 8426 } 8427 8428 return (B_FALSE); 8429 } 8430 8431 static int 8432 scrub_callback(zpool_handle_t *zhp, void *data) 8433 { 8434 scrub_cbdata_t *cb = data; 8435 int err; 8436 8437 /* 8438 * Ignore faulted pools. 8439 */ 8440 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 8441 (void) fprintf(stderr, gettext("cannot scan '%s': pool is " 8442 "currently unavailable\n"), zpool_get_name(zhp)); 8443 return (1); 8444 } 8445 8446 err = zpool_scan_range(zhp, cb->cb_type, cb->cb_scrub_cmd, 8447 cb->cb_date_start, cb->cb_date_end); 8448 if (err == 0 && zpool_has_checkpoint(zhp) && 8449 cb->cb_type == POOL_SCAN_SCRUB) { 8450 (void) printf(gettext("warning: will not scrub state that " 8451 "belongs to the checkpoint of pool '%s'\n"), 8452 zpool_get_name(zhp)); 8453 } 8454 8455 return (err != 0); 8456 } 8457 8458 static int 8459 wait_callback(zpool_handle_t *zhp, void *data) 8460 { 8461 zpool_wait_activity_t *act = data; 8462 return (zpool_wait(zhp, *act)); 8463 } 8464 8465 static time_t 8466 date_string_to_sec(const char *timestr, boolean_t rounding) 8467 { 8468 struct tm tm = {0}; 8469 int adjustment = rounding ? 1 : 0; 8470 8471 /* Let mktime() determine whether DST is in effect. */ 8472 tm.tm_isdst = -1; 8473 8474 if (strptime(timestr, "%Y-%m-%d %H:%M", &tm) == NULL) { 8475 if (strptime(timestr, "%Y-%m-%d", &tm) == NULL) { 8476 (void) fprintf(stderr, gettext("Failed to parse the date.\n")); 8477 usage(B_FALSE); 8478 } 8479 adjustment *= 24 * 60 * 60; 8480 } else { 8481 adjustment *= 60; 8482 } 8483 8484 return (mktime(&tm) + adjustment); 8485 } 8486 8487 /* 8488 * zpool scrub [-e | -s | -p | -C] [-E <date>] [-S <date>] [-w] [-a | <pool> ...] 8489 * 8490 * -a Scrub all pools. 8491 * -e Only scrub blocks in the error log. 8492 * -E <date> Scrub only blocks written before this end date (inclusive). 8493 * -S <date> Scrub only blocks written after this start date. 8494 * -s Stop. Stops any in-progress scrub. 8495 * -p Pause. Pause in-progress scrub. 8496 * -w Wait. Blocks until scrub has completed. 8497 * -C Scrub from last saved txg.
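 *
 * For example (the pool name "tank" below is hypothetical), a scrub
 * restricted to data written during March 2025 could be requested as:
 *
 *   zpool scrub -S "2025-03-01" -E "2025-03-31" tank
 *
 * Both dates accept the "%Y-%m-%d %H:%M" and "%Y-%m-%d" forms parsed
 * by date_string_to_sec() above; -E is rounded up so the end date is
 * inclusive.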
8498 */ 8499 int 8500 zpool_do_scrub(int argc, char **argv) 8501 { 8502 int c; 8503 scrub_cbdata_t cb; 8504 boolean_t wait = B_FALSE; 8505 int error; 8506 8507 cb.cb_type = POOL_SCAN_SCRUB; 8508 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 8509 cb.cb_date_start = cb.cb_date_end = 0; 8510 8511 boolean_t is_error_scrub = B_FALSE; 8512 boolean_t is_pause = B_FALSE; 8513 boolean_t is_stop = B_FALSE; 8514 boolean_t is_txg_continue = B_FALSE; 8515 boolean_t scrub_all = B_FALSE; 8516 8517 /* check options */ 8518 while ((c = getopt(argc, argv, "aspweCE:S:")) != -1) { 8519 switch (c) { 8520 case 'a': 8521 scrub_all = B_TRUE; 8522 break; 8523 case 'e': 8524 is_error_scrub = B_TRUE; 8525 break; 8526 case 'E': 8527 /* 8528 * Round the date. It's better to scrub more data than 8529 * less. This also makes the date inclusive. 8530 */ 8531 cb.cb_date_end = date_string_to_sec(optarg, B_TRUE); 8532 break; 8533 case 's': 8534 is_stop = B_TRUE; 8535 break; 8536 case 'S': 8537 cb.cb_date_start = date_string_to_sec(optarg, B_FALSE); 8538 break; 8539 case 'p': 8540 is_pause = B_TRUE; 8541 break; 8542 case 'w': 8543 wait = B_TRUE; 8544 break; 8545 case 'C': 8546 is_txg_continue = B_TRUE; 8547 break; 8548 case '?': 8549 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8550 optopt); 8551 usage(B_FALSE); 8552 } 8553 } 8554 8555 if (is_pause && is_stop) { 8556 (void) fprintf(stderr, gettext("invalid option " 8557 "combination: -s and -p are mutually exclusive\n")); 8558 usage(B_FALSE); 8559 } else if (is_pause && is_txg_continue) { 8560 (void) fprintf(stderr, gettext("invalid option " 8561 "combination: -p and -C are mutually exclusive\n")); 8562 usage(B_FALSE); 8563 } else if (is_stop && is_txg_continue) { 8564 (void) fprintf(stderr, gettext("invalid option " 8565 "combination: -s and -C are mutually exclusive\n")); 8566 usage(B_FALSE); 8567 } else if (is_error_scrub && is_txg_continue) { 8568 (void) fprintf(stderr, gettext("invalid option " 8569 "combination: -e and -C are mutually exclusive\n")); 8570 usage(B_FALSE); 8571 } else { 8572 if (is_error_scrub) 8573 cb.cb_type = POOL_SCAN_ERRORSCRUB; 8574 8575 if (is_pause) { 8576 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE; 8577 } else if (is_stop) { 8578 cb.cb_type = POOL_SCAN_NONE; 8579 } else if (is_txg_continue) { 8580 cb.cb_scrub_cmd = POOL_SCRUB_FROM_LAST_TXG; 8581 } else { 8582 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 8583 } 8584 } 8585 8586 if ((cb.cb_date_start != 0 || cb.cb_date_end != 0) && 8587 cb.cb_scrub_cmd != POOL_SCRUB_NORMAL) { 8588 (void) fprintf(stderr, gettext("invalid option combination: " 8589 "start/end date is available only with normal scrub\n")); 8590 usage(B_FALSE); 8591 } 8592 if (cb.cb_date_start != 0 && cb.cb_date_end != 0 && 8593 cb.cb_date_start > cb.cb_date_end) { 8594 (void) fprintf(stderr, gettext("invalid arguments: " 8595 "end date has to be later than start date\n")); 8596 usage(B_FALSE); 8597 } 8598 8599 if (wait && (cb.cb_type == POOL_SCAN_NONE || 8600 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) { 8601 (void) fprintf(stderr, gettext("invalid option combination: " 8602 "-w cannot be used with -p or -s\n")); 8603 usage(B_FALSE); 8604 } 8605 8606 argc -= optind; 8607 argv += optind; 8608 8609 if (argc < 1 && !scrub_all) { 8610 (void) fprintf(stderr, gettext("missing pool name argument\n")); 8611 usage(B_FALSE); 8612 } 8613 8614 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 8615 B_FALSE, scrub_callback, &cb); 8616 8617 if (wait && !error) { 8618 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB; 8619 error = for_each_pool(argc, argv, 
B_TRUE, NULL, ZFS_TYPE_POOL, 8620 B_FALSE, wait_callback, &act); 8621 } 8622 8623 return (error); 8624 } 8625 8626 /* 8627 * zpool resilver <pool> ... 8628 * 8629 * Restarts any in-progress resilver. 8630 */ 8631 int 8632 zpool_do_resilver(int argc, char **argv) 8633 { 8634 int c; 8635 scrub_cbdata_t cb; 8636 8637 cb.cb_type = POOL_SCAN_RESILVER; 8638 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 8639 cb.cb_date_start = cb.cb_date_end = 0; 8640 8641 /* check options */ 8642 while ((c = getopt(argc, argv, "")) != -1) { 8643 switch (c) { 8644 case '?': 8645 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 8646 optopt); 8647 usage(B_FALSE); 8648 } 8649 } 8650 8651 argc -= optind; 8652 argv += optind; 8653 8654 if (argc < 1) { 8655 (void) fprintf(stderr, gettext("missing pool name argument\n")); 8656 usage(B_FALSE); 8657 } 8658 8659 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 8660 B_FALSE, scrub_callback, &cb)); 8661 } 8662 8663 /* 8664 * zpool trim [-d] [-w] [-r <rate>] [-c | -s] <-a | pool> [<device> ...] 8665 * 8666 * -a Trim all pools. 8667 * -c Cancel. Ends any in-progress trim. 8668 * -d Secure trim. Requires kernel and device support. 8669 * -r <rate> Sets the TRIM rate in bytes (per second). Supports 8670 * adding a multiplier suffix such as 'k' or 'm'. 8671 * -s Suspend. TRIM can then be restarted with no flags. 8672 * -w Wait. Blocks until trimming has completed. 8673 */ 8674 int 8675 zpool_do_trim(int argc, char **argv) 8676 { 8677 struct option long_options[] = { 8678 {"cancel", no_argument, NULL, 'c'}, 8679 {"secure", no_argument, NULL, 'd'}, 8680 {"rate", required_argument, NULL, 'r'}, 8681 {"suspend", no_argument, NULL, 's'}, 8682 {"wait", no_argument, NULL, 'w'}, 8683 {"all", no_argument, NULL, 'a'}, 8684 {0, 0, 0, 0} 8685 }; 8686 8687 pool_trim_func_t cmd_type = POOL_TRIM_START; 8688 uint64_t rate = 0; 8689 boolean_t secure = B_FALSE; 8690 boolean_t wait = B_FALSE; 8691 boolean_t trimall = B_FALSE; 8692 int error; 8693 8694 int c; 8695 while ((c = getopt_long(argc, argv, "acdr:sw", long_options, NULL)) 8696 != -1) { 8697 switch (c) { 8698 case 'a': 8699 trimall = B_TRUE; 8700 break; 8701 case 'c': 8702 if (cmd_type != POOL_TRIM_START && 8703 cmd_type != POOL_TRIM_CANCEL) { 8704 (void) fprintf(stderr, gettext("-c cannot be " 8705 "combined with other options\n")); 8706 usage(B_FALSE); 8707 } 8708 cmd_type = POOL_TRIM_CANCEL; 8709 break; 8710 case 'd': 8711 if (cmd_type != POOL_TRIM_START) { 8712 (void) fprintf(stderr, gettext("-d cannot be " 8713 "combined with the -c or -s options\n")); 8714 usage(B_FALSE); 8715 } 8716 secure = B_TRUE; 8717 break; 8718 case 'r': 8719 if (cmd_type != POOL_TRIM_START) { 8720 (void) fprintf(stderr, gettext("-r cannot be " 8721 "combined with the -c or -s options\n")); 8722 usage(B_FALSE); 8723 } 8724 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) { 8725 (void) fprintf(stderr, "%s: %s\n", 8726 gettext("invalid value for rate"), 8727 libzfs_error_description(g_zfs)); 8728 usage(B_FALSE); 8729 } 8730 break; 8731 case 's': 8732 if (cmd_type != POOL_TRIM_START && 8733 cmd_type != POOL_TRIM_SUSPEND) { 8734 (void) fprintf(stderr, gettext("-s cannot be " 8735 "combined with other options\n")); 8736 usage(B_FALSE); 8737 } 8738 cmd_type = POOL_TRIM_SUSPEND; 8739 break; 8740 case 'w': 8741 wait = B_TRUE; 8742 break; 8743 case '?': 8744 if (optopt != 0) { 8745 (void) fprintf(stderr, 8746 gettext("invalid option '%c'\n"), optopt); 8747 } else { 8748 (void) fprintf(stderr, 8749 gettext("invalid option '%s'\n"), 8750 argv[optind - 1]); 8751 }
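/* Unknown short options arrive with optopt set; unknown long options leave optopt zero and were reported from argv above. Either way, print usage and exit. */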
8752 usage(B_FALSE); 8753 } 8754 } 8755 8756 argc -= optind; 8757 argv += optind; 8758 8759 trimflags_t trim_flags = { 8760 .secure = secure, 8761 .rate = rate, 8762 .wait = wait, 8763 }; 8764 8765 trim_cbdata_t cbdata = { 8766 .trim_flags = trim_flags, 8767 .cmd_type = cmd_type 8768 }; 8769 8770 if (argc < 1 && !trimall) { 8771 (void) fprintf(stderr, gettext("missing pool name argument\n")); 8772 usage(B_FALSE); 8773 return (-1); 8774 } 8775 8776 if (wait && (cmd_type != POOL_TRIM_START)) { 8777 (void) fprintf(stderr, gettext("-w cannot be used with -c or " 8778 "-s options\n")); 8779 usage(B_FALSE); 8780 } 8781 8782 if (trimall && argc > 0) { 8783 (void) fprintf(stderr, gettext("-a cannot be combined with " 8784 "individual zpools or vdevs\n")); 8785 usage(B_FALSE); 8786 } 8787 8788 if (argc == 0 && trimall) { 8789 cbdata.trim_flags.fullpool = B_TRUE; 8790 /* Trim each pool recursively */ 8791 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 8792 B_FALSE, zpool_trim_one, &cbdata); 8793 } else if (argc == 1) { 8794 char *poolname = argv[0]; 8795 zpool_handle_t *zhp = zpool_open(g_zfs, poolname); 8796 if (zhp == NULL) 8797 return (-1); 8798 /* no individual leaf vdevs specified, so add them all */ 8799 error = zpool_trim_one(zhp, &cbdata); 8800 zpool_close(zhp); 8801 } else { 8802 char *poolname = argv[0]; 8803 zpool_handle_t *zhp = zpool_open(g_zfs, poolname); 8804 if (zhp == NULL) 8805 return (-1); 8806 /* leaf vdevs specified, trim only those */ 8807 cbdata.trim_flags.fullpool = B_FALSE; 8808 nvlist_t *vdevs = fnvlist_alloc(); 8809 for (int i = 1; i < argc; i++) { 8810 fnvlist_add_boolean(vdevs, argv[i]); 8811 } 8812 error = zpool_trim(zhp, cbdata.cmd_type, vdevs, 8813 &cbdata.trim_flags); 8814 fnvlist_free(vdevs); 8815 zpool_close(zhp); 8816 } 8817 8818 return (error); 8819 } 8820 8821 /* 8822 * Converts a total number of seconds to a human-readable string broken 8823 * down into days/hours/minutes/seconds. 8824 */ 8825 static void 8826 secs_to_dhms(uint64_t total, char *buf) 8827 { 8828 uint64_t days = total / 60 / 60 / 24; 8829 uint64_t hours = (total / 60 / 60) % 24; 8830 uint64_t mins = (total / 60) % 60; 8831 uint64_t secs = (total % 60); 8832 8833 if (days > 0) { 8834 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu", 8835 (u_longlong_t)days, (u_longlong_t)hours, 8836 (u_longlong_t)mins, (u_longlong_t)secs); 8837 } else { 8838 (void) sprintf(buf, "%02llu:%02llu:%02llu", 8839 (u_longlong_t)hours, (u_longlong_t)mins, 8840 (u_longlong_t)secs); 8841 } 8842 } 8843 8844 /* 8845 * Print out detailed error scrub status.
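 * Unlike a normal scrub, progress here is counted in error blocks
 * (entries from the persistent error log), not bytes.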
8846 */ 8847 static void 8848 print_err_scrub_status(pool_scan_stat_t *ps) 8849 { 8850 time_t start, end, pause; 8851 uint64_t total_secs_left; 8852 uint64_t secs_left, mins_left, hours_left, days_left; 8853 uint64_t examined, to_be_examined; 8854 8855 if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) { 8856 return; 8857 } 8858 8859 (void) printf(gettext(" scrub: ")); 8860 8861 start = ps->pss_error_scrub_start; 8862 end = ps->pss_error_scrub_end; 8863 pause = ps->pss_pass_error_scrub_pause; 8864 examined = ps->pss_error_scrub_examined; 8865 to_be_examined = ps->pss_error_scrub_to_be_examined; 8866 8867 assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB); 8868 8869 if (ps->pss_error_scrub_state == DSS_FINISHED) { 8870 total_secs_left = end - start; 8871 days_left = total_secs_left / 60 / 60 / 24; 8872 hours_left = (total_secs_left / 60 / 60) % 24; 8873 mins_left = (total_secs_left / 60) % 60; 8874 secs_left = (total_secs_left % 60); 8875 8876 (void) printf(gettext("scrubbed %llu error blocks in %llu days " 8877 "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined, 8878 (u_longlong_t)days_left, (u_longlong_t)hours_left, 8879 (u_longlong_t)mins_left, (u_longlong_t)secs_left, 8880 ctime(&end)); 8881 8882 return; 8883 } else if (ps->pss_error_scrub_state == DSS_CANCELED) { 8884 (void) printf(gettext("error scrub canceled on %s"), 8885 ctime(&end)); 8886 return; 8887 } 8888 assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING); 8889 8890 /* Error scrub is in progress. */ 8891 if (pause == 0) { 8892 (void) printf(gettext("error scrub in progress since %s"), 8893 ctime(&start)); 8894 } else { 8895 (void) printf(gettext("error scrub paused since %s"), 8896 ctime(&pause)); 8897 (void) printf(gettext("\terror scrub started on %s"), 8898 ctime(&start)); 8899 } 8900 8901 double fraction_done = (double)examined / (to_be_examined + examined); 8902 (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error" 8903 " blocks"), 100 * fraction_done, (u_longlong_t)examined); 8904 8905 (void) printf("\n"); 8906 } 8907 8908 /* 8909 * Print out detailed scrub status. 8910 */ 8911 static void 8912 print_scan_scrub_resilver_status(pool_scan_stat_t *ps) 8913 { 8914 time_t start, end, pause; 8915 uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i; 8916 uint64_t elapsed, scan_rate, issue_rate; 8917 double fraction_done; 8918 char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7]; 8919 char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32]; 8920 8921 printf(" "); 8922 printf_color(ANSI_BOLD, gettext("scan:")); 8923 printf(" "); 8924 8925 /* If there's never been a scan, there's not much to say. */ 8926 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE || 8927 ps->pss_func >= POOL_SCAN_FUNCS) { 8928 (void) printf(gettext("none requested\n")); 8929 return; 8930 } 8931 8932 start = ps->pss_start_time; 8933 end = ps->pss_end_time; 8934 pause = ps->pss_pass_scrub_pause; 8935 8936 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf)); 8937 8938 int is_resilver = ps->pss_func == POOL_SCAN_RESILVER; 8939 int is_scrub = ps->pss_func == POOL_SCAN_SCRUB; 8940 assert(is_resilver || is_scrub); 8941 8942 /* Scan is finished or canceled. 
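 * Either way pss_end_time is valid; it provides the completion (or
 * cancellation) timestamp reported below.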
*/ 8943 if (ps->pss_state == DSS_FINISHED) { 8944 secs_to_dhms(end - start, time_buf); 8945 8946 if (is_scrub) { 8947 (void) printf(gettext("scrub repaired %s " 8948 "in %s with %llu errors on %s"), processed_buf, 8949 time_buf, (u_longlong_t)ps->pss_errors, 8950 ctime(&end)); 8951 } else if (is_resilver) { 8952 (void) printf(gettext("resilvered %s " 8953 "in %s with %llu errors on %s"), processed_buf, 8954 time_buf, (u_longlong_t)ps->pss_errors, 8955 ctime(&end)); 8956 } 8957 return; 8958 } else if (ps->pss_state == DSS_CANCELED) { 8959 if (is_scrub) { 8960 (void) printf(gettext("scrub canceled on %s"), 8961 ctime(&end)); 8962 } else if (is_resilver) { 8963 (void) printf(gettext("resilver canceled on %s"), 8964 ctime(&end)); 8965 } 8966 return; 8967 } 8968 8969 assert(ps->pss_state == DSS_SCANNING); 8970 8971 /* Scan is in progress. Resilvers can't be paused. */ 8972 if (is_scrub) { 8973 if (pause == 0) { 8974 (void) printf(gettext("scrub in progress since %s"), 8975 ctime(&start)); 8976 } else { 8977 (void) printf(gettext("scrub paused since %s"), 8978 ctime(&pause)); 8979 (void) printf(gettext("\tscrub started on %s"), 8980 ctime(&start)); 8981 } 8982 } else if (is_resilver) { 8983 (void) printf(gettext("resilver in progress since %s"), 8984 ctime(&start)); 8985 } 8986 8987 scanned = ps->pss_examined; 8988 pass_scanned = ps->pss_pass_exam; 8989 issued = ps->pss_issued; 8990 pass_issued = ps->pss_pass_issued; 8991 total_s = ps->pss_to_examine; 8992 total_i = ps->pss_to_examine - ps->pss_skipped; 8993 8994 /* we are only done with a block once we have issued the IO for it */ 8995 fraction_done = (double)issued / total_i; 8996 8997 /* elapsed time for this pass, rounding up to 1 if it's 0 */ 8998 elapsed = time(NULL) - ps->pss_pass_start; 8999 elapsed -= ps->pss_pass_scrub_spent_paused; 9000 elapsed = (elapsed != 0) ? elapsed : 1; 9001 9002 scan_rate = pass_scanned / elapsed; 9003 issue_rate = pass_issued / elapsed; 9004 9005 /* format all of the numbers we will be reporting */ 9006 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf)); 9007 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf)); 9008 zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf)); 9009 zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf)); 9010 9011 /* do not print estimated time if we have a paused scrub */ 9012 (void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf); 9013 if (pause == 0 && scan_rate > 0) { 9014 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf)); 9015 (void) printf(gettext(" at %s/s"), srate_buf); 9016 } 9017 (void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf); 9018 if (pause == 0 && issue_rate > 0) { 9019 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf)); 9020 (void) printf(gettext(" at %s/s"), irate_buf); 9021 } 9022 (void) printf(gettext("\n")); 9023 9024 if (is_resilver) { 9025 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 9026 processed_buf, 100 * fraction_done); 9027 } else if (is_scrub) { 9028 (void) printf(gettext("\t%s repaired, %.2f%% done"), 9029 processed_buf, 100 * fraction_done); 9030 } 9031 9032 if (pause == 0) { 9033 /* 9034 * Only provide an estimate iff: 9035 * 1) we haven't yet issued all we expected, and 9036 * 2) the issue rate exceeds 10 MB/s, and 9037 * 3) it's either: 9038 * a) a resilver which has started repairs, or 9039 * b) a scrub which has entered the issue phase. 
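 * Outside those conditions the projected completion time would
 * likely be too noisy to be useful, so it is withheld.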
9040 */ 9041 if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 && 9042 ((is_resilver && ps->pss_processed > 0) || 9043 (is_scrub && issued > 0))) { 9044 secs_to_dhms((total_i - issued) / issue_rate, time_buf); 9045 (void) printf(gettext(", %s to go\n"), time_buf); 9046 } else { 9047 (void) printf(gettext(", no estimated " 9048 "completion time\n")); 9049 } 9050 } else { 9051 (void) printf(gettext("\n")); 9052 } 9053 } 9054 9055 static void 9056 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name) 9057 { 9058 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE) 9059 return; 9060 9061 printf(" "); 9062 printf_color(ANSI_BOLD, gettext("scan:")); 9063 printf(" "); 9064 9065 uint64_t bytes_scanned = vrs->vrs_bytes_scanned; 9066 uint64_t bytes_issued = vrs->vrs_bytes_issued; 9067 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt; 9068 uint64_t bytes_est_s = vrs->vrs_bytes_est; 9069 uint64_t bytes_est_i = vrs->vrs_bytes_est; 9070 if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8) 9071 bytes_est_i -= vrs->vrs_pass_bytes_skipped; 9072 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned / 9073 (vrs->vrs_pass_time_ms + 1)) * 1000; 9074 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued / 9075 (vrs->vrs_pass_time_ms + 1)) * 1000; 9076 double scan_pct = MIN((double)bytes_scanned * 100 / 9077 (bytes_est_s + 1), 100); 9078 9079 /* Format all of the numbers we will be reporting */ 9080 char bytes_scanned_buf[7], bytes_issued_buf[7]; 9081 char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7]; 9082 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32]; 9083 zfs_nicebytes(bytes_scanned, bytes_scanned_buf, 9084 sizeof (bytes_scanned_buf)); 9085 zfs_nicebytes(bytes_issued, bytes_issued_buf, 9086 sizeof (bytes_issued_buf)); 9087 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf, 9088 sizeof (bytes_rebuilt_buf)); 9089 zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf)); 9090 zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf)); 9091 9092 time_t start = vrs->vrs_start_time; 9093 time_t end = vrs->vrs_end_time; 9094 9095 /* Rebuild is finished or canceled. 
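 * In both cases vrs_end_time supplies the timestamp printed below.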
*/ 9096 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) { 9097 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf); 9098 (void) printf(gettext("resilvered (%s) %s in %s " 9099 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf, 9100 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end)); 9101 return; 9102 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) { 9103 (void) printf(gettext("resilver (%s) canceled on %s"), 9104 vdev_name, ctime(&end)); 9105 return; 9106 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 9107 (void) printf(gettext("resilver (%s) in progress since %s"), 9108 vdev_name, ctime(&start)); 9109 } 9110 9111 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE); 9112 9113 (void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf, 9114 bytes_est_s_buf); 9115 if (scan_rate > 0) { 9116 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf)); 9117 (void) printf(gettext(" at %s/s"), scan_rate_buf); 9118 } 9119 (void) printf(gettext(", %s / %s issued"), bytes_issued_buf, 9120 bytes_est_i_buf); 9121 if (issue_rate > 0) { 9122 zfs_nicebytes(issue_rate, issue_rate_buf, 9123 sizeof (issue_rate_buf)); 9124 (void) printf(gettext(" at %s/s"), issue_rate_buf); 9125 } 9126 (void) printf(gettext("\n")); 9127 9128 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 9129 bytes_rebuilt_buf, scan_pct); 9130 9131 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 9132 if (bytes_est_s >= bytes_scanned && 9133 scan_rate >= 10 * 1024 * 1024) { 9134 secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate, 9135 time_buf); 9136 (void) printf(gettext(", %s to go\n"), time_buf); 9137 } else { 9138 (void) printf(gettext(", no estimated " 9139 "completion time\n")); 9140 } 9141 } else { 9142 (void) printf(gettext("\n")); 9143 } 9144 } 9145 9146 /* 9147 * Print rebuild status for top-level vdevs. 9148 */ 9149 static void 9150 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot) 9151 { 9152 nvlist_t **child; 9153 uint_t children; 9154 9155 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 9156 &child, &children) != 0) 9157 children = 0; 9158 9159 for (uint_t c = 0; c < children; c++) { 9160 vdev_rebuild_stat_t *vrs; 9161 uint_t i; 9162 9163 if (nvlist_lookup_uint64_array(child[c], 9164 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 9165 char *name = zpool_vdev_name(g_zfs, zhp, 9166 child[c], VDEV_NAME_TYPE_ID); 9167 print_rebuild_status_impl(vrs, i, name); 9168 free(name); 9169 } 9170 } 9171 } 9172 9173 /* 9174 * As we don't scrub checkpointed blocks, we want to warn the user that we 9175 * skipped scanning some blocks if a checkpoint exists or existed at any 9176 * time during the scan. If a sequential instead of healing reconstruction 9177 * was performed then the blocks were reconstructed. However, their checksums 9178 * have not been verified so we still print the warning. 
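 * (A sequential rebuild copies allocated ranges without reading
 * block pointers, so it has no checksums to verify against.)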
9179 */ 9180 static void 9181 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs) 9182 { 9183 if (ps == NULL || pcs == NULL) 9184 return; 9185 9186 if (pcs->pcs_state == CS_NONE || 9187 pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 9188 return; 9189 9190 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS); 9191 9192 if (ps->pss_state == DSS_NONE) 9193 return; 9194 9195 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) && 9196 ps->pss_end_time < pcs->pcs_start_time) 9197 return; 9198 9199 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) { 9200 (void) printf(gettext(" scan warning: skipped blocks " 9201 "that are only referenced by the checkpoint.\n")); 9202 } else { 9203 assert(ps->pss_state == DSS_SCANNING); 9204 (void) printf(gettext(" scan warning: skipping blocks " 9205 "that are only referenced by the checkpoint.\n")); 9206 } 9207 } 9208 9209 /* 9210 * Returns B_TRUE if there is an active rebuild in progress. Otherwise, 9211 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for 9212 * the last completed (or cancelled) rebuild. 9213 */ 9214 static boolean_t 9215 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time) 9216 { 9217 nvlist_t **child; 9218 uint_t children; 9219 boolean_t rebuilding = B_FALSE; 9220 uint64_t end_time = 0; 9221 9222 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 9223 &child, &children) != 0) 9224 children = 0; 9225 9226 for (uint_t c = 0; c < children; c++) { 9227 vdev_rebuild_stat_t *vrs; 9228 uint_t i; 9229 9230 if (nvlist_lookup_uint64_array(child[c], 9231 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 9232 9233 if (vrs->vrs_end_time > end_time) 9234 end_time = vrs->vrs_end_time; 9235 9236 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 9237 rebuilding = B_TRUE; 9238 end_time = 0; 9239 break; 9240 } 9241 } 9242 } 9243 9244 if (rebuild_end_time != NULL) 9245 *rebuild_end_time = end_time; 9246 9247 return (rebuilding); 9248 } 9249 9250 static void 9251 vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 9252 int depth, boolean_t isspare, char *parent, nvlist_t *item) 9253 { 9254 nvlist_t *vds, **child, *ch = NULL; 9255 uint_t vsc, children; 9256 vdev_stat_t *vs; 9257 char *vname; 9258 uint64_t notpresent; 9259 const char *type, *path; 9260 9261 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 9262 &child, &children) != 0) 9263 children = 0; 9264 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 9265 (uint64_t **)&vs, &vsc) == 0); 9266 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 9267 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) 9268 return; 9269 9270 if (cb->cb_print_unhealthy && depth > 0 && 9271 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) { 9272 return; 9273 } 9274 vname = zpool_vdev_name(g_zfs, zhp, nv, 9275 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 9276 vds = fnvlist_alloc(); 9277 fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int); 9278 if (cb->cb_flat_vdevs && parent != NULL) { 9279 fnvlist_add_string(vds, "parent", parent); 9280 } 9281 9282 if (isspare) { 9283 if (vs->vs_aux == VDEV_AUX_SPARED) { 9284 fnvlist_add_string(vds, "state", "INUSE"); 9285 used_by_other(zhp, nv, vds); 9286 } else if (vs->vs_state == VDEV_STATE_HEALTHY) 9287 fnvlist_add_string(vds, "state", "AVAIL"); 9288 } else { 9289 if (vs->vs_alloc) { 9290 nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc, 9291 cb->cb_literal, cb->cb_json_as_int, 9292 ZFS_NICENUM_BYTES); 9293 } 9294 if 
(vs->vs_space) { 9295 nice_num_str_nvlist(vds, "total_space", vs->vs_space, 9296 cb->cb_literal, cb->cb_json_as_int, 9297 ZFS_NICENUM_BYTES); 9298 } 9299 if (vs->vs_dspace) { 9300 nice_num_str_nvlist(vds, "def_space", vs->vs_dspace, 9301 cb->cb_literal, cb->cb_json_as_int, 9302 ZFS_NICENUM_BYTES); 9303 } 9304 if (vs->vs_rsize) { 9305 nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize, 9306 cb->cb_literal, cb->cb_json_as_int, 9307 ZFS_NICENUM_BYTES); 9308 } 9309 if (vs->vs_esize) { 9310 nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize, 9311 cb->cb_literal, cb->cb_json_as_int, 9312 ZFS_NICENUM_BYTES); 9313 } 9314 if (vs->vs_self_healed) { 9315 nice_num_str_nvlist(vds, "self_healed", 9316 vs->vs_self_healed, cb->cb_literal, 9317 cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9318 } 9319 if (vs->vs_pspace) { 9320 nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace, 9321 cb->cb_literal, cb->cb_json_as_int, 9322 ZFS_NICENUM_BYTES); 9323 } 9324 nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors, 9325 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9326 nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors, 9327 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9328 nice_num_str_nvlist(vds, "checksum_errors", 9329 vs->vs_checksum_errors, cb->cb_literal, 9330 cb->cb_json_as_int, ZFS_NICENUM_1024); 9331 if (vs->vs_scan_processed) { 9332 nice_num_str_nvlist(vds, "scan_processed", 9333 vs->vs_scan_processed, cb->cb_literal, 9334 cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9335 } 9336 if (vs->vs_checkpoint_space) { 9337 nice_num_str_nvlist(vds, "checkpoint_space", 9338 vs->vs_checkpoint_space, cb->cb_literal, 9339 cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9340 } 9341 if (vs->vs_resilver_deferred) { 9342 nice_num_str_nvlist(vds, "resilver_deferred", 9343 vs->vs_resilver_deferred, B_TRUE, 9344 cb->cb_json_as_int, ZFS_NICENUM_1024); 9345 } 9346 if (children == 0) { 9347 nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios, 9348 cb->cb_literal, cb->cb_json_as_int, 9349 ZFS_NICENUM_1024); 9350 } 9351 if (cb->cb_print_power) { 9352 if (children == 0) { 9353 /* Only leaf vdevs have physical slots */ 9354 switch (zpool_power_current_state(zhp, (char *) 9355 fnvlist_lookup_string(nv, 9356 ZPOOL_CONFIG_PATH))) { 9357 case 0: 9358 fnvlist_add_string(vds, "power_state", 9359 "off"); 9360 break; 9361 case 1: 9362 fnvlist_add_string(vds, "power_state", 9363 "on"); 9364 break; 9365 default: 9366 fnvlist_add_string(vds, "power_state", 9367 "-"); 9368 } 9369 } else { 9370 fnvlist_add_string(vds, "power_state", "-"); 9371 } 9372 } 9373 } 9374 9375 if (cb->cb_print_dio_verify) { 9376 nice_num_str_nvlist(vds, "dio_verify_errors", 9377 vs->vs_dio_verify_errors, cb->cb_literal, 9378 cb->cb_json_as_int, ZFS_NICENUM_1024); 9379 } 9380 9381 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 9382 &notpresent) == 0) { 9383 nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT, 9384 1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9385 fnvlist_add_string(vds, "was", 9386 fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH)); 9387 } else if (vs->vs_aux != VDEV_AUX_NONE) { 9388 fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]); 9389 } else if (children == 0 && !isspare && 9390 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL && 9391 VDEV_STAT_VALID(vs_physical_ashift, vsc) && 9392 vs->vs_configured_ashift < vs->vs_physical_ashift) { 9393 nice_num_str_nvlist(vds, "configured_ashift", 9394 vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int, 9395 ZFS_NICENUM_1024); 9396 nice_num_str_nvlist(vds,
"physical_ashift", 9397 vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int, 9398 ZFS_NICENUM_1024); 9399 } 9400 if (vs->vs_scan_removing != 0) { 9401 nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing, 9402 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); 9403 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) { 9404 nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc, 9405 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); 9406 } 9407 9408 if (cb->vcdl != NULL) { 9409 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 9410 zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp), 9411 path, vds); 9412 } 9413 } 9414 9415 if (children == 0) { 9416 if (cb->cb_print_vdev_init) { 9417 if (vs->vs_initialize_state != 0) { 9418 uint64_t st = vs->vs_initialize_state; 9419 fnvlist_add_string(vds, "init_state", 9420 vdev_init_state_str[st]); 9421 nice_num_str_nvlist(vds, "initialized", 9422 vs->vs_initialize_bytes_done, 9423 cb->cb_literal, cb->cb_json_as_int, 9424 ZFS_NICENUM_BYTES); 9425 nice_num_str_nvlist(vds, "to_initialize", 9426 vs->vs_initialize_bytes_est, 9427 cb->cb_literal, cb->cb_json_as_int, 9428 ZFS_NICENUM_BYTES); 9429 nice_num_str_nvlist(vds, "init_time", 9430 vs->vs_initialize_action_time, 9431 cb->cb_literal, cb->cb_json_as_int, 9432 ZFS_NICE_TIMESTAMP); 9433 nice_num_str_nvlist(vds, "init_errors", 9434 vs->vs_initialize_errors, 9435 cb->cb_literal, cb->cb_json_as_int, 9436 ZFS_NICENUM_1024); 9437 } else { 9438 fnvlist_add_string(vds, "init_state", 9439 "UNINITIALIZED"); 9440 } 9441 } 9442 if (cb->cb_print_vdev_trim) { 9443 if (vs->vs_trim_notsup == 0) { 9444 if (vs->vs_trim_state != 0) { 9445 uint64_t st = vs->vs_trim_state; 9446 fnvlist_add_string(vds, "trim_state", 9447 vdev_trim_state_str[st]); 9448 nice_num_str_nvlist(vds, "trimmed", 9449 vs->vs_trim_bytes_done, 9450 cb->cb_literal, cb->cb_json_as_int, 9451 ZFS_NICENUM_BYTES); 9452 nice_num_str_nvlist(vds, "to_trim", 9453 vs->vs_trim_bytes_est, 9454 cb->cb_literal, cb->cb_json_as_int, 9455 ZFS_NICENUM_BYTES); 9456 nice_num_str_nvlist(vds, "trim_time", 9457 vs->vs_trim_action_time, 9458 cb->cb_literal, cb->cb_json_as_int, 9459 ZFS_NICE_TIMESTAMP); 9460 nice_num_str_nvlist(vds, "trim_errors", 9461 vs->vs_trim_errors, 9462 cb->cb_literal, cb->cb_json_as_int, 9463 ZFS_NICENUM_1024); 9464 } else 9465 fnvlist_add_string(vds, "trim_state", 9466 "UNTRIMMED"); 9467 } 9468 nice_num_str_nvlist(vds, "trim_notsup", 9469 vs->vs_trim_notsup, B_TRUE, 9470 cb->cb_json_as_int, ZFS_NICENUM_1024); 9471 } 9472 } else { 9473 ch = fnvlist_alloc(); 9474 } 9475 9476 if (cb->cb_flat_vdevs && children == 0) { 9477 fnvlist_add_nvlist(item, vname, vds); 9478 } 9479 9480 for (int c = 0; c < children; c++) { 9481 uint64_t islog = B_FALSE, ishole = B_FALSE; 9482 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 9483 &islog); 9484 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 9485 &ishole); 9486 if (islog || ishole) 9487 continue; 9488 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 9489 continue; 9490 if (cb->cb_flat_vdevs) { 9491 vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare, 9492 vname, item); 9493 } 9494 vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare, 9495 vname, ch); 9496 } 9497 9498 if (ch != NULL) { 9499 if (!nvlist_empty(ch)) 9500 fnvlist_add_nvlist(vds, "vdevs", ch); 9501 fnvlist_free(ch); 9502 } 9503 fnvlist_add_nvlist(item, vname, vds); 9504 fnvlist_free(vds); 9505 free(vname); 9506 } 9507 9508 static void 9509 class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, 
nvlist_t *nv, 9510 const char *class, nvlist_t *item) 9511 { 9512 uint_t c, children; 9513 nvlist_t **child; 9514 nvlist_t *class_obj = NULL; 9515 9516 if (!cb->cb_flat_vdevs) 9517 class_obj = fnvlist_alloc(); 9518 9519 assert(zhp != NULL || !cb->cb_verbose); 9520 9521 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, 9522 &children) != 0) { 9523 fnvlist_free(class_obj); return; } 9524 9525 for (c = 0; c < children; c++) { 9526 uint64_t is_log = B_FALSE; 9527 const char *bias = NULL; 9528 const char *type = NULL; 9529 char *name = zpool_vdev_name(g_zfs, zhp, child[c], 9530 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 9531 9532 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 9533 &is_log); 9534 9535 if (is_log) { 9536 bias = (char *)VDEV_ALLOC_CLASS_LOGS; 9537 } else { 9538 (void) nvlist_lookup_string(child[c], 9539 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 9540 (void) nvlist_lookup_string(child[c], 9541 ZPOOL_CONFIG_TYPE, &type); 9542 } 9543 9544 if (bias == NULL || strcmp(bias, class) != 0) { 9545 free(name); continue; } 9546 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0) { 9547 free(name); continue; } 9548 9549 if (cb->cb_flat_vdevs) { 9550 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE, 9551 NULL, item); 9552 } else { 9553 vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE, 9554 NULL, class_obj); 9555 } 9556 free(name); 9557 } 9558 if (!cb->cb_flat_vdevs) { 9559 if (!nvlist_empty(class_obj)) 9560 fnvlist_add_nvlist(item, class, class_obj); 9561 fnvlist_free(class_obj); 9562 } 9563 } 9564 9565 static void 9566 l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 9567 nvlist_t *item) 9568 { 9569 nvlist_t *l2c = NULL, **l2cache; 9570 uint_t nl2cache; 9571 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 9572 &l2cache, &nl2cache) == 0) { 9573 if (nl2cache == 0) 9574 return; 9575 if (!cb->cb_flat_vdevs) 9576 l2c = fnvlist_alloc(); 9577 for (int i = 0; i < nl2cache; i++) { 9578 if (cb->cb_flat_vdevs) { 9579 vdev_stats_nvlist(zhp, cb, l2cache[i], 2, 9580 B_FALSE, NULL, item); 9581 } else { 9582 vdev_stats_nvlist(zhp, cb, l2cache[i], 2, 9583 B_FALSE, NULL, l2c); 9584 } 9585 } 9586 } 9587 if (!cb->cb_flat_vdevs) { 9588 if (!nvlist_empty(l2c)) 9589 fnvlist_add_nvlist(item, "l2cache", l2c); 9590 fnvlist_free(l2c); 9591 } 9592 } 9593 9594 static void 9595 spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 9596 nvlist_t *item) 9597 { 9598 nvlist_t *sp = NULL, **spares; 9599 uint_t nspares; 9600 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 9601 &spares, &nspares) == 0) { 9602 if (nspares == 0) 9603 return; 9604 if (!cb->cb_flat_vdevs) 9605 sp = fnvlist_alloc(); 9606 for (int i = 0; i < nspares; i++) { 9607 if (cb->cb_flat_vdevs) { 9608 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE, 9609 NULL, item); 9610 } else { 9611 vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE, 9612 NULL, sp); 9613 } 9614 } 9615 } 9616 if (!cb->cb_flat_vdevs) { 9617 if (!nvlist_empty(sp)) 9618 fnvlist_add_nvlist(item, "spares", sp); 9619 fnvlist_free(sp); 9620 } 9621 } 9622 9623 static void 9624 errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item) 9625 { 9626 uint64_t nerr; 9627 nvlist_t *config = zpool_get_config(zhp, NULL); 9628 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, 9629 &nerr) == 0) { 9630 nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr, 9631 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9632 if (nerr != 0 && cb->cb_verbose) { 9633 nvlist_t *nverrlist = NULL; 9634 if (zpool_get_errlog(zhp, &nverrlist) == 0) { 9635 int i = 0; 9636 int count = 0; 9637
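/* Two passes over the error log: first count the entries, then format each object path into errl[]. */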
size_t len = MAXPATHLEN * 2; 9638 nvpair_t *elem = NULL; 9639 9640 for (nvpair_t *pair = 9641 nvlist_next_nvpair(nverrlist, NULL); 9642 pair != NULL; 9643 pair = nvlist_next_nvpair(nverrlist, pair)) 9644 count++; 9645 char **errl = safe_malloc( 9646 count * sizeof (char *)); 9647 9648 while ((elem = nvlist_next_nvpair(nverrlist, 9649 elem)) != NULL) { 9650 nvlist_t *nv; 9651 uint64_t dsobj, obj; 9652 9653 verify(nvpair_value_nvlist(elem, 9654 &nv) == 0); 9655 verify(nvlist_lookup_uint64(nv, 9656 ZPOOL_ERR_DATASET, &dsobj) == 0); 9657 verify(nvlist_lookup_uint64(nv, 9658 ZPOOL_ERR_OBJECT, &obj) == 0); 9659 errl[i] = safe_malloc(len); 9660 zpool_obj_to_path(zhp, dsobj, obj, 9661 errl[i++], len); 9662 } 9663 nvlist_free(nverrlist); 9664 fnvlist_add_string_array(item, "errlist", 9665 (const char **)errl, count); 9666 for (int i = 0; i < count; ++i) 9667 free(errl[i]); 9668 free(errl); 9669 } else 9670 fnvlist_add_string(item, "errlist", 9671 strerror(errno)); 9672 } 9673 } 9674 } 9675 9676 static void 9677 ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item) 9678 { 9679 nice_num_str_nvlist(item, "blocks", dds->dds_blocks, 9680 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9681 nice_num_str_nvlist(item, "logical_size", dds->dds_lsize, 9682 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9683 nice_num_str_nvlist(item, "physical_size", dds->dds_psize, 9684 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9685 nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize, 9686 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9687 nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks, 9688 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9689 nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize, 9690 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9691 nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize, 9692 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9693 nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize, 9694 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9695 } 9696 9697 static void 9698 dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item) 9699 { 9700 nvlist_t *config; 9701 if (cb->cb_dedup_stats) { 9702 ddt_histogram_t *ddh; 9703 ddt_stat_t *dds; 9704 ddt_object_t *ddo; 9705 nvlist_t *ddt_stat, *ddt_obj, *dedup; 9706 uint_t c; 9707 uint64_t cspace_prop; 9708 9709 config = zpool_get_config(zhp, NULL); 9710 if (nvlist_lookup_uint64_array(config, 9711 ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0) 9712 return; 9713 9714 dedup = fnvlist_alloc(); 9715 ddt_obj = fnvlist_alloc(); 9716 nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count, 9717 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9718 if (ddo->ddo_count == 0) { 9719 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS, 9720 ddt_obj); 9721 fnvlist_add_nvlist(item, "dedup_stats", dedup); 9722 fnvlist_free(ddt_obj); 9723 fnvlist_free(dedup); 9724 return; 9725 } else { 9726 nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace, 9727 cb->cb_literal, cb->cb_json_as_int, 9728 ZFS_NICENUM_1024); 9729 nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace, 9730 cb->cb_literal, cb->cb_json_as_int, 9731 ZFS_NICENUM_1024); 9732 /* 9733 * Squash cached size into in-core size to handle race. 9734 * Only include cached size if it is available.
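 * (Hence the MIN() below: the cached-size property is sampled
 * separately and may momentarily exceed the in-core size.)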
9735 */ 9736 cspace_prop = zpool_get_prop_int(zhp, 9737 ZPOOL_PROP_DEDUPCACHED, NULL); 9738 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace); 9739 nice_num_str_nvlist(dedup, "cspace", cspace_prop, 9740 cb->cb_literal, cb->cb_json_as_int, 9741 ZFS_NICENUM_1024); 9742 } 9743 9744 ddt_stat = fnvlist_alloc(); 9745 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, 9746 (uint64_t **)&dds, &c) == 0) { 9747 nvlist_t *total = fnvlist_alloc(); 9748 if (dds->dds_blocks == 0) 9749 fnvlist_add_string(total, "blocks", "0"); 9750 else 9751 ddt_stats_nvlist(dds, cb, total); 9752 fnvlist_add_nvlist(ddt_stat, "total", total); 9753 fnvlist_free(total); 9754 } 9755 if (nvlist_lookup_uint64_array(config, 9756 ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) { 9757 nvlist_t *hist = fnvlist_alloc(); 9758 nvlist_t *entry = NULL; 9759 char buf[16]; 9760 for (int h = 0; h < 64; h++) { 9761 if (ddh->ddh_stat[h].dds_blocks != 0) { 9762 entry = fnvlist_alloc(); 9763 ddt_stats_nvlist(&ddh->ddh_stat[h], cb, 9764 entry); 9765 snprintf(buf, 16, "%d", h); 9766 fnvlist_add_nvlist(hist, buf, entry); 9767 fnvlist_free(entry); 9768 } 9769 } 9770 if (!nvlist_empty(hist)) 9771 fnvlist_add_nvlist(ddt_stat, "histogram", hist); 9772 fnvlist_free(hist); 9773 } 9774 9775 if (!nvlist_empty(ddt_obj)) { 9776 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS, 9777 ddt_obj); 9778 } 9779 fnvlist_free(ddt_obj); 9780 if (!nvlist_empty(ddt_stat)) { 9781 fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS, 9782 ddt_stat); 9783 } 9784 fnvlist_free(ddt_stat); 9785 if (!nvlist_empty(dedup)) 9786 fnvlist_add_nvlist(item, "dedup_stats", dedup); 9787 fnvlist_free(dedup); 9788 } 9789 } 9790 9791 static void 9792 raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, 9793 nvlist_t *nvroot, nvlist_t *item) 9794 { 9795 uint_t c; 9796 pool_raidz_expand_stat_t *pres = NULL; 9797 if (nvlist_lookup_uint64_array(nvroot, 9798 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) { 9799 nvlist_t **child; 9800 uint_t children; 9801 nvlist_t *nv = fnvlist_alloc(); 9802 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 9803 &child, &children) == 0); 9804 assert(pres->pres_expanding_vdev < children); 9805 char *name = 9806 zpool_vdev_name(g_zfs, zhp, 9807 child[pres->pres_expanding_vdev], 0); 9808 fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int); 9809 fnvlist_add_string(nv, "state", 9810 pool_scan_state_str[pres->pres_state]); 9811 nice_num_str_nvlist(nv, "expanding_vdev", 9812 pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int, 9813 ZFS_NICENUM_1024); 9814 nice_num_str_nvlist(nv, "start_time", pres->pres_start_time, 9815 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9816 nice_num_str_nvlist(nv, "end_time", pres->pres_end_time, 9817 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9818 nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow, 9819 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9820 nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed, 9821 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9822 nice_num_str_nvlist(nv, "waiting_for_resilver", 9823 pres->pres_waiting_for_resilver, B_TRUE, 9824 cb->cb_json_as_int, ZFS_NICENUM_1024); 9825 fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv); 9826 fnvlist_free(nv); 9827 free(name); 9828 } 9829 } 9830 9831 static void 9832 checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb, 9833 nvlist_t *item) 9834 { 9835 uint_t c; 9836 pool_checkpoint_stat_t *pcs = NULL; 9837 if 
(nvlist_lookup_uint64_array(nvroot, 9838 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) { 9839 nvlist_t *nv = fnvlist_alloc(); 9840 fnvlist_add_string(nv, "state", 9841 checkpoint_state_str[pcs->pcs_state]); 9842 nice_num_str_nvlist(nv, "start_time", 9843 pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int, 9844 ZFS_NICE_TIMESTAMP); 9845 nice_num_str_nvlist(nv, "space", 9846 pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int, 9847 ZFS_NICENUM_BYTES); 9848 fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv); 9849 fnvlist_free(nv); 9850 } 9851 } 9852 9853 static void 9854 removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, 9855 nvlist_t *nvroot, nvlist_t *item) 9856 { 9857 uint_t c; 9858 pool_removal_stat_t *prs = NULL; 9859 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS, 9860 (uint64_t **)&prs, &c) == 0) { 9861 if (prs->prs_state != DSS_NONE) { 9862 nvlist_t **child; 9863 uint_t children; 9864 verify(nvlist_lookup_nvlist_array(nvroot, 9865 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0); 9866 assert(prs->prs_removing_vdev < children); 9867 char *vdev_name = zpool_vdev_name(g_zfs, zhp, 9868 child[prs->prs_removing_vdev], B_TRUE); 9869 nvlist_t *nv = fnvlist_alloc(); 9870 fill_vdev_info(nv, zhp, vdev_name, B_FALSE, 9871 cb->cb_json_as_int); 9872 fnvlist_add_string(nv, "state", 9873 pool_scan_state_str[prs->prs_state]); 9874 nice_num_str_nvlist(nv, "removing_vdev", 9875 prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int, 9876 ZFS_NICENUM_1024); 9877 nice_num_str_nvlist(nv, "start_time", 9878 prs->prs_start_time, cb->cb_literal, 9879 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9880 nice_num_str_nvlist(nv, "end_time", prs->prs_end_time, 9881 cb->cb_literal, cb->cb_json_as_int, 9882 ZFS_NICE_TIMESTAMP); 9883 nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy, 9884 cb->cb_literal, cb->cb_json_as_int, 9885 ZFS_NICENUM_BYTES); 9886 nice_num_str_nvlist(nv, "copied", prs->prs_copied, 9887 cb->cb_literal, cb->cb_json_as_int, 9888 ZFS_NICENUM_BYTES); 9889 nice_num_str_nvlist(nv, "mapping_memory", 9890 prs->prs_mapping_memory, cb->cb_literal, 9891 cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9892 fnvlist_add_nvlist(item, 9893 ZPOOL_CONFIG_REMOVAL_STATS, nv); 9894 fnvlist_free(nv); 9895 free(vdev_name); 9896 } 9897 } 9898 } 9899 9900 static void 9901 scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, 9902 nvlist_t *nvroot, nvlist_t *item) 9903 { 9904 pool_scan_stat_t *ps = NULL; 9905 uint_t c; 9906 nvlist_t *scan = fnvlist_alloc(); 9907 nvlist_t **child; 9908 uint_t children; 9909 9910 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS, 9911 (uint64_t **)&ps, &c) == 0) { 9912 fnvlist_add_string(scan, "function", 9913 pool_scan_func_str[ps->pss_func]); 9914 fnvlist_add_string(scan, "state", 9915 pool_scan_state_str[ps->pss_state]); 9916 nice_num_str_nvlist(scan, "start_time", ps->pss_start_time, 9917 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9918 nice_num_str_nvlist(scan, "end_time", ps->pss_end_time, 9919 cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9920 nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine, 9921 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9922 nice_num_str_nvlist(scan, "examined", ps->pss_examined, 9923 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9924 nice_num_str_nvlist(scan, "skipped", ps->pss_skipped, 9925 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9926 nice_num_str_nvlist(scan, "processed", ps->pss_processed, 9927 cb->cb_literal, 
cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9928 nice_num_str_nvlist(scan, "errors", ps->pss_errors, 9929 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024); 9930 nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam, 9931 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9932 nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start, 9933 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); 9934 nice_num_str_nvlist(scan, "scrub_pause", 9935 ps->pss_pass_scrub_pause, cb->cb_literal, 9936 cb->cb_json_as_int, ZFS_NICE_TIMESTAMP); 9937 nice_num_str_nvlist(scan, "scrub_spent_paused", 9938 ps->pss_pass_scrub_spent_paused, 9939 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); 9940 nice_num_str_nvlist(scan, "issued_bytes_per_scan", 9941 ps->pss_pass_issued, cb->cb_literal, 9942 cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9943 nice_num_str_nvlist(scan, "issued", ps->pss_issued, 9944 cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES); 9945 if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB && 9946 ps->pss_error_scrub_start > ps->pss_start_time) { 9947 fnvlist_add_string(scan, "err_scrub_func", 9948 pool_scan_func_str[ps->pss_error_scrub_func]); 9949 fnvlist_add_string(scan, "err_scrub_state", 9950 pool_scan_state_str[ps->pss_error_scrub_state]); 9951 nice_num_str_nvlist(scan, "err_scrub_start_time", 9952 ps->pss_error_scrub_start, 9953 cb->cb_literal, cb->cb_json_as_int, 9954 ZFS_NICE_TIMESTAMP); 9955 nice_num_str_nvlist(scan, "err_scrub_end_time", 9956 ps->pss_error_scrub_end, 9957 cb->cb_literal, cb->cb_json_as_int, 9958 ZFS_NICE_TIMESTAMP); 9959 nice_num_str_nvlist(scan, "err_scrub_examined", 9960 ps->pss_error_scrub_examined, 9961 cb->cb_literal, cb->cb_json_as_int, 9962 ZFS_NICENUM_1024); 9963 nice_num_str_nvlist(scan, "err_scrub_to_examine", 9964 ps->pss_error_scrub_to_be_examined, 9965 cb->cb_literal, cb->cb_json_as_int, 9966 ZFS_NICENUM_1024); 9967 nice_num_str_nvlist(scan, "err_scrub_pause", 9968 ps->pss_pass_error_scrub_pause, 9969 B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024); 9970 } 9971 } 9972 9973 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 9974 &child, &children) == 0) { 9975 vdev_rebuild_stat_t *vrs; 9976 uint_t i; 9977 char *name; 9978 nvlist_t *nv; 9979 nvlist_t *rebuild = fnvlist_alloc(); 9980 uint64_t st; 9981 for (uint_t c = 0; c < children; c++) { 9982 if (nvlist_lookup_uint64_array(child[c], 9983 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, 9984 &i) == 0) { 9985 if (vrs->vrs_state != VDEV_REBUILD_NONE) { 9986 nv = fnvlist_alloc(); 9987 name = zpool_vdev_name(g_zfs, zhp, 9988 child[c], VDEV_NAME_TYPE_ID); 9989 fill_vdev_info(nv, zhp, name, B_FALSE, 9990 cb->cb_json_as_int); 9991 st = vrs->vrs_state; 9992 fnvlist_add_string(nv, "state", 9993 vdev_rebuild_state_str[st]); 9994 nice_num_str_nvlist(nv, "start_time", 9995 vrs->vrs_start_time, cb->cb_literal, 9996 cb->cb_json_as_int, 9997 ZFS_NICE_TIMESTAMP); 9998 nice_num_str_nvlist(nv, "end_time", 9999 vrs->vrs_end_time, cb->cb_literal, 10000 cb->cb_json_as_int, 10001 ZFS_NICE_TIMESTAMP); 10002 nice_num_str_nvlist(nv, "scan_time", 10003 vrs->vrs_scan_time_ms * 1000000, 10004 cb->cb_literal, cb->cb_json_as_int, 10005 ZFS_NICENUM_TIME); 10006 nice_num_str_nvlist(nv, "scanned", 10007 vrs->vrs_bytes_scanned, 10008 cb->cb_literal, cb->cb_json_as_int, 10009 ZFS_NICENUM_BYTES); 10010 nice_num_str_nvlist(nv, "issued", 10011 vrs->vrs_bytes_issued, 10012 cb->cb_literal, cb->cb_json_as_int, 10013 ZFS_NICENUM_BYTES); 10014 nice_num_str_nvlist(nv, "rebuilt", 10015 vrs->vrs_bytes_rebuilt, 10016 cb->cb_literal, 
cb->cb_json_as_int, 10017 ZFS_NICENUM_BYTES); 10018 nice_num_str_nvlist(nv, "to_scan", 10019 vrs->vrs_bytes_est, cb->cb_literal, 10020 cb->cb_json_as_int, 10021 ZFS_NICENUM_BYTES); 10022 nice_num_str_nvlist(nv, "errors", 10023 vrs->vrs_errors, cb->cb_literal, 10024 cb->cb_json_as_int, 10025 ZFS_NICENUM_1024); 10026 nice_num_str_nvlist(nv, "pass_time", 10027 vrs->vrs_pass_time_ms * 1000000, 10028 cb->cb_literal, cb->cb_json_as_int, 10029 ZFS_NICENUM_TIME); 10030 nice_num_str_nvlist(nv, "pass_scanned", 10031 vrs->vrs_pass_bytes_scanned, 10032 cb->cb_literal, cb->cb_json_as_int, 10033 ZFS_NICENUM_BYTES); 10034 nice_num_str_nvlist(nv, "pass_issued", 10035 vrs->vrs_pass_bytes_issued, 10036 cb->cb_literal, cb->cb_json_as_int, 10037 ZFS_NICENUM_BYTES); 10038 nice_num_str_nvlist(nv, "pass_skipped", 10039 vrs->vrs_pass_bytes_skipped, 10040 cb->cb_literal, cb->cb_json_as_int, 10041 ZFS_NICENUM_BYTES); 10042 fnvlist_add_nvlist(rebuild, name, nv); 10043 free(name); 10044 } 10045 } 10046 } 10047 if (!nvlist_empty(rebuild)) 10048 fnvlist_add_nvlist(scan, "rebuild_stats", rebuild); 10049 fnvlist_free(rebuild); 10050 } 10051 10052 if (!nvlist_empty(scan)) 10053 fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan); 10054 fnvlist_free(scan); 10055 } 10056 10057 /* 10058 * Print the scan status. 10059 */ 10060 static void 10061 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot) 10062 { 10063 uint64_t rebuild_end_time = 0, resilver_end_time = 0; 10064 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE; 10065 boolean_t have_errorscrub = B_FALSE; 10066 boolean_t active_resilver = B_FALSE; 10067 pool_checkpoint_stat_t *pcs = NULL; 10068 pool_scan_stat_t *ps = NULL; 10069 uint_t c; 10070 time_t scrub_start = 0, errorscrub_start = 0; 10071 10072 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS, 10073 (uint64_t **)&ps, &c) == 0) { 10074 if (ps->pss_func == POOL_SCAN_RESILVER) { 10075 resilver_end_time = ps->pss_end_time; 10076 active_resilver = (ps->pss_state == DSS_SCANNING); 10077 } 10078 10079 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER); 10080 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB); 10081 scrub_start = ps->pss_start_time; 10082 if (c > offsetof(pool_scan_stat_t, 10083 pss_pass_error_scrub_pause) / 8) { 10084 have_errorscrub = (ps->pss_error_scrub_func == 10085 POOL_SCAN_ERRORSCRUB); 10086 errorscrub_start = ps->pss_error_scrub_start; 10087 } 10088 } 10089 10090 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time); 10091 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0)); 10092 10093 /* Always print the scrub status when available. */ 10094 if (have_scrub && scrub_start > errorscrub_start) 10095 print_scan_scrub_resilver_status(ps); 10096 else if (have_errorscrub && errorscrub_start >= scrub_start) 10097 print_err_scrub_status(ps); 10098 10099 /* 10100 * When there is an active resilver or rebuild print its status. 10101 * Otherwise print the status of the last resilver or rebuild. 
10102 */ 10103 if (active_resilver || (!active_rebuild && have_resilver && 10104 resilver_end_time && resilver_end_time > rebuild_end_time)) { 10105 print_scan_scrub_resilver_status(ps); 10106 } else if (active_rebuild || (!active_resilver && have_rebuild && 10107 rebuild_end_time && rebuild_end_time > resilver_end_time)) { 10108 print_rebuild_status(zhp, nvroot); 10109 } 10110 10111 (void) nvlist_lookup_uint64_array(nvroot, 10112 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 10113 print_checkpoint_scan_warning(ps, pcs); 10114 } 10115 10116 /* 10117 * Print out detailed removal status. 10118 */ 10119 static void 10120 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs) 10121 { 10122 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7]; 10123 time_t start, end; 10124 nvlist_t *config, *nvroot; 10125 nvlist_t **child; 10126 uint_t children; 10127 char *vdev_name; 10128 10129 if (prs == NULL || prs->prs_state == DSS_NONE) 10130 return; 10131 10132 /* 10133 * Determine name of vdev. 10134 */ 10135 config = zpool_get_config(zhp, NULL); 10136 nvroot = fnvlist_lookup_nvlist(config, 10137 ZPOOL_CONFIG_VDEV_TREE); 10138 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 10139 &child, &children) == 0); 10140 assert(prs->prs_removing_vdev < children); 10141 vdev_name = zpool_vdev_name(g_zfs, zhp, 10142 child[prs->prs_removing_vdev], B_TRUE); 10143 10144 printf_color(ANSI_BOLD, gettext("remove: ")); 10145 10146 start = prs->prs_start_time; 10147 end = prs->prs_end_time; 10148 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf)); 10149 10150 /* 10151 * Removal is finished or canceled. 10152 */ 10153 if (prs->prs_state == DSS_FINISHED) { 10154 uint64_t minutes_taken = (end - start) / 60; 10155 10156 (void) printf(gettext("Removal of vdev %llu copied %s " 10157 "in %lluh%um, completed on %s"), 10158 (longlong_t)prs->prs_removing_vdev, 10159 copied_buf, 10160 (u_longlong_t)(minutes_taken / 60), 10161 (uint_t)(minutes_taken % 60), 10162 ctime((time_t *)&end)); 10163 } else if (prs->prs_state == DSS_CANCELED) { 10164 (void) printf(gettext("Removal of %s canceled on %s"), 10165 vdev_name, ctime(&end)); 10166 } else { 10167 uint64_t copied, total, elapsed, rate, mins_left, hours_left; 10168 double fraction_done; 10169 10170 assert(prs->prs_state == DSS_SCANNING); 10171 10172 /* 10173 * Removal is in progress. 10174 */ 10175 (void) printf(gettext( 10176 "Evacuation of %s in progress since %s"), 10177 vdev_name, ctime(&start)); 10178 10179 copied = prs->prs_copied > 0 ? prs->prs_copied : 1; 10180 total = prs->prs_to_copy; 10181 fraction_done = (double)copied / total; 10182 10183 /* elapsed time for this pass */ 10184 elapsed = time(NULL) - prs->prs_start_time; 10185 elapsed = elapsed > 0 ? elapsed : 1; 10186 rate = copied / elapsed; 10187 rate = rate > 0 ? 
rate : 1; 10188 mins_left = ((total - copied) / rate) / 60; 10189 hours_left = mins_left / 60; 10190 10191 zfs_nicenum(copied, examined_buf, sizeof (examined_buf)); 10192 zfs_nicenum(total, total_buf, sizeof (total_buf)); 10193 zfs_nicenum(rate, rate_buf, sizeof (rate_buf)); 10194 10195 /* 10196 * do not print estimated time if hours_left is more than 10197 * 30 days 10198 */ 10199 (void) printf(gettext( 10200 "\t%s copied out of %s at %s/s, %.2f%% done"), 10201 examined_buf, total_buf, rate_buf, 100 * fraction_done); 10202 if (hours_left < (30 * 24)) { 10203 (void) printf(gettext(", %lluh%um to go\n"), 10204 (u_longlong_t)hours_left, (uint_t)(mins_left % 60)); 10205 } else { 10206 (void) printf(gettext( 10207 ", (copy is slow, no estimated time)\n")); 10208 } 10209 } 10210 free(vdev_name); 10211 10212 if (prs->prs_mapping_memory > 0) { 10213 char mem_buf[7]; 10214 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf)); 10215 (void) printf(gettext( 10216 "\t%s memory used for removed device mappings\n"), 10217 mem_buf); 10218 } 10219 } 10220 10221 /* 10222 * Print out detailed raidz expansion status. 10223 */ 10224 static void 10225 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres) 10226 { 10227 char copied_buf[7]; 10228 10229 if (pres == NULL || pres->pres_state == DSS_NONE) 10230 return; 10231 10232 /* 10233 * Determine name of vdev. 10234 */ 10235 nvlist_t *config = zpool_get_config(zhp, NULL); 10236 nvlist_t *nvroot = fnvlist_lookup_nvlist(config, 10237 ZPOOL_CONFIG_VDEV_TREE); 10238 nvlist_t **child; 10239 uint_t children; 10240 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 10241 &child, &children) == 0); 10242 assert(pres->pres_expanding_vdev < children); 10243 10244 printf_color(ANSI_BOLD, gettext("expand: ")); 10245 10246 time_t start = pres->pres_start_time; 10247 time_t end = pres->pres_end_time; 10248 char *vname = 10249 zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0); 10250 zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf)); 10251 10252 /* 10253 * Expansion is finished or canceled. 10254 */ 10255 if (pres->pres_state == DSS_FINISHED) { 10256 char time_buf[32]; 10257 secs_to_dhms(end - start, time_buf); 10258 10259 (void) printf(gettext("expanded %s-%u copied %s in %s, " 10260 "on %s"), vname, (int)pres->pres_expanding_vdev, 10261 copied_buf, time_buf, ctime((time_t *)&end)); 10262 } else { 10263 char examined_buf[7], total_buf[7], rate_buf[7]; 10264 uint64_t copied, total, elapsed, rate, secs_left; 10265 double fraction_done; 10266 10267 assert(pres->pres_state == DSS_SCANNING); 10268 10269 /* 10270 * Expansion is in progress. 10271 */ 10272 (void) printf(gettext( 10273 "expansion of %s-%u in progress since %s"), 10274 vname, (int)pres->pres_expanding_vdev, ctime(&start)); 10275 10276 copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1; 10277 total = pres->pres_to_reflow; 10278 fraction_done = (double)copied / total; 10279 10280 /* elapsed time for this pass */ 10281 elapsed = time(NULL) - pres->pres_start_time; 10282 elapsed = elapsed > 0 ? elapsed : 1; 10283 rate = copied / elapsed; 10284 rate = rate > 0 ? 
rate : 1; 10285 secs_left = (total - copied) / rate; 10286 10287 zfs_nicenum(copied, examined_buf, sizeof (examined_buf)); 10288 zfs_nicenum(total, total_buf, sizeof (total_buf)); 10289 zfs_nicenum(rate, rate_buf, sizeof (rate_buf)); 10290 10291 /* 10292 * do not print estimated time if hours_left is more than 10293 * 30 days 10294 */ 10295 (void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"), 10296 examined_buf, total_buf, rate_buf, 100 * fraction_done); 10297 if (pres->pres_waiting_for_resilver) { 10298 (void) printf(gettext(", paused for resilver or " 10299 "clear\n")); 10300 } else if (secs_left < (30 * 24 * 3600)) { 10301 char time_buf[32]; 10302 secs_to_dhms(secs_left, time_buf); 10303 (void) printf(gettext(", %s to go\n"), time_buf); 10304 } else { 10305 (void) printf(gettext( 10306 ", (copy is slow, no estimated time)\n")); 10307 } 10308 } 10309 free(vname); 10310 } 10311 static void 10312 print_checkpoint_status(pool_checkpoint_stat_t *pcs) 10313 { 10314 time_t start; 10315 char space_buf[7]; 10316 10317 if (pcs == NULL || pcs->pcs_state == CS_NONE) 10318 return; 10319 10320 (void) printf(gettext("checkpoint: ")); 10321 10322 start = pcs->pcs_start_time; 10323 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf)); 10324 10325 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) { 10326 char *date = ctime(&start); 10327 10328 /* 10329 * ctime() adds a newline at the end of the generated 10330 * string, thus the weird format specifier and the 10331 * strlen() call used to chop it off from the output. 10332 */ 10333 (void) printf(gettext("created %.*s, consumes %s\n"), 10334 (int)(strlen(date) - 1), date, space_buf); 10335 return; 10336 } 10337 10338 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 10339 10340 (void) printf(gettext("discarding, %s remaining.\n"), 10341 space_buf); 10342 } 10343 10344 static void 10345 print_error_log(zpool_handle_t *zhp) 10346 { 10347 nvlist_t *nverrlist = NULL; 10348 nvpair_t *elem; 10349 char *pathname; 10350 size_t len = MAXPATHLEN * 2; 10351 10352 if (zpool_get_errlog(zhp, &nverrlist) != 0) 10353 return; 10354 10355 (void) printf("errors: Permanent errors have been " 10356 "detected in the following files:\n\n"); 10357 10358 pathname = safe_malloc(len); 10359 elem = NULL; 10360 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) { 10361 nvlist_t *nv; 10362 uint64_t dsobj, obj; 10363 10364 verify(nvpair_value_nvlist(elem, &nv) == 0); 10365 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET, 10366 &dsobj) == 0); 10367 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, 10368 &obj) == 0); 10369 zpool_obj_to_path(zhp, dsobj, obj, pathname, len); 10370 (void) printf("%7s %s\n", "", pathname); 10371 } 10372 free(pathname); 10373 nvlist_free(nverrlist); 10374 } 10375 10376 static void 10377 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares, 10378 uint_t nspares) 10379 { 10380 uint_t i; 10381 char *name; 10382 10383 if (nspares == 0) 10384 return; 10385 10386 (void) printf(gettext("\tspares\n")); 10387 10388 for (i = 0; i < nspares; i++) { 10389 name = zpool_vdev_name(g_zfs, zhp, spares[i], 10390 cb->cb_name_flags); 10391 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL); 10392 free(name); 10393 } 10394 } 10395 10396 static void 10397 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache, 10398 uint_t nl2cache) 10399 { 10400 uint_t i; 10401 char *name; 10402 10403 if (nl2cache == 0) 10404 return; 10405 10406 (void) printf(gettext("\tcache\n")); 10407 10408 for (i = 0; i < nl2cache; 
i++) { 10409 name = zpool_vdev_name(g_zfs, zhp, l2cache[i], 10410 cb->cb_name_flags); 10411 print_status_config(zhp, cb, name, l2cache[i], 2, 10412 B_FALSE, NULL); 10413 free(name); 10414 } 10415 } 10416 10417 static void 10418 print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal) 10419 { 10420 ddt_histogram_t *ddh; 10421 ddt_stat_t *dds; 10422 ddt_object_t *ddo; 10423 uint_t c; 10424 /* Extra space provided for literal display */ 10425 char dspace[32], mspace[32], cspace[32]; 10426 uint64_t cspace_prop; 10427 enum zfs_nicenum_format format; 10428 zprop_source_t src; 10429 10430 /* 10431 * If the pool was faulted then we may not have been able to 10432 * obtain the config. Otherwise, if we have anything in the dedup 10433 * table continue processing the stats. 10434 */ 10435 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS, 10436 (uint64_t **)&ddo, &c) != 0) 10437 return; 10438 10439 (void) printf("\n"); 10440 (void) printf(gettext(" dedup: ")); 10441 if (ddo->ddo_count == 0) { 10442 (void) printf(gettext("no DDT entries\n")); 10443 return; 10444 } 10445 10446 /* 10447 * Squash cached size into in-core size to handle race. 10448 * Only include cached size if it is available. 10449 */ 10450 cspace_prop = zpool_get_prop_int(zhp, ZPOOL_PROP_DEDUPCACHED, &src); 10451 cspace_prop = MIN(cspace_prop, ddo->ddo_mspace); 10452 format = literal ? ZFS_NICENUM_RAW : ZFS_NICENUM_1024; 10453 zfs_nicenum_format(cspace_prop, cspace, sizeof (cspace), format); 10454 zfs_nicenum_format(ddo->ddo_dspace, dspace, sizeof (dspace), format); 10455 zfs_nicenum_format(ddo->ddo_mspace, mspace, sizeof (mspace), format); 10456 (void) printf("DDT entries %llu, size %s on disk, %s in core", 10457 (u_longlong_t)ddo->ddo_count, 10458 dspace, 10459 mspace); 10460 if (src != ZPROP_SRC_DEFAULT) { 10461 (void) printf(", %s cached (%.02f%%)", 10462 cspace, 10463 (double)cspace_prop / (double)ddo->ddo_mspace * 100.0); 10464 } 10465 (void) printf("\n"); 10466 10467 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, 10468 (uint64_t **)&dds, &c) == 0); 10469 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM, 10470 (uint64_t **)&ddh, &c) == 0); 10471 zpool_dump_ddt(dds, ddh); 10472 } 10473 10474 #define ST_SIZE 4096 10475 #define AC_SIZE 2048 10476 10477 static void 10478 print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp, 10479 zpool_status_t reason, zpool_errata_t errata, nvlist_t *item) 10480 { 10481 char status[ST_SIZE]; 10482 char action[AC_SIZE]; 10483 memset(status, 0, ST_SIZE); 10484 memset(action, 0, AC_SIZE); 10485 10486 switch (reason) { 10487 case ZPOOL_STATUS_MISSING_DEV_R: 10488 snprintf(status, ST_SIZE, gettext("One or more devices could " 10489 "not be opened. Sufficient replicas exist for\n\tthe pool " 10490 "to continue functioning in a degraded state.\n")); 10491 snprintf(action, AC_SIZE, gettext("Attach the missing device " 10492 "and online it using 'zpool online'.\n")); 10493 break; 10494 10495 case ZPOOL_STATUS_MISSING_DEV_NR: 10496 snprintf(status, ST_SIZE, gettext("One or more devices could " 10497 "not be opened. There are insufficient\n\treplicas for the" 10498 " pool to continue functioning.\n")); 10499 snprintf(action, AC_SIZE, gettext("Attach the missing device " 10500 "and online it using 'zpool online'.\n")); 10501 break; 10502 10503 case ZPOOL_STATUS_CORRUPT_LABEL_R: 10504 snprintf(status, ST_SIZE, gettext("One or more devices could " 10505 "not be used because the label is missing or\n\tinvalid. 
" 10506 "Sufficient replicas exist for the pool to continue\n\t" 10507 "functioning in a degraded state.\n")); 10508 snprintf(action, AC_SIZE, gettext("Replace the device using " 10509 "'zpool replace'.\n")); 10510 break; 10511 10512 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 10513 snprintf(status, ST_SIZE, gettext("One or more devices could " 10514 "not be used because the label is missing \n\tor invalid. " 10515 "There are insufficient replicas for the pool to " 10516 "continue\n\tfunctioning.\n")); 10517 zpool_explain_recover(zpool_get_handle(zhp), 10518 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL), 10519 action, AC_SIZE); 10520 break; 10521 10522 case ZPOOL_STATUS_FAILING_DEV: 10523 snprintf(status, ST_SIZE, gettext("One or more devices has " 10524 "experienced an unrecoverable error. An\n\tattempt was " 10525 "made to correct the error. Applications are " 10526 "unaffected.\n")); 10527 snprintf(action, AC_SIZE, gettext("Determine if the " 10528 "device needs to be replaced, and clear the errors\n\tusing" 10529 " 'zpool clear' or replace the device with 'zpool " 10530 "replace'.\n")); 10531 break; 10532 10533 case ZPOOL_STATUS_OFFLINE_DEV: 10534 snprintf(status, ST_SIZE, gettext("One or more devices has " 10535 "been taken offline by the administrator.\n\tSufficient " 10536 "replicas exist for the pool to continue functioning in " 10537 "a\n\tdegraded state.\n")); 10538 snprintf(action, AC_SIZE, gettext("Online the device " 10539 "using 'zpool online' or replace the device with\n\t'zpool " 10540 "replace'.\n")); 10541 break; 10542 10543 case ZPOOL_STATUS_REMOVED_DEV: 10544 snprintf(status, ST_SIZE, gettext("One or more devices have " 10545 "been removed.\n\tSufficient replicas exist for the pool " 10546 "to continue functioning in a\n\tdegraded state.\n")); 10547 snprintf(action, AC_SIZE, gettext("Online the device " 10548 "using zpool online' or replace the device with\n\t'zpool " 10549 "replace'.\n")); 10550 break; 10551 10552 case ZPOOL_STATUS_RESILVERING: 10553 case ZPOOL_STATUS_REBUILDING: 10554 snprintf(status, ST_SIZE, gettext("One or more devices is " 10555 "currently being resilvered. The pool will\n\tcontinue " 10556 "to function, possibly in a degraded state.\n")); 10557 snprintf(action, AC_SIZE, gettext("Wait for the resilver to " 10558 "complete.\n")); 10559 break; 10560 10561 case ZPOOL_STATUS_REBUILD_SCRUB: 10562 snprintf(status, ST_SIZE, gettext("One or more devices have " 10563 "been sequentially resilvered, scrubbing\n\tthe pool " 10564 "is recommended.\n")); 10565 snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to " 10566 "verify all data checksums.\n")); 10567 break; 10568 10569 case ZPOOL_STATUS_CORRUPT_DATA: 10570 snprintf(status, ST_SIZE, gettext("One or more devices has " 10571 "experienced an error resulting in data\n\tcorruption. " 10572 "Applications may be affected.\n")); 10573 snprintf(action, AC_SIZE, gettext("Restore the file in question" 10574 " if possible. Otherwise restore the\n\tentire pool from " 10575 "backup.\n")); 10576 break; 10577 10578 case ZPOOL_STATUS_CORRUPT_POOL: 10579 snprintf(status, ST_SIZE, gettext("The pool metadata is " 10580 "corrupted and the pool cannot be opened.\n")); 10581 zpool_explain_recover(zpool_get_handle(zhp), 10582 zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL), 10583 action, AC_SIZE); 10584 break; 10585 10586 case ZPOOL_STATUS_VERSION_OLDER: 10587 snprintf(status, ST_SIZE, gettext("The pool is formatted using " 10588 "a legacy on-disk format. 
The pool can\n\tstill be used, " 10589 "but some features are unavailable.\n")); 10590 snprintf(action, AC_SIZE, gettext("Upgrade the pool using " 10591 "'zpool upgrade'. Once this is done, the\n\tpool will no " 10592 "longer be accessible on software that does not support\n\t" 10593 "feature flags.\n")); 10594 break; 10595 10596 case ZPOOL_STATUS_VERSION_NEWER: 10597 snprintf(status, ST_SIZE, gettext("The pool has been upgraded " 10598 "to a newer, incompatible on-disk version.\n\tThe pool " 10599 "cannot be accessed on this system.\n")); 10600 snprintf(action, AC_SIZE, gettext("Access the pool from a " 10601 "system running more recent software, or\n\trestore the " 10602 "pool from backup.\n")); 10603 break; 10604 10605 case ZPOOL_STATUS_FEAT_DISABLED: 10606 snprintf(status, ST_SIZE, gettext("Some supported and " 10607 "requested features are not enabled on the pool.\n\t" 10608 "The pool can still be used, but some features are " 10609 "unavailable.\n")); 10610 snprintf(action, AC_SIZE, gettext("Enable all features using " 10611 "'zpool upgrade'. Once this is done,\n\tthe pool may no " 10612 "longer be accessible by software that does not support\n\t" 10613 "the features. See zpool-features(7) for details.\n")); 10614 break; 10615 10616 case ZPOOL_STATUS_COMPATIBILITY_ERR: 10617 snprintf(status, ST_SIZE, gettext("This pool has a " 10618 "compatibility list specified, but it could not be\n\t" 10619 "read/parsed at this time. The pool can still be used, " 10620 "but this\n\tshould be investigated.\n")); 10621 snprintf(action, AC_SIZE, gettext("Check the value of the " 10622 "'compatibility' property against the\n\t" 10623 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or " 10624 ZPOOL_DATA_COMPAT_D ".\n")); 10625 break; 10626 10627 case ZPOOL_STATUS_INCOMPATIBLE_FEAT: 10628 snprintf(status, ST_SIZE, gettext("One or more features " 10629 "are enabled on the pool despite not being\n\t" 10630 "requested by the 'compatibility' property.\n")); 10631 snprintf(action, AC_SIZE, gettext("Consider setting " 10632 "'compatibility' to an appropriate value, or\n\t" 10633 "adding needed features to the relevant file in\n\t" 10634 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n")); 10635 break; 10636 10637 case ZPOOL_STATUS_UNSUP_FEAT_READ: 10638 snprintf(status, ST_SIZE, gettext("The pool cannot be accessed " 10639 "on this system because it uses the\n\tfollowing feature(s)" 10640 " not supported on this system:\n")); 10641 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status, 10642 1024); 10643 snprintf(action, AC_SIZE, gettext("Access the pool from a " 10644 "system that supports the required feature(s),\n\tor " 10645 "restore the pool from backup.\n")); 10646 break; 10647 10648 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 10649 snprintf(status, ST_SIZE, gettext("The pool can only be " 10650 "accessed in read-only mode on this system. It\n\tcannot be" 10651 " accessed in read-write mode because it uses the " 10652 "following\n\tfeature(s) not supported on this system:\n")); 10653 zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status, 10654 1024); 10655 snprintf(action, AC_SIZE, gettext("The pool cannot be accessed " 10656 "in read-write mode. 
Import the pool with\n" 10657 "\t\"-o readonly=on\", access the pool from a system that " 10658 "supports the\n\trequired feature(s), or restore the " 10659 "pool from backup.\n")); 10660 break; 10661 10662 case ZPOOL_STATUS_FAULTED_DEV_R: 10663 snprintf(status, ST_SIZE, gettext("One or more devices are " 10664 "faulted in response to persistent errors.\n\tSufficient " 10665 "replicas exist for the pool to continue functioning " 10666 "in a\n\tdegraded state.\n")); 10667 snprintf(action, AC_SIZE, gettext("Replace the faulted device, " 10668 "or use 'zpool clear' to mark the device\n\trepaired.\n")); 10669 break; 10670 10671 case ZPOOL_STATUS_FAULTED_DEV_NR: 10672 snprintf(status, ST_SIZE, gettext("One or more devices are " 10673 "faulted in response to persistent errors. There are " 10674 "insufficient replicas for the pool to\n\tcontinue " 10675 "functioning.\n")); 10676 snprintf(action, AC_SIZE, gettext("Destroy and re-create the " 10677 "pool from a backup source. Manually marking the device\n" 10678 "\trepaired using 'zpool clear' may allow some data " 10679 "to be recovered.\n")); 10680 break; 10681 10682 case ZPOOL_STATUS_IO_FAILURE_MMP: 10683 snprintf(status, ST_SIZE, gettext("The pool is suspended " 10684 "because multihost writes failed or were delayed;\n\t" 10685 "another system could import the pool undetected.\n")); 10686 snprintf(action, AC_SIZE, gettext("Make sure the pool's devices" 10687 " are connected, then reboot your system and\n\timport the " 10688 "pool or run 'zpool clear' to resume the pool.\n")); 10689 break; 10690 10691 case ZPOOL_STATUS_IO_FAILURE_WAIT: 10692 case ZPOOL_STATUS_IO_FAILURE_CONTINUE: 10693 snprintf(status, ST_SIZE, gettext("One or more devices are " 10694 "faulted in response to IO failures.\n")); 10695 snprintf(action, AC_SIZE, gettext("Make sure the affected " 10696 "devices are connected, then run 'zpool clear'.\n")); 10697 break; 10698 10699 case ZPOOL_STATUS_BAD_LOG: 10700 snprintf(status, ST_SIZE, gettext("An intent log record " 10701 "could not be read.\n" 10702 "\tWaiting for administrator intervention to fix the " 10703 "faulted pool.\n")); 10704 snprintf(action, AC_SIZE, gettext("Either restore the affected " 10705 "device(s) and run 'zpool online',\n" 10706 "\tor ignore the intent log records by running " 10707 "'zpool clear'.\n")); 10708 break; 10709 10710 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 10711 snprintf(status, ST_SIZE, gettext("One or more devices are " 10712 "configured to use a non-native block size.\n" 10713 "\tExpect reduced performance.\n")); 10714 snprintf(action, AC_SIZE, gettext("Replace affected devices " 10715 "with devices that support the\n\tconfigured block size, " 10716 "or migrate data to a properly configured\n\tpool.\n")); 10717 break; 10718 10719 case ZPOOL_STATUS_HOSTID_MISMATCH: 10720 snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid" 10721 " and system hostid on imported pool.\n\tThis pool was " 10722 "previously imported into a system with a different " 10723 "hostid,\n\tand then was verbatim imported into this " 10724 "system.\n")); 10725 snprintf(action, AC_SIZE, gettext("Export this pool on all " 10726 "systems on which it is imported.\n" 10727 "\tThen import it to correct the mismatch.\n")); 10728 break; 10729 10730 case ZPOOL_STATUS_ERRATA: 10731 snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"), 10732 errata); 10733 switch (errata) { 10734 case ZPOOL_ERRATA_NONE: 10735 break; 10736 10737 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 10738 snprintf(action, AC_SIZE, gettext("To correct the 
issue" 10739 " run 'zpool scrub'.\n")); 10740 break; 10741 10742 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 10743 (void) strlcat(status, gettext("\tExisting encrypted " 10744 "datasets contain an on-disk incompatibility\n\t " 10745 "which needs to be corrected.\n"), ST_SIZE); 10746 snprintf(action, AC_SIZE, gettext("To correct the issue" 10747 " backup existing encrypted datasets to new\n\t" 10748 "encrypted datasets and destroy the old ones. " 10749 "'zfs mount -o ro' can\n\tbe used to temporarily " 10750 "mount existing encrypted datasets readonly.\n")); 10751 break; 10752 10753 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 10754 (void) strlcat(status, gettext("\tExisting encrypted " 10755 "snapshots and bookmarks contain an on-disk\n\t" 10756 "incompatibility. This may cause on-disk " 10757 "corruption if they are used\n\twith " 10758 "'zfs recv'.\n"), ST_SIZE); 10759 snprintf(action, AC_SIZE, gettext("To correct the" 10760 "issue, enable the bookmark_v2 feature. No " 10761 "additional\n\taction is needed if there are no " 10762 "encrypted snapshots or bookmarks.\n\tIf preserving" 10763 "the encrypted snapshots and bookmarks is required," 10764 " use\n\ta non-raw send to backup and restore them." 10765 " Alternately, they may be\n\tremoved to resolve " 10766 "the incompatibility.\n")); 10767 break; 10768 10769 default: 10770 /* 10771 * All errata which allow the pool to be imported 10772 * must contain an action message. 10773 */ 10774 assert(0); 10775 } 10776 break; 10777 10778 default: 10779 /* 10780 * The remaining errors can't actually be generated, yet. 10781 */ 10782 assert(reason == ZPOOL_STATUS_OK); 10783 } 10784 10785 if (status[0] != 0) { 10786 if (cbp->cb_json) 10787 fnvlist_add_string(item, "status", status); 10788 else { 10789 printf_color(ANSI_BOLD, gettext("status: ")); 10790 printf_color(ANSI_YELLOW, status); 10791 } 10792 } 10793 10794 if (action[0] != 0) { 10795 if (cbp->cb_json) 10796 fnvlist_add_string(item, "action", action); 10797 else { 10798 printf_color(ANSI_BOLD, gettext("action: ")); 10799 printf_color(ANSI_YELLOW, action); 10800 } 10801 } 10802 } 10803 10804 static int 10805 status_callback_json(zpool_handle_t *zhp, void *data) 10806 { 10807 status_cbdata_t *cbp = data; 10808 nvlist_t *config, *nvroot; 10809 const char *msgid; 10810 char pool_guid[256]; 10811 char msgbuf[256]; 10812 uint64_t guid; 10813 zpool_status_t reason; 10814 zpool_errata_t errata; 10815 uint_t c; 10816 vdev_stat_t *vs; 10817 nvlist_t *item, *d, *load_info, *vds; 10818 10819 /* If dedup stats were requested, also fetch dedupcached. */ 10820 if (cbp->cb_dedup_stats > 1) 10821 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME); 10822 reason = zpool_get_status(zhp, &msgid, &errata); 10823 /* 10824 * If we were given 'zpool status -x', only report those pools with 10825 * problems. 
10826 */ 10827 if (cbp->cb_explain && 10828 (reason == ZPOOL_STATUS_OK || 10829 reason == ZPOOL_STATUS_VERSION_OLDER || 10830 reason == ZPOOL_STATUS_FEAT_DISABLED || 10831 reason == ZPOOL_STATUS_COMPATIBILITY_ERR || 10832 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) { 10833 return (0); 10834 } 10835 10836 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools"); 10837 item = fnvlist_alloc(); 10838 vds = fnvlist_alloc(); 10839 fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int); 10840 config = zpool_get_config(zhp, NULL); 10841 10842 if (config != NULL) { 10843 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 10844 verify(nvlist_lookup_uint64_array(nvroot, 10845 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0); 10846 if (cbp->cb_json_pool_key_guid) { 10847 guid = fnvlist_lookup_uint64(config, 10848 ZPOOL_CONFIG_POOL_GUID); 10849 snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid); 10850 } 10851 cbp->cb_count++; 10852 10853 print_status_reason(zhp, cbp, reason, errata, item); 10854 if (msgid != NULL) { 10855 snprintf(msgbuf, 256, 10856 "https://openzfs.github.io/openzfs-docs/msg/%s", 10857 msgid); 10858 fnvlist_add_string(item, "msgid", msgid); 10859 fnvlist_add_string(item, "moreinfo", msgbuf); 10860 } 10861 10862 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 10863 &load_info) == 0) { 10864 fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO, 10865 load_info); 10866 } 10867 10868 scan_status_nvlist(zhp, cbp, nvroot, item); 10869 removal_status_nvlist(zhp, cbp, nvroot, item); 10870 checkpoint_status_nvlist(nvroot, cbp, item); 10871 raidz_expand_status_nvlist(zhp, cbp, nvroot, item); 10872 vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds); 10873 if (cbp->cb_flat_vdevs) { 10874 class_vdevs_nvlist(zhp, cbp, nvroot, 10875 VDEV_ALLOC_BIAS_DEDUP, vds); 10876 class_vdevs_nvlist(zhp, cbp, nvroot, 10877 VDEV_ALLOC_BIAS_SPECIAL, vds); 10878 class_vdevs_nvlist(zhp, cbp, nvroot, 10879 VDEV_ALLOC_CLASS_LOGS, vds); 10880 l2cache_nvlist(zhp, cbp, nvroot, vds); 10881 spares_nvlist(zhp, cbp, nvroot, vds); 10882 10883 fnvlist_add_nvlist(item, "vdevs", vds); 10884 fnvlist_free(vds); 10885 } else { 10886 fnvlist_add_nvlist(item, "vdevs", vds); 10887 fnvlist_free(vds); 10888 10889 class_vdevs_nvlist(zhp, cbp, nvroot, 10890 VDEV_ALLOC_BIAS_DEDUP, item); 10891 class_vdevs_nvlist(zhp, cbp, nvroot, 10892 VDEV_ALLOC_BIAS_SPECIAL, item); 10893 class_vdevs_nvlist(zhp, cbp, nvroot, 10894 VDEV_ALLOC_CLASS_LOGS, item); 10895 l2cache_nvlist(zhp, cbp, nvroot, item); 10896 spares_nvlist(zhp, cbp, nvroot, item); 10897 } 10898 dedup_stats_nvlist(zhp, cbp, item); 10899 errors_nvlist(zhp, cbp, item); 10900 } 10901 if (cbp->cb_json_pool_key_guid) { 10902 fnvlist_add_nvlist(d, pool_guid, item); 10903 } else { 10904 fnvlist_add_nvlist(d, zpool_get_name(zhp), 10905 item); 10906 } 10907 fnvlist_free(item); 10908 return (0); 10909 } 10910 10911 /* 10912 * Display a summary of pool status. Displays a summary such as: 10913 * 10914 * pool: tank 10915 * status: DEGRADED 10916 * reason: One or more devices ... 10917 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01 10918 * config: 10919 * mirror DEGRADED 10920 * c1t0d0 OK 10921 * c2t0d0 UNAVAIL 10922 * 10923 * When given the '-v' option, we print out the complete config. If the '-e' 10924 * option is specified, then we print out error rate information as well. 
10925 */ 10926 static int 10927 status_callback(zpool_handle_t *zhp, void *data) 10928 { 10929 status_cbdata_t *cbp = data; 10930 nvlist_t *config, *nvroot; 10931 const char *msgid; 10932 zpool_status_t reason; 10933 zpool_errata_t errata; 10934 const char *health; 10935 uint_t c; 10936 vdev_stat_t *vs; 10937 10938 /* If dedup stats were requested, also fetch dedupcached. */ 10939 if (cbp->cb_dedup_stats > 1) 10940 zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME); 10941 10942 config = zpool_get_config(zhp, NULL); 10943 reason = zpool_get_status(zhp, &msgid, &errata); 10944 10945 cbp->cb_count++; 10946 10947 /* 10948 * If we were given 'zpool status -x', only report those pools with 10949 * problems. 10950 */ 10951 if (cbp->cb_explain && 10952 (reason == ZPOOL_STATUS_OK || 10953 reason == ZPOOL_STATUS_VERSION_OLDER || 10954 reason == ZPOOL_STATUS_FEAT_DISABLED || 10955 reason == ZPOOL_STATUS_COMPATIBILITY_ERR || 10956 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) { 10957 if (!cbp->cb_allpools) { 10958 (void) printf(gettext("pool '%s' is healthy\n"), 10959 zpool_get_name(zhp)); 10960 if (cbp->cb_first) 10961 cbp->cb_first = B_FALSE; 10962 } 10963 return (0); 10964 } 10965 10966 if (cbp->cb_first) 10967 cbp->cb_first = B_FALSE; 10968 else 10969 (void) printf("\n"); 10970 10971 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 10972 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 10973 (uint64_t **)&vs, &c) == 0); 10974 10975 health = zpool_get_state_str(zhp); 10976 10977 printf(" "); 10978 printf_color(ANSI_BOLD, gettext("pool:")); 10979 printf(" %s\n", zpool_get_name(zhp)); 10980 fputc(' ', stdout); 10981 printf_color(ANSI_BOLD, gettext("state: ")); 10982 10983 printf_color(health_str_to_color(health), "%s", health); 10984 10985 fputc('\n', stdout); 10986 print_status_reason(zhp, cbp, reason, errata, NULL); 10987 10988 if (msgid != NULL) { 10989 printf(" "); 10990 printf_color(ANSI_BOLD, gettext("see:")); 10991 printf(gettext( 10992 " https://openzfs.github.io/openzfs-docs/msg/%s\n"), 10993 msgid); 10994 } 10995 10996 if (config != NULL) { 10997 uint64_t nerr; 10998 nvlist_t **spares, **l2cache; 10999 uint_t nspares, nl2cache; 11000 11001 print_scan_status(zhp, nvroot); 11002 11003 pool_removal_stat_t *prs = NULL; 11004 (void) nvlist_lookup_uint64_array(nvroot, 11005 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 11006 print_removal_status(zhp, prs); 11007 11008 pool_checkpoint_stat_t *pcs = NULL; 11009 (void) nvlist_lookup_uint64_array(nvroot, 11010 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 11011 print_checkpoint_status(pcs); 11012 11013 pool_raidz_expand_stat_t *pres = NULL; 11014 (void) nvlist_lookup_uint64_array(nvroot, 11015 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c); 11016 print_raidz_expand_status(zhp, pres); 11017 11018 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0, 11019 cbp->cb_name_flags | VDEV_NAME_TYPE_ID); 11020 if (cbp->cb_namewidth < 10) 11021 cbp->cb_namewidth = 10; 11022 11023 color_start(ANSI_BOLD); 11024 (void) printf(gettext("config:\n\n")); 11025 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"), 11026 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE", 11027 "CKSUM"); 11028 color_end(); 11029 11030 if (cbp->cb_print_slow_ios) { 11031 printf_color(ANSI_BOLD, " %5s", gettext("SLOW")); 11032 } 11033 11034 if (cbp->cb_print_power) { 11035 printf_color(ANSI_BOLD, " %5s", gettext("POWER")); 11036 } 11037 11038 if (cbp->cb_print_dio_verify) { 11039 printf_color(ANSI_BOLD, " %5s", gettext("DIO")); 11040 } 
11041 11042 if (cbp->vcdl != NULL) 11043 print_cmd_columns(cbp->vcdl, 0); 11044 11045 printf("\n"); 11046 11047 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0, 11048 B_FALSE, NULL); 11049 11050 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP); 11051 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 11052 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS); 11053 11054 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 11055 &l2cache, &nl2cache) == 0) 11056 print_l2cache(zhp, cbp, l2cache, nl2cache); 11057 11058 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 11059 &spares, &nspares) == 0) 11060 print_spares(zhp, cbp, spares, nspares); 11061 11062 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, 11063 &nerr) == 0) { 11064 (void) printf("\n"); 11065 if (nerr == 0) { 11066 (void) printf(gettext( 11067 "errors: No known data errors\n")); 11068 } else if (!cbp->cb_verbose) { 11069 color_start(ANSI_RED); 11070 (void) printf(gettext("errors: %llu data " 11071 "errors, use '-v' for a list\n"), 11072 (u_longlong_t)nerr); 11073 color_end(); 11074 } else { 11075 print_error_log(zhp); 11076 } 11077 } 11078 11079 if (cbp->cb_dedup_stats) 11080 print_dedup_stats(zhp, config, cbp->cb_literal); 11081 } else { 11082 (void) printf(gettext("config: The configuration cannot be " 11083 "determined.\n")); 11084 } 11085 11086 return (0); 11087 } 11088 11089 /* 11090 * zpool status [-dDegiLpPstvx] [-c [script1,script2,...]] ... 11091 * [-j|--json [--json-flat-vdevs] [--json-int] ... 11092 * [--json-pool-key-guid]] [--power] [-T d|u] ... 11093 * [pool] [interval [count]] 11094 * 11095 * -c CMD For each vdev, run command CMD 11096 * -D Display dedup status (undocumented) 11097 * -d Display Direct I/O write verify errors 11098 * -e Display only unhealthy vdevs 11099 * -g Display guid for individual vdev name. 11100 * -i Display vdev initialization status. 11101 * -j [...] Display output in JSON format 11102 * --json-flat-vdevs Display vdevs in flat hierarchy 11103 * --json-int Display numbers in integer format instead of string 11104 * --json-pool-key-guid Use pool GUID as key for pool objects 11105 * -L Follow links when resolving vdev path name. 11106 * -P Display full path for vdev name. 11107 * -p Display values in parsable (exact) format. 11108 * --power Display vdev enclosure slot power status 11109 * -s Display slow IOs column. 11110 * -T Display a timestamp in date(1) or Unix format 11111 * -t Display vdev TRIM status. 11112 * -v Display complete error logs 11113 * -x Display only pools with potential problems 11114 * 11115 * Describes the health status of all pools or some subset. 
11116 */ 11117 int 11118 zpool_do_status(int argc, char **argv) 11119 { 11120 int c; 11121 int ret; 11122 float interval = 0; 11123 unsigned long count = 0; 11124 status_cbdata_t cb = { 0 }; 11125 nvlist_t *data; 11126 char *cmd = NULL; 11127 11128 struct option long_options[] = { 11129 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 11130 {"json", no_argument, NULL, 'j'}, 11131 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT}, 11132 {"json-flat-vdevs", no_argument, NULL, 11133 ZPOOL_OPTION_JSON_FLAT_VDEVS}, 11134 {"json-pool-key-guid", no_argument, NULL, 11135 ZPOOL_OPTION_POOL_KEY_GUID}, 11136 {0, 0, 0, 0} 11137 }; 11138 11139 /* check options */ 11140 while ((c = getopt_long(argc, argv, "c:jdDegiLpPstT:vx", long_options, 11141 NULL)) != -1) { 11142 switch (c) { 11143 case 'c': 11144 if (cmd != NULL) { 11145 fprintf(stderr, 11146 gettext("Can't set -c flag twice\n")); 11147 exit(1); 11148 } 11149 11150 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 11151 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 11152 fprintf(stderr, gettext( 11153 "Can't run -c, disabled by " 11154 "ZPOOL_SCRIPTS_ENABLED.\n")); 11155 exit(1); 11156 } 11157 11158 if ((getuid() <= 0 || geteuid() <= 0) && 11159 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 11160 fprintf(stderr, gettext( 11161 "Can't run -c with root privileges " 11162 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 11163 exit(1); 11164 } 11165 cmd = optarg; 11166 break; 11167 case 'd': 11168 cb.cb_print_dio_verify = B_TRUE; 11169 break; 11170 case 'D': 11171 if (++cb.cb_dedup_stats > 2) 11172 cb.cb_dedup_stats = 2; 11173 break; 11174 case 'e': 11175 cb.cb_print_unhealthy = B_TRUE; 11176 break; 11177 case 'g': 11178 cb.cb_name_flags |= VDEV_NAME_GUID; 11179 break; 11180 case 'i': 11181 cb.cb_print_vdev_init = B_TRUE; 11182 break; 11183 case 'L': 11184 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 11185 break; 11186 case 'p': 11187 cb.cb_literal = B_TRUE; 11188 break; 11189 case 'P': 11190 cb.cb_name_flags |= VDEV_NAME_PATH; 11191 break; 11192 case 's': 11193 cb.cb_print_slow_ios = B_TRUE; 11194 break; 11195 case 't': 11196 cb.cb_print_vdev_trim = B_TRUE; 11197 break; 11198 case 'T': 11199 get_timestamp_arg(*optarg); 11200 break; 11201 case 'v': 11202 cb.cb_verbose = B_TRUE; 11203 break; 11204 case 'j': 11205 cb.cb_json = B_TRUE; 11206 break; 11207 case 'x': 11208 cb.cb_explain = B_TRUE; 11209 break; 11210 case ZPOOL_OPTION_POWER: 11211 cb.cb_print_power = B_TRUE; 11212 break; 11213 case ZPOOL_OPTION_JSON_FLAT_VDEVS: 11214 cb.cb_flat_vdevs = B_TRUE; 11215 break; 11216 case ZPOOL_OPTION_JSON_NUMS_AS_INT: 11217 cb.cb_json_as_int = B_TRUE; 11218 cb.cb_literal = B_TRUE; 11219 break; 11220 case ZPOOL_OPTION_POOL_KEY_GUID: 11221 cb.cb_json_pool_key_guid = B_TRUE; 11222 break; 11223 case '?': 11224 if (optopt == 'c') { 11225 print_zpool_script_list("status"); 11226 exit(0); 11227 } else { 11228 fprintf(stderr, 11229 gettext("invalid option '%c'\n"), optopt); 11230 } 11231 usage(B_FALSE); 11232 } 11233 } 11234 11235 argc -= optind; 11236 argv += optind; 11237 11238 get_interval_count(&argc, argv, &interval, &count); 11239 11240 if (argc == 0) 11241 cb.cb_allpools = B_TRUE; 11242 11243 cb.cb_first = B_TRUE; 11244 cb.cb_print_status = B_TRUE; 11245 11246 if (cb.cb_flat_vdevs && !cb.cb_json) { 11247 fprintf(stderr, gettext("'--json-flat-vdevs' only works with" 11248 " '-j' option\n")); 11249 usage(B_FALSE); 11250 } 11251 11252 if (cb.cb_json_as_int && !cb.cb_json) { 11253 (void) fprintf(stderr, gettext("'--json-int' only works with" 11254 " '-j' 
option\n")); 11255 usage(B_FALSE); 11256 } 11257 11258 if (!cb.cb_json && cb.cb_json_pool_key_guid) { 11259 (void) fprintf(stderr, gettext("'json-pool-key-guid' only" 11260 " works with '-j' option\n")); 11261 usage(B_FALSE); 11262 } 11263 11264 for (;;) { 11265 if (cb.cb_json) { 11266 cb.cb_jsobj = zpool_json_schema(0, 1); 11267 data = fnvlist_alloc(); 11268 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data); 11269 fnvlist_free(data); 11270 } 11271 11272 if (timestamp_fmt != NODATE) { 11273 if (cb.cb_json) { 11274 if (cb.cb_json_as_int) { 11275 fnvlist_add_uint64(cb.cb_jsobj, "time", 11276 time(NULL)); 11277 } else { 11278 char ts[128]; 11279 get_timestamp(timestamp_fmt, ts, 128); 11280 fnvlist_add_string(cb.cb_jsobj, "time", 11281 ts); 11282 } 11283 } else 11284 print_timestamp(timestamp_fmt); 11285 } 11286 11287 if (cmd != NULL) 11288 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd, 11289 NULL, NULL, 0, 0); 11290 11291 if (cb.cb_json) { 11292 ret = for_each_pool(argc, argv, B_TRUE, NULL, 11293 ZFS_TYPE_POOL, cb.cb_literal, 11294 status_callback_json, &cb); 11295 } else { 11296 ret = for_each_pool(argc, argv, B_TRUE, NULL, 11297 ZFS_TYPE_POOL, cb.cb_literal, 11298 status_callback, &cb); 11299 } 11300 11301 if (cb.vcdl != NULL) 11302 free_vdev_cmd_data_list(cb.vcdl); 11303 11304 if (cb.cb_json) { 11305 if (ret == 0) 11306 zcmd_print_json(cb.cb_jsobj); 11307 else 11308 nvlist_free(cb.cb_jsobj); 11309 } else { 11310 if (argc == 0 && cb.cb_count == 0) { 11311 (void) fprintf(stderr, "%s", 11312 gettext("no pools available\n")); 11313 } else if (cb.cb_explain && cb.cb_first && 11314 cb.cb_allpools) { 11315 (void) printf("%s", 11316 gettext("all pools are healthy\n")); 11317 } 11318 } 11319 11320 if (ret != 0) 11321 return (ret); 11322 11323 if (interval == 0) 11324 break; 11325 11326 if (count != 0 && --count == 0) 11327 break; 11328 11329 (void) fflush(stdout); 11330 (void) fsleep(interval); 11331 } 11332 11333 return (0); 11334 } 11335 11336 typedef struct upgrade_cbdata { 11337 int cb_first; 11338 int cb_argc; 11339 uint64_t cb_version; 11340 char **cb_argv; 11341 } upgrade_cbdata_t; 11342 11343 static int 11344 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs) 11345 { 11346 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION); 11347 int *count = (int *)unsupp_fs; 11348 11349 if (zfs_version > ZPL_VERSION) { 11350 (void) printf(gettext("%s (v%d) is not supported by this " 11351 "implementation of ZFS.\n"), 11352 zfs_get_name(zhp), zfs_version); 11353 (*count)++; 11354 } 11355 11356 zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs); 11357 11358 zfs_close(zhp); 11359 11360 return (0); 11361 } 11362 11363 static int 11364 upgrade_version(zpool_handle_t *zhp, uint64_t version) 11365 { 11366 int ret; 11367 nvlist_t *config; 11368 uint64_t oldversion; 11369 int unsupp_fs = 0; 11370 11371 config = zpool_get_config(zhp, NULL); 11372 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 11373 &oldversion) == 0); 11374 11375 char compat[ZFS_MAXPROPLEN]; 11376 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat, 11377 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 11378 compat[0] = '\0'; 11379 11380 assert(SPA_VERSION_IS_SUPPORTED(oldversion)); 11381 assert(oldversion < version); 11382 11383 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs); 11384 if (ret != 0) 11385 return (ret); 11386 11387 if (unsupp_fs) { 11388 (void) fprintf(stderr, gettext("Upgrade not performed due " 11389 "to %d unsupported filesystems (max v%d).\n"), 11390 unsupp_fs, 
(int)ZPL_VERSION); 11391 return (1); 11392 } 11393 11394 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) { 11395 (void) fprintf(stderr, gettext("Upgrade not performed because " 11396 "'compatibility' property set to '" 11397 ZPOOL_COMPAT_LEGACY "'.\n")); 11398 return (1); 11399 } 11400 11401 ret = zpool_upgrade(zhp, version); 11402 if (ret != 0) 11403 return (ret); 11404 11405 if (version >= SPA_VERSION_FEATURES) { 11406 (void) printf(gettext("Successfully upgraded " 11407 "'%s' from version %llu to feature flags.\n"), 11408 zpool_get_name(zhp), (u_longlong_t)oldversion); 11409 } else { 11410 (void) printf(gettext("Successfully upgraded " 11411 "'%s' from version %llu to version %llu.\n"), 11412 zpool_get_name(zhp), (u_longlong_t)oldversion, 11413 (u_longlong_t)version); 11414 } 11415 11416 return (0); 11417 } 11418 11419 static int 11420 upgrade_enable_all(zpool_handle_t *zhp, int *countp) 11421 { 11422 int i, ret, count; 11423 boolean_t firstff = B_TRUE; 11424 nvlist_t *enabled = zpool_get_features(zhp); 11425 11426 char compat[ZFS_MAXPROPLEN]; 11427 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat, 11428 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 11429 compat[0] = '\0'; 11430 11431 boolean_t requested_features[SPA_FEATURES]; 11432 if (zpool_do_load_compat(compat, requested_features) != 11433 ZPOOL_COMPATIBILITY_OK) 11434 return (-1); 11435 11436 count = 0; 11437 for (i = 0; i < SPA_FEATURES; i++) { 11438 const char *fname = spa_feature_table[i].fi_uname; 11439 const char *fguid = spa_feature_table[i].fi_guid; 11440 11441 if (!spa_feature_table[i].fi_zfs_mod_supported || 11442 (spa_feature_table[i].fi_flags & ZFEATURE_FLAG_NO_UPGRADE)) 11443 continue; 11444 11445 if (!nvlist_exists(enabled, fguid) && requested_features[i]) { 11446 char *propname; 11447 verify(-1 != asprintf(&propname, "feature@%s", fname)); 11448 ret = zpool_set_prop(zhp, propname, 11449 ZFS_FEATURE_ENABLED); 11450 if (ret != 0) { 11451 free(propname); 11452 return (ret); 11453 } 11454 count++; 11455 11456 if (firstff) { 11457 (void) printf(gettext("Enabled the " 11458 "following features on '%s':\n"), 11459 zpool_get_name(zhp)); 11460 firstff = B_FALSE; 11461 } 11462 (void) printf(gettext(" %s\n"), fname); 11463 free(propname); 11464 } 11465 } 11466 11467 if (countp != NULL) 11468 *countp = count; 11469 return (0); 11470 } 11471 11472 static int 11473 upgrade_cb(zpool_handle_t *zhp, void *arg) 11474 { 11475 upgrade_cbdata_t *cbp = arg; 11476 nvlist_t *config; 11477 uint64_t version; 11478 boolean_t modified_pool = B_FALSE; 11479 int ret; 11480 11481 config = zpool_get_config(zhp, NULL); 11482 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 11483 &version) == 0); 11484 11485 assert(SPA_VERSION_IS_SUPPORTED(version)); 11486 11487 if (version < cbp->cb_version) { 11488 cbp->cb_first = B_FALSE; 11489 ret = upgrade_version(zhp, cbp->cb_version); 11490 if (ret != 0) 11491 return (ret); 11492 modified_pool = B_TRUE; 11493 11494 /* 11495 * If they did "zpool upgrade -a", then we could 11496 * be doing ioctls to different pools. We need 11497 * to log this history once to each pool, and bypass 11498 * the normal history logging that happens in main(). 
11499 */ 11500 (void) zpool_log_history(g_zfs, history_str); 11501 log_history = B_FALSE; 11502 } 11503 11504 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 11505 int count; 11506 ret = upgrade_enable_all(zhp, &count); 11507 if (ret != 0) 11508 return (ret); 11509 11510 if (count > 0) { 11511 cbp->cb_first = B_FALSE; 11512 modified_pool = B_TRUE; 11513 } 11514 } 11515 11516 if (modified_pool) { 11517 (void) printf("\n"); 11518 (void) after_zpool_upgrade(zhp); 11519 } 11520 11521 return (0); 11522 } 11523 11524 static int 11525 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg) 11526 { 11527 upgrade_cbdata_t *cbp = arg; 11528 nvlist_t *config; 11529 uint64_t version; 11530 11531 config = zpool_get_config(zhp, NULL); 11532 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 11533 &version) == 0); 11534 11535 assert(SPA_VERSION_IS_SUPPORTED(version)); 11536 11537 if (version < SPA_VERSION_FEATURES) { 11538 if (cbp->cb_first) { 11539 (void) printf(gettext("The following pools are " 11540 "formatted with legacy version numbers and can\n" 11541 "be upgraded to use feature flags. After " 11542 "being upgraded, these pools\nwill no " 11543 "longer be accessible by software that does not " 11544 "support feature\nflags.\n\n" 11545 "Note that setting a pool's 'compatibility' " 11546 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n" 11547 "inhibit upgrades.\n\n")); 11548 (void) printf(gettext("VER POOL\n")); 11549 (void) printf(gettext("--- ------------\n")); 11550 cbp->cb_first = B_FALSE; 11551 } 11552 11553 (void) printf("%2llu %s\n", (u_longlong_t)version, 11554 zpool_get_name(zhp)); 11555 } 11556 11557 return (0); 11558 } 11559 11560 static int 11561 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg) 11562 { 11563 upgrade_cbdata_t *cbp = arg; 11564 nvlist_t *config; 11565 uint64_t version; 11566 11567 config = zpool_get_config(zhp, NULL); 11568 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 11569 &version) == 0); 11570 11571 if (version >= SPA_VERSION_FEATURES) { 11572 int i; 11573 boolean_t poolfirst = B_TRUE; 11574 nvlist_t *enabled = zpool_get_features(zhp); 11575 11576 for (i = 0; i < SPA_FEATURES; i++) { 11577 const char *fguid = spa_feature_table[i].fi_guid; 11578 const char *fname = spa_feature_table[i].fi_uname; 11579 11580 if (!spa_feature_table[i].fi_zfs_mod_supported) 11581 continue; 11582 11583 if (!nvlist_exists(enabled, fguid)) { 11584 if (cbp->cb_first) { 11585 (void) printf(gettext("\nSome " 11586 "supported features are not " 11587 "enabled on the following pools. " 11588 "Once a\nfeature is enabled the " 11589 "pool may become incompatible with " 11590 "software\nthat does not support " 11591 "the feature. See " 11592 "zpool-features(7) for " 11593 "details.\n\n" 11594 "Note that the pool " 11595 "'compatibility' feature can be " 11596 "used to inhibit\nfeature " 11597 "upgrades.\n\n" 11598 "Features marked with (*) are not " 11599 "applied automatically on upgrade, " 11600 "and\nmust be applied explicitly " 11601 "with zpool-set(7).\n\n")); 11602 (void) printf(gettext("POOL " 11603 "FEATURE\n")); 11604 (void) printf(gettext("------" 11605 "---------\n")); 11606 cbp->cb_first = B_FALSE; 11607 } 11608 11609 if (poolfirst) { 11610 (void) printf(gettext("%s\n"), 11611 zpool_get_name(zhp)); 11612 poolfirst = B_FALSE; 11613 } 11614 11615 (void) printf(gettext(" %s%s\n"), fname, 11616 spa_feature_table[i].fi_flags & 11617 ZFEATURE_FLAG_NO_UPGRADE ? 
"(*)" : ""); 11618 } 11619 /* 11620 * If they did "zpool upgrade -a", then we could 11621 * be doing ioctls to different pools. We need 11622 * to log this history once to each pool, and bypass 11623 * the normal history logging that happens in main(). 11624 */ 11625 (void) zpool_log_history(g_zfs, history_str); 11626 log_history = B_FALSE; 11627 } 11628 } 11629 11630 return (0); 11631 } 11632 11633 static int 11634 upgrade_one(zpool_handle_t *zhp, void *data) 11635 { 11636 boolean_t modified_pool = B_FALSE; 11637 upgrade_cbdata_t *cbp = data; 11638 uint64_t cur_version; 11639 int ret; 11640 11641 if (strcmp("log", zpool_get_name(zhp)) == 0) { 11642 (void) fprintf(stderr, gettext("'log' is now a reserved word\n" 11643 "Pool 'log' must be renamed using export and import" 11644 " to upgrade.\n")); 11645 return (1); 11646 } 11647 11648 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 11649 if (cur_version > cbp->cb_version) { 11650 (void) printf(gettext("Pool '%s' is already formatted " 11651 "using more current version '%llu'.\n\n"), 11652 zpool_get_name(zhp), (u_longlong_t)cur_version); 11653 return (0); 11654 } 11655 11656 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) { 11657 (void) printf(gettext("Pool '%s' is already formatted " 11658 "using version %llu.\n\n"), zpool_get_name(zhp), 11659 (u_longlong_t)cbp->cb_version); 11660 return (0); 11661 } 11662 11663 if (cur_version != cbp->cb_version) { 11664 modified_pool = B_TRUE; 11665 ret = upgrade_version(zhp, cbp->cb_version); 11666 if (ret != 0) 11667 return (ret); 11668 } 11669 11670 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 11671 int count = 0; 11672 ret = upgrade_enable_all(zhp, &count); 11673 if (ret != 0) 11674 return (ret); 11675 11676 if (count != 0) { 11677 modified_pool = B_TRUE; 11678 } else if (cur_version == SPA_VERSION) { 11679 (void) printf(gettext("Pool '%s' already has all " 11680 "supported and requested features enabled.\n"), 11681 zpool_get_name(zhp)); 11682 } 11683 } 11684 11685 if (modified_pool) { 11686 (void) printf("\n"); 11687 (void) after_zpool_upgrade(zhp); 11688 } 11689 11690 return (0); 11691 } 11692 11693 /* 11694 * zpool upgrade 11695 * zpool upgrade -v 11696 * zpool upgrade [-V version] <-a | pool ...> 11697 * 11698 * With no arguments, display downrev'd ZFS pool available for upgrade. 11699 * Individual pools can be upgraded by specifying the pool, and '-a' will 11700 * upgrade all pools. 
11701 */ 11702 int 11703 zpool_do_upgrade(int argc, char **argv) 11704 { 11705 int c; 11706 upgrade_cbdata_t cb = { 0 }; 11707 int ret = 0; 11708 boolean_t showversions = B_FALSE; 11709 boolean_t upgradeall = B_FALSE; 11710 char *end; 11711 11712 11713 /* check options */ 11714 while ((c = getopt(argc, argv, ":avV:")) != -1) { 11715 switch (c) { 11716 case 'a': 11717 upgradeall = B_TRUE; 11718 break; 11719 case 'v': 11720 showversions = B_TRUE; 11721 break; 11722 case 'V': 11723 cb.cb_version = strtoll(optarg, &end, 10); 11724 if (*end != '\0' || 11725 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) { 11726 (void) fprintf(stderr, 11727 gettext("invalid version '%s'\n"), optarg); 11728 usage(B_FALSE); 11729 } 11730 break; 11731 case ':': 11732 (void) fprintf(stderr, gettext("missing argument for " 11733 "'%c' option\n"), optopt); 11734 usage(B_FALSE); 11735 break; 11736 case '?': 11737 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 11738 optopt); 11739 usage(B_FALSE); 11740 } 11741 } 11742 11743 cb.cb_argc = argc; 11744 cb.cb_argv = argv; 11745 argc -= optind; 11746 argv += optind; 11747 11748 if (cb.cb_version == 0) { 11749 cb.cb_version = SPA_VERSION; 11750 } else if (!upgradeall && argc == 0) { 11751 (void) fprintf(stderr, gettext("-V option is " 11752 "incompatible with other arguments\n")); 11753 usage(B_FALSE); 11754 } 11755 11756 if (showversions) { 11757 if (upgradeall || argc != 0) { 11758 (void) fprintf(stderr, gettext("-v option is " 11759 "incompatible with other arguments\n")); 11760 usage(B_FALSE); 11761 } 11762 } else if (upgradeall) { 11763 if (argc != 0) { 11764 (void) fprintf(stderr, gettext("-a option should not " 11765 "be used along with a pool name\n")); 11766 usage(B_FALSE); 11767 } 11768 } 11769 11770 (void) printf("%s", gettext("This system supports ZFS pool feature " 11771 "flags.\n\n")); 11772 if (showversions) { 11773 int i; 11774 11775 (void) printf(gettext("The following features are " 11776 "supported:\n\n")); 11777 (void) printf(gettext("FEAT DESCRIPTION\n")); 11778 (void) printf("----------------------------------------------" 11779 "---------------\n"); 11780 for (i = 0; i < SPA_FEATURES; i++) { 11781 zfeature_info_t *fi = &spa_feature_table[i]; 11782 if (!fi->fi_zfs_mod_supported) 11783 continue; 11784 const char *ro = 11785 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ? 
11786 " (read-only compatible)" : ""; 11787 11788 (void) printf("%-37s%s\n", fi->fi_uname, ro); 11789 (void) printf(" %s\n", fi->fi_desc); 11790 } 11791 (void) printf("\n"); 11792 11793 (void) printf(gettext("The following legacy versions are also " 11794 "supported:\n\n")); 11795 (void) printf(gettext("VER DESCRIPTION\n")); 11796 (void) printf("--- -----------------------------------------" 11797 "---------------\n"); 11798 (void) printf(gettext(" 1 Initial ZFS version\n")); 11799 (void) printf(gettext(" 2 Ditto blocks " 11800 "(replicated metadata)\n")); 11801 (void) printf(gettext(" 3 Hot spares and double parity " 11802 "RAID-Z\n")); 11803 (void) printf(gettext(" 4 zpool history\n")); 11804 (void) printf(gettext(" 5 Compression using the gzip " 11805 "algorithm\n")); 11806 (void) printf(gettext(" 6 bootfs pool property\n")); 11807 (void) printf(gettext(" 7 Separate intent log devices\n")); 11808 (void) printf(gettext(" 8 Delegated administration\n")); 11809 (void) printf(gettext(" 9 refquota and refreservation " 11810 "properties\n")); 11811 (void) printf(gettext(" 10 Cache devices\n")); 11812 (void) printf(gettext(" 11 Improved scrub performance\n")); 11813 (void) printf(gettext(" 12 Snapshot properties\n")); 11814 (void) printf(gettext(" 13 snapused property\n")); 11815 (void) printf(gettext(" 14 passthrough-x aclinherit\n")); 11816 (void) printf(gettext(" 15 user/group space accounting\n")); 11817 (void) printf(gettext(" 16 stmf property support\n")); 11818 (void) printf(gettext(" 17 Triple-parity RAID-Z\n")); 11819 (void) printf(gettext(" 18 Snapshot user holds\n")); 11820 (void) printf(gettext(" 19 Log device removal\n")); 11821 (void) printf(gettext(" 20 Compression using zle " 11822 "(zero-length encoding)\n")); 11823 (void) printf(gettext(" 21 Deduplication\n")); 11824 (void) printf(gettext(" 22 Received properties\n")); 11825 (void) printf(gettext(" 23 Slim ZIL\n")); 11826 (void) printf(gettext(" 24 System attributes\n")); 11827 (void) printf(gettext(" 25 Improved scrub stats\n")); 11828 (void) printf(gettext(" 26 Improved snapshot deletion " 11829 "performance\n")); 11830 (void) printf(gettext(" 27 Improved snapshot creation " 11831 "performance\n")); 11832 (void) printf(gettext(" 28 Multiple vdev replacements\n")); 11833 (void) printf(gettext("\nFor more information on a particular " 11834 "version, including supported releases,\n")); 11835 (void) printf(gettext("see the ZFS Administration Guide.\n\n")); 11836 } else if (argc == 0 && upgradeall) { 11837 cb.cb_first = B_TRUE; 11838 ret = zpool_iter(g_zfs, upgrade_cb, &cb); 11839 if (ret == 0 && cb.cb_first) { 11840 if (cb.cb_version == SPA_VERSION) { 11841 (void) printf(gettext("All pools are already " 11842 "formatted using feature flags.\n\n")); 11843 (void) printf(gettext("Every feature flags " 11844 "pool already has all supported and " 11845 "requested features enabled.\n")); 11846 } else { 11847 (void) printf(gettext("All pools are already " 11848 "formatted with version %llu or higher.\n"), 11849 (u_longlong_t)cb.cb_version); 11850 } 11851 } 11852 } else if (argc == 0) { 11853 cb.cb_first = B_TRUE; 11854 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb); 11855 assert(ret == 0); 11856 11857 if (cb.cb_first) { 11858 (void) printf(gettext("All pools are formatted " 11859 "using feature flags.\n\n")); 11860 } else { 11861 (void) printf(gettext("\nUse 'zpool upgrade -v' " 11862 "for a list of available legacy versions.\n")); 11863 } 11864 11865 cb.cb_first = B_TRUE; 11866 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, 
&cb); 11867 assert(ret == 0); 11868 11869 if (cb.cb_first) { 11870 (void) printf(gettext("Every feature flags pool has " 11871 "all supported and requested features enabled.\n")); 11872 } else { 11873 (void) printf(gettext("\n")); 11874 } 11875 } else { 11876 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 11877 B_FALSE, upgrade_one, &cb); 11878 } 11879 11880 return (ret); 11881 } 11882 11883 typedef struct hist_cbdata { 11884 boolean_t first; 11885 boolean_t longfmt; 11886 boolean_t internal; 11887 } hist_cbdata_t; 11888 11889 static void 11890 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb) 11891 { 11892 nvlist_t **records; 11893 uint_t numrecords; 11894 int i; 11895 11896 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD, 11897 &records, &numrecords) == 0); 11898 for (i = 0; i < numrecords; i++) { 11899 nvlist_t *rec = records[i]; 11900 char tbuf[64] = ""; 11901 11902 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) { 11903 time_t tsec; 11904 struct tm t; 11905 11906 tsec = fnvlist_lookup_uint64(records[i], 11907 ZPOOL_HIST_TIME); 11908 (void) localtime_r(&tsec, &t); 11909 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 11910 } 11911 11912 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) { 11913 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i], 11914 ZPOOL_HIST_ELAPSED_NS); 11915 (void) snprintf(tbuf + strlen(tbuf), 11916 sizeof (tbuf) - strlen(tbuf), 11917 " (%lldms)", (long long)elapsed_ns / 1000 / 1000); 11918 } 11919 11920 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) { 11921 (void) printf("%s %s", tbuf, 11922 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD)); 11923 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) { 11924 int ievent = 11925 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT); 11926 if (!cb->internal) 11927 continue; 11928 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) { 11929 (void) printf("%s unrecognized record:\n", 11930 tbuf); 11931 dump_nvlist(rec, 4); 11932 continue; 11933 } 11934 (void) printf("%s [internal %s txg:%lld] %s", tbuf, 11935 zfs_history_event_names[ievent], 11936 (longlong_t)fnvlist_lookup_uint64( 11937 rec, ZPOOL_HIST_TXG), 11938 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR)); 11939 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) { 11940 if (!cb->internal) 11941 continue; 11942 (void) printf("%s [txg:%lld] %s", tbuf, 11943 (longlong_t)fnvlist_lookup_uint64( 11944 rec, ZPOOL_HIST_TXG), 11945 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME)); 11946 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) { 11947 (void) printf(" %s (%llu)", 11948 fnvlist_lookup_string(rec, 11949 ZPOOL_HIST_DSNAME), 11950 (u_longlong_t)fnvlist_lookup_uint64(rec, 11951 ZPOOL_HIST_DSID)); 11952 } 11953 (void) printf(" %s", fnvlist_lookup_string(rec, 11954 ZPOOL_HIST_INT_STR)); 11955 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) { 11956 if (!cb->internal) 11957 continue; 11958 (void) printf("%s ioctl %s\n", tbuf, 11959 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL)); 11960 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) { 11961 (void) printf(" input:\n"); 11962 dump_nvlist(fnvlist_lookup_nvlist(rec, 11963 ZPOOL_HIST_INPUT_NVL), 8); 11964 } 11965 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) { 11966 (void) printf(" output:\n"); 11967 dump_nvlist(fnvlist_lookup_nvlist(rec, 11968 ZPOOL_HIST_OUTPUT_NVL), 8); 11969 } 11970 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) { 11971 (void) printf(" output nvlist omitted; " 11972 "original size: %lldKB\n", 11973 (longlong_t)fnvlist_lookup_int64(rec, 11974 ZPOOL_HIST_OUTPUT_SIZE) / 1024); 11975 } 11976 if 
(nvlist_exists(rec, ZPOOL_HIST_ERRNO)) { 11977 (void) printf(" errno: %lld\n", 11978 (longlong_t)fnvlist_lookup_int64(rec, 11979 ZPOOL_HIST_ERRNO)); 11980 } 11981 } else { 11982 if (!cb->internal) 11983 continue; 11984 (void) printf("%s unrecognized record:\n", tbuf); 11985 dump_nvlist(rec, 4); 11986 } 11987 11988 if (!cb->longfmt) { 11989 (void) printf("\n"); 11990 continue; 11991 } 11992 (void) printf(" ["); 11993 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) { 11994 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO); 11995 struct passwd *pwd = getpwuid(who); 11996 (void) printf("user %d ", (int)who); 11997 if (pwd != NULL) 11998 (void) printf("(%s) ", pwd->pw_name); 11999 } 12000 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) { 12001 (void) printf("on %s", 12002 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST)); 12003 } 12004 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) { 12005 (void) printf(":%s", 12006 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE)); 12007 } 12008 12009 (void) printf("]"); 12010 (void) printf("\n"); 12011 } 12012 } 12013 12014 /* 12015 * Print out the command history for a specific pool. 12016 */ 12017 static int 12018 get_history_one(zpool_handle_t *zhp, void *data) 12019 { 12020 nvlist_t *nvhis; 12021 int ret; 12022 hist_cbdata_t *cb = (hist_cbdata_t *)data; 12023 uint64_t off = 0; 12024 boolean_t eof = B_FALSE; 12025 12026 cb->first = B_FALSE; 12027 12028 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp)); 12029 12030 while (!eof) { 12031 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0) 12032 return (ret); 12033 12034 print_history_records(nvhis, cb); 12035 nvlist_free(nvhis); 12036 } 12037 (void) printf("\n"); 12038 12039 return (ret); 12040 } 12041 12042 /* 12043 * zpool history <pool> 12044 * 12045 * Displays the history of commands that modified pools. 
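 *
 * For example (the pool name 'tank' is illustrative):
 *
 *	zpool history tank		commands that modified 'tank'
 *	zpool history -il tank		also include internal events and the
 *					user/host/zone for each record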
12046 */ 12047 int 12048 zpool_do_history(int argc, char **argv) 12049 { 12050 hist_cbdata_t cbdata = { 0 }; 12051 int ret; 12052 int c; 12053 12054 cbdata.first = B_TRUE; 12055 /* check options */ 12056 while ((c = getopt(argc, argv, "li")) != -1) { 12057 switch (c) { 12058 case 'l': 12059 cbdata.longfmt = B_TRUE; 12060 break; 12061 case 'i': 12062 cbdata.internal = B_TRUE; 12063 break; 12064 case '?': 12065 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 12066 optopt); 12067 usage(B_FALSE); 12068 } 12069 } 12070 argc -= optind; 12071 argv += optind; 12072 12073 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 12074 B_FALSE, get_history_one, &cbdata); 12075 12076 if (argc == 0 && cbdata.first == B_TRUE) { 12077 (void) fprintf(stderr, gettext("no pools available\n")); 12078 return (0); 12079 } 12080 12081 return (ret); 12082 } 12083 12084 typedef struct ev_opts { 12085 int verbose; 12086 int scripted; 12087 int follow; 12088 int clear; 12089 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 12090 } ev_opts_t; 12091 12092 static void 12093 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts) 12094 { 12095 char ctime_str[26], str[32]; 12096 const char *ptr; 12097 int64_t *tv; 12098 uint_t n; 12099 12100 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0); 12101 memset(str, ' ', 32); 12102 (void) ctime_r((const time_t *)&tv[0], ctime_str); 12103 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */ 12104 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */ 12105 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */ 12106 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */ 12107 if (opts->scripted) 12108 (void) printf(gettext("%s\t"), str); 12109 else 12110 (void) printf(gettext("%s "), str); 12111 12112 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0); 12113 (void) printf(gettext("%s\n"), ptr); 12114 } 12115 12116 static void 12117 zpool_do_events_nvprint(nvlist_t *nvl, int depth) 12118 { 12119 nvpair_t *nvp; 12120 static char flagstr[256]; 12121 12122 for (nvp = nvlist_next_nvpair(nvl, NULL); 12123 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) { 12124 12125 data_type_t type = nvpair_type(nvp); 12126 const char *name = nvpair_name(nvp); 12127 12128 boolean_t b; 12129 uint8_t i8; 12130 uint16_t i16; 12131 uint32_t i32; 12132 uint64_t i64; 12133 const char *str; 12134 nvlist_t *cnv; 12135 12136 printf(gettext("%*s%s = "), depth, "", name); 12137 12138 switch (type) { 12139 case DATA_TYPE_BOOLEAN: 12140 printf(gettext("%s"), "1"); 12141 break; 12142 12143 case DATA_TYPE_BOOLEAN_VALUE: 12144 (void) nvpair_value_boolean_value(nvp, &b); 12145 printf(gettext("%s"), b ? 
"1" : "0"); 12146 break; 12147 12148 case DATA_TYPE_BYTE: 12149 (void) nvpair_value_byte(nvp, &i8); 12150 printf(gettext("0x%x"), i8); 12151 break; 12152 12153 case DATA_TYPE_INT8: 12154 (void) nvpair_value_int8(nvp, (void *)&i8); 12155 printf(gettext("0x%x"), i8); 12156 break; 12157 12158 case DATA_TYPE_UINT8: 12159 (void) nvpair_value_uint8(nvp, &i8); 12160 printf(gettext("0x%x"), i8); 12161 break; 12162 12163 case DATA_TYPE_INT16: 12164 (void) nvpair_value_int16(nvp, (void *)&i16); 12165 printf(gettext("0x%x"), i16); 12166 break; 12167 12168 case DATA_TYPE_UINT16: 12169 (void) nvpair_value_uint16(nvp, &i16); 12170 printf(gettext("0x%x"), i16); 12171 break; 12172 12173 case DATA_TYPE_INT32: 12174 (void) nvpair_value_int32(nvp, (void *)&i32); 12175 printf(gettext("0x%x"), i32); 12176 break; 12177 12178 case DATA_TYPE_UINT32: 12179 (void) nvpair_value_uint32(nvp, &i32); 12180 if (strcmp(name, 12181 FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 || 12182 strcmp(name, 12183 FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) { 12184 zfs_valstr_zio_stage(i32, flagstr, 12185 sizeof (flagstr)); 12186 printf(gettext("0x%x [%s]"), i32, flagstr); 12187 } else if (strcmp(name, 12188 FM_EREPORT_PAYLOAD_ZFS_ZIO_TYPE) == 0) { 12189 zfs_valstr_zio_type(i32, flagstr, 12190 sizeof (flagstr)); 12191 printf(gettext("0x%x [%s]"), i32, flagstr); 12192 } else if (strcmp(name, 12193 FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) { 12194 zfs_valstr_zio_priority(i32, flagstr, 12195 sizeof (flagstr)); 12196 printf(gettext("0x%x [%s]"), i32, flagstr); 12197 } else { 12198 printf(gettext("0x%x"), i32); 12199 } 12200 break; 12201 12202 case DATA_TYPE_INT64: 12203 (void) nvpair_value_int64(nvp, (void *)&i64); 12204 printf(gettext("0x%llx"), (u_longlong_t)i64); 12205 break; 12206 12207 case DATA_TYPE_UINT64: 12208 (void) nvpair_value_uint64(nvp, &i64); 12209 /* 12210 * translate vdev state values to readable 12211 * strings to aide zpool events consumers 12212 */ 12213 if (strcmp(name, 12214 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 || 12215 strcmp(name, 12216 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) { 12217 printf(gettext("\"%s\" (0x%llx)"), 12218 zpool_state_to_name(i64, VDEV_AUX_NONE), 12219 (u_longlong_t)i64); 12220 } else if (strcmp(name, 12221 FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) { 12222 zfs_valstr_zio_flag(i64, flagstr, 12223 sizeof (flagstr)); 12224 printf(gettext("0x%llx [%s]"), 12225 (u_longlong_t)i64, flagstr); 12226 } else { 12227 printf(gettext("0x%llx"), (u_longlong_t)i64); 12228 } 12229 break; 12230 12231 case DATA_TYPE_HRTIME: 12232 (void) nvpair_value_hrtime(nvp, (void *)&i64); 12233 printf(gettext("0x%llx"), (u_longlong_t)i64); 12234 break; 12235 12236 case DATA_TYPE_STRING: 12237 (void) nvpair_value_string(nvp, &str); 12238 printf(gettext("\"%s\""), str ? 
str : "<NULL>"); 12239 break; 12240 12241 case DATA_TYPE_NVLIST: 12242 printf(gettext("(embedded nvlist)\n")); 12243 (void) nvpair_value_nvlist(nvp, &cnv); 12244 zpool_do_events_nvprint(cnv, depth + 8); 12245 printf(gettext("%*s(end %s)"), depth, "", name); 12246 break; 12247 12248 case DATA_TYPE_NVLIST_ARRAY: { 12249 nvlist_t **val; 12250 uint_t i, nelem; 12251 12252 (void) nvpair_value_nvlist_array(nvp, &val, &nelem); 12253 printf(gettext("(%d embedded nvlists)\n"), nelem); 12254 for (i = 0; i < nelem; i++) { 12255 printf(gettext("%*s%s[%d] = %s\n"), 12256 depth, "", name, i, "(embedded nvlist)"); 12257 zpool_do_events_nvprint(val[i], depth + 8); 12258 printf(gettext("%*s(end %s[%i])\n"), 12259 depth, "", name, i); 12260 } 12261 printf(gettext("%*s(end %s)\n"), depth, "", name); 12262 } 12263 break; 12264 12265 case DATA_TYPE_INT8_ARRAY: { 12266 int8_t *val; 12267 uint_t i, nelem; 12268 12269 (void) nvpair_value_int8_array(nvp, &val, &nelem); 12270 for (i = 0; i < nelem; i++) 12271 printf(gettext("0x%x "), val[i]); 12272 12273 break; 12274 } 12275 12276 case DATA_TYPE_UINT8_ARRAY: { 12277 uint8_t *val; 12278 uint_t i, nelem; 12279 12280 (void) nvpair_value_uint8_array(nvp, &val, &nelem); 12281 for (i = 0; i < nelem; i++) 12282 printf(gettext("0x%x "), val[i]); 12283 12284 break; 12285 } 12286 12287 case DATA_TYPE_INT16_ARRAY: { 12288 int16_t *val; 12289 uint_t i, nelem; 12290 12291 (void) nvpair_value_int16_array(nvp, &val, &nelem); 12292 for (i = 0; i < nelem; i++) 12293 printf(gettext("0x%x "), val[i]); 12294 12295 break; 12296 } 12297 12298 case DATA_TYPE_UINT16_ARRAY: { 12299 uint16_t *val; 12300 uint_t i, nelem; 12301 12302 (void) nvpair_value_uint16_array(nvp, &val, &nelem); 12303 for (i = 0; i < nelem; i++) 12304 printf(gettext("0x%x "), val[i]); 12305 12306 break; 12307 } 12308 12309 case DATA_TYPE_INT32_ARRAY: { 12310 int32_t *val; 12311 uint_t i, nelem; 12312 12313 (void) nvpair_value_int32_array(nvp, &val, &nelem); 12314 for (i = 0; i < nelem; i++) 12315 printf(gettext("0x%x "), val[i]); 12316 12317 break; 12318 } 12319 12320 case DATA_TYPE_UINT32_ARRAY: { 12321 uint32_t *val; 12322 uint_t i, nelem; 12323 12324 (void) nvpair_value_uint32_array(nvp, &val, &nelem); 12325 for (i = 0; i < nelem; i++) 12326 printf(gettext("0x%x "), val[i]); 12327 12328 break; 12329 } 12330 12331 case DATA_TYPE_INT64_ARRAY: { 12332 int64_t *val; 12333 uint_t i, nelem; 12334 12335 (void) nvpair_value_int64_array(nvp, &val, &nelem); 12336 for (i = 0; i < nelem; i++) 12337 printf(gettext("0x%llx "), 12338 (u_longlong_t)val[i]); 12339 12340 break; 12341 } 12342 12343 case DATA_TYPE_UINT64_ARRAY: { 12344 uint64_t *val; 12345 uint_t i, nelem; 12346 12347 (void) nvpair_value_uint64_array(nvp, &val, &nelem); 12348 for (i = 0; i < nelem; i++) 12349 printf(gettext("0x%llx "), 12350 (u_longlong_t)val[i]); 12351 12352 break; 12353 } 12354 12355 case DATA_TYPE_STRING_ARRAY: { 12356 const char **str; 12357 uint_t i, nelem; 12358 12359 (void) nvpair_value_string_array(nvp, &str, &nelem); 12360 for (i = 0; i < nelem; i++) 12361 printf(gettext("\"%s\" "), 12362 str[i] ? 
str[i] : "<NULL>"); 12363 12364 break; 12365 } 12366 12367 case DATA_TYPE_BOOLEAN_ARRAY: 12368 case DATA_TYPE_BYTE_ARRAY: 12369 case DATA_TYPE_DOUBLE: 12370 case DATA_TYPE_DONTCARE: 12371 case DATA_TYPE_UNKNOWN: 12372 printf(gettext("<unknown>")); 12373 break; 12374 } 12375 12376 printf(gettext("\n")); 12377 } 12378 } 12379 12380 static int 12381 zpool_do_events_next(ev_opts_t *opts) 12382 { 12383 nvlist_t *nvl; 12384 int zevent_fd, ret, dropped; 12385 const char *pool; 12386 12387 zevent_fd = open(ZFS_DEV, O_RDWR); 12388 VERIFY(zevent_fd >= 0); 12389 12390 if (!opts->scripted) 12391 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS"); 12392 12393 while (1) { 12394 ret = zpool_events_next(g_zfs, &nvl, &dropped, 12395 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd); 12396 if (ret || nvl == NULL) 12397 break; 12398 12399 if (dropped > 0) 12400 (void) printf(gettext("dropped %d events\n"), dropped); 12401 12402 if (strlen(opts->poolname) > 0 && 12403 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 && 12404 strcmp(opts->poolname, pool) != 0) 12405 continue; 12406 12407 zpool_do_events_short(nvl, opts); 12408 12409 if (opts->verbose) { 12410 zpool_do_events_nvprint(nvl, 8); 12411 printf(gettext("\n")); 12412 } 12413 (void) fflush(stdout); 12414 12415 nvlist_free(nvl); 12416 } 12417 12418 VERIFY0(close(zevent_fd)); 12419 12420 return (ret); 12421 } 12422 12423 static int 12424 zpool_do_events_clear(void) 12425 { 12426 int count, ret; 12427 12428 ret = zpool_events_clear(g_zfs, &count); 12429 if (!ret) 12430 (void) printf(gettext("cleared %d events\n"), count); 12431 12432 return (ret); 12433 } 12434 12435 /* 12436 * zpool events [-vHf [pool] | -c] 12437 * 12438 * Displays events logs by ZFS. 12439 */ 12440 int 12441 zpool_do_events(int argc, char **argv) 12442 { 12443 ev_opts_t opts = { 0 }; 12444 int ret; 12445 int c; 12446 12447 /* check options */ 12448 while ((c = getopt(argc, argv, "vHfc")) != -1) { 12449 switch (c) { 12450 case 'v': 12451 opts.verbose = 1; 12452 break; 12453 case 'H': 12454 opts.scripted = 1; 12455 break; 12456 case 'f': 12457 opts.follow = 1; 12458 break; 12459 case 'c': 12460 opts.clear = 1; 12461 break; 12462 case '?': 12463 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 12464 optopt); 12465 usage(B_FALSE); 12466 } 12467 } 12468 argc -= optind; 12469 argv += optind; 12470 12471 if (argc > 1) { 12472 (void) fprintf(stderr, gettext("too many arguments\n")); 12473 usage(B_FALSE); 12474 } else if (argc == 1) { 12475 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname)); 12476 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) { 12477 (void) fprintf(stderr, 12478 gettext("invalid pool name '%s'\n"), opts.poolname); 12479 usage(B_FALSE); 12480 } 12481 } 12482 12483 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) && 12484 opts.clear) { 12485 (void) fprintf(stderr, 12486 gettext("invalid options combined with -c\n")); 12487 usage(B_FALSE); 12488 } 12489 12490 if (opts.clear) 12491 ret = zpool_do_events_clear(); 12492 else 12493 ret = zpool_do_events_next(&opts); 12494 12495 return (ret); 12496 } 12497 12498 static int 12499 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data) 12500 { 12501 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 12502 char value[ZFS_MAXPROPLEN]; 12503 zprop_source_t srctype; 12504 nvlist_t *props, *item, *d; 12505 props = item = d = NULL; 12506 12507 if (cbp->cb_json) { 12508 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs"); 12509 if (d == NULL) { 12510 fprintf(stderr, "vdevs 
obj not found.\n"); 12511 exit(1); 12512 } 12513 props = fnvlist_alloc(); 12514 } 12515 12516 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL; 12517 pl = pl->pl_next) { 12518 char *prop_name; 12519 /* 12520 * If the first property is pool name, it is a special 12521 * placeholder that we can skip. This will also skip 12522 * over the name property when 'all' is specified. 12523 */ 12524 if (pl->pl_prop == ZPOOL_PROP_NAME && 12525 pl == cbp->cb_proplist) 12526 continue; 12527 12528 if (pl->pl_prop == ZPROP_INVAL) { 12529 prop_name = pl->pl_user_prop; 12530 } else { 12531 prop_name = (char *)vdev_prop_to_name(pl->pl_prop); 12532 } 12533 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop, 12534 prop_name, value, sizeof (value), &srctype, 12535 cbp->cb_literal) == 0) { 12536 zprop_collect_property(vdevname, cbp, prop_name, 12537 value, srctype, NULL, NULL, props); 12538 } 12539 } 12540 12541 if (cbp->cb_json) { 12542 if (!nvlist_empty(props)) { 12543 item = fnvlist_alloc(); 12544 fill_vdev_info(item, zhp, vdevname, B_TRUE, 12545 cbp->cb_json_as_int); 12546 fnvlist_add_nvlist(item, "properties", props); 12547 fnvlist_add_nvlist(d, vdevname, item); 12548 fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d); 12549 fnvlist_free(item); 12550 } 12551 fnvlist_free(props); 12552 } 12553 12554 return (0); 12555 } 12556 12557 static int 12558 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data) 12559 { 12560 zpool_handle_t *zhp = zhp_data; 12561 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 12562 char *vdevname; 12563 const char *type; 12564 int ret; 12565 12566 /* 12567 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the 12568 * pool name for display purposes, which is not desired. Fall back to 12569 * zpool_vdev_name() when not dealing with the root vdev.
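 * (In other words, for the root vdev we want the literal name "root-0"
 * in the output rather than the pool's own name.)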
12570 */ 12571 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE); 12572 if (zhp != NULL && strcmp(type, "root") == 0) 12573 vdevname = strdup("root-0"); 12574 else 12575 vdevname = zpool_vdev_name(g_zfs, zhp, nv, 12576 cbp->cb_vdevs.cb_name_flags); 12577 12578 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist); 12579 12580 ret = get_callback_vdev(zhp, vdevname, data); 12581 12582 free(vdevname); 12583 12584 return (ret); 12585 } 12586 12587 static int 12588 get_callback(zpool_handle_t *zhp, void *data) 12589 { 12590 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 12591 char value[ZFS_MAXPROPLEN]; 12592 zprop_source_t srctype; 12593 zprop_list_t *pl; 12594 int vid; 12595 int err = 0; 12596 nvlist_t *props, *item, *d; 12597 props = item = d = NULL; 12598 12599 if (cbp->cb_type == ZFS_TYPE_VDEV) { 12600 if (cbp->cb_json) { 12601 nvlist_t *pool = fnvlist_alloc(); 12602 fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int); 12603 fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool); 12604 fnvlist_free(pool); 12605 } 12606 12607 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) { 12608 for_each_vdev(zhp, get_callback_vdev_cb, data); 12609 } else { 12610 /* Adjust column widths for vdev properties */ 12611 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; 12612 vid++) { 12613 vdev_expand_proplist(zhp, 12614 cbp->cb_vdevs.cb_names[vid], 12615 &cbp->cb_proplist); 12616 } 12617 /* Display the properties */ 12618 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; 12619 vid++) { 12620 get_callback_vdev(zhp, 12621 cbp->cb_vdevs.cb_names[vid], data); 12622 } 12623 } 12624 } else { 12625 assert(cbp->cb_type == ZFS_TYPE_POOL); 12626 if (cbp->cb_json) { 12627 d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools"); 12628 if (d == NULL) { 12629 fprintf(stderr, "pools obj not found.\n"); 12630 exit(1); 12631 } 12632 props = fnvlist_alloc(); 12633 } 12634 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { 12635 /* 12636 * Skip the special fake placeholder. This will also 12637 * skip over the name property when 'all' is specified. 
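 * (The placeholder is the fake_name entry for ZPOOL_PROP_NAME that
 * zpool_do_get() prepends to cb_proplist to size the NAME column.)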
12638 */ 12639 if (pl->pl_prop == ZPOOL_PROP_NAME && 12640 pl == cbp->cb_proplist) 12641 continue; 12642 12643 if (pl->pl_prop == ZPROP_INVAL && 12644 zfs_prop_user(pl->pl_user_prop)) { 12645 srctype = ZPROP_SRC_LOCAL; 12646 12647 if (zpool_get_userprop(zhp, pl->pl_user_prop, 12648 value, sizeof (value), &srctype) != 0) 12649 continue; 12650 12651 err = zprop_collect_property( 12652 zpool_get_name(zhp), cbp, pl->pl_user_prop, 12653 value, srctype, NULL, NULL, props); 12654 } else if (pl->pl_prop == ZPROP_INVAL && 12655 (zpool_prop_feature(pl->pl_user_prop) || 12656 zpool_prop_unsupported(pl->pl_user_prop))) { 12657 srctype = ZPROP_SRC_LOCAL; 12658 12659 if (zpool_prop_get_feature(zhp, 12660 pl->pl_user_prop, value, 12661 sizeof (value)) == 0) { 12662 err = zprop_collect_property( 12663 zpool_get_name(zhp), cbp, 12664 pl->pl_user_prop, value, srctype, 12665 NULL, NULL, props); 12666 } 12667 } else { 12668 if (zpool_get_prop(zhp, pl->pl_prop, value, 12669 sizeof (value), &srctype, 12670 cbp->cb_literal) != 0) 12671 continue; 12672 12673 err = zprop_collect_property( 12674 zpool_get_name(zhp), cbp, 12675 zpool_prop_to_name(pl->pl_prop), 12676 value, srctype, NULL, NULL, props); 12677 } 12678 if (err != 0) 12679 return (err); 12680 } 12681 12682 if (cbp->cb_json) { 12683 if (!nvlist_empty(props)) { 12684 item = fnvlist_alloc(); 12685 fill_pool_info(item, zhp, B_TRUE, 12686 cbp->cb_json_as_int); 12687 fnvlist_add_nvlist(item, "properties", props); 12688 if (cbp->cb_json_pool_key_guid) { 12689 char buf[256]; 12690 uint64_t guid = fnvlist_lookup_uint64( 12691 zpool_get_config(zhp, NULL), 12692 ZPOOL_CONFIG_POOL_GUID); 12693 snprintf(buf, 256, "%llu", 12694 (u_longlong_t)guid); 12695 fnvlist_add_nvlist(d, buf, item); 12696 } else { 12697 const char *name = zpool_get_name(zhp); 12698 fnvlist_add_nvlist(d, name, item); 12699 } 12700 fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d); 12701 fnvlist_free(item); 12702 } 12703 fnvlist_free(props); 12704 } 12705 } 12706 12707 return (0); 12708 } 12709 12710 /* 12711 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ... 12712 * 12713 * -H Scripted mode. Don't display headers, and separate properties 12714 * by a single tab. 12715 * -o List of columns to display. Defaults to 12716 * "name,property,value,source". 12717 * -p Display values in parsable (exact) format. 12718 * -j Display output in JSON format. 12719 * --json-int Display numbers as integers instead of strings. 12720 * --json-pool-key-guid Set pool GUID as key for pool objects. 12721 * 12722 * Get properties of pools in the system. Output space statistics 12723 * for each one as well as other attributes. 12724 */ 12725 int 12726 zpool_do_get(int argc, char **argv) 12727 { 12728 zprop_get_cbdata_t cb = { 0 }; 12729 zprop_list_t fake_name = { 0 }; 12730 int ret; 12731 int c, i; 12732 char *propstr = NULL; 12733 char *vdev = NULL; 12734 nvlist_t *data = NULL; 12735 12736 cb.cb_first = B_TRUE; 12737 12738 /* 12739 * Set up default columns and sources. 
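 * These match the documented -o defaults: name, property, value, source.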
12740 */ 12741 cb.cb_sources = ZPROP_SRC_ALL; 12742 cb.cb_columns[0] = GET_COL_NAME; 12743 cb.cb_columns[1] = GET_COL_PROPERTY; 12744 cb.cb_columns[2] = GET_COL_VALUE; 12745 cb.cb_columns[3] = GET_COL_SOURCE; 12746 cb.cb_type = ZFS_TYPE_POOL; 12747 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; 12748 current_prop_type = cb.cb_type; 12749 12750 struct option long_options[] = { 12751 {"json", no_argument, NULL, 'j'}, 12752 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT}, 12753 {"json-pool-key-guid", no_argument, NULL, 12754 ZPOOL_OPTION_POOL_KEY_GUID}, 12755 {0, 0, 0, 0} 12756 }; 12757 12758 /* check options */ 12759 while ((c = getopt_long(argc, argv, ":jHpo:", long_options, 12760 NULL)) != -1) { 12761 switch (c) { 12762 case 'p': 12763 cb.cb_literal = B_TRUE; 12764 break; 12765 case 'H': 12766 cb.cb_scripted = B_TRUE; 12767 break; 12768 case 'j': 12769 cb.cb_json = B_TRUE; 12770 cb.cb_jsobj = zpool_json_schema(0, 1); 12771 data = fnvlist_alloc(); 12772 break; 12773 case ZPOOL_OPTION_POOL_KEY_GUID: 12774 cb.cb_json_pool_key_guid = B_TRUE; 12775 break; 12776 case ZPOOL_OPTION_JSON_NUMS_AS_INT: 12777 cb.cb_json_as_int = B_TRUE; 12778 cb.cb_literal = B_TRUE; 12779 break; 12780 case 'o': 12781 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns)); 12782 i = 0; 12783 12784 for (char *tok; (tok = strsep(&optarg, ",")); ) { 12785 static const char *const col_opts[] = 12786 { "name", "property", "value", "source", 12787 "all" }; 12788 static const zfs_get_column_t col_cols[] = 12789 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE, 12790 GET_COL_SOURCE }; 12791 12792 if (i == ZFS_GET_NCOLS - 1) { 12793 (void) fprintf(stderr, gettext("too " 12794 "many fields given to -o " 12795 "option\n")); 12796 usage(B_FALSE); 12797 } 12798 12799 for (c = 0; c < ARRAY_SIZE(col_opts); ++c) 12800 if (strcmp(tok, col_opts[c]) == 0) 12801 goto found; 12802 12803 (void) fprintf(stderr, 12804 gettext("invalid column name '%s'\n"), tok); 12805 usage(B_FALSE); 12806 12807 found: 12808 if (c >= 4) { 12809 if (i > 0) { 12810 (void) fprintf(stderr, 12811 gettext("\"all\" conflicts " 12812 "with specific fields " 12813 "given to -o option\n")); 12814 usage(B_FALSE); 12815 } 12816 12817 memcpy(cb.cb_columns, col_cols, 12818 sizeof (col_cols)); 12819 i = ZFS_GET_NCOLS - 1; 12820 } else 12821 cb.cb_columns[i++] = col_cols[c]; 12822 } 12823 break; 12824 case '?': 12825 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 12826 optopt); 12827 usage(B_FALSE); 12828 } 12829 } 12830 12831 argc -= optind; 12832 argv += optind; 12833 12834 if (!cb.cb_json && cb.cb_json_as_int) { 12835 (void) fprintf(stderr, gettext("'--json-int' only works with" 12836 " '-j' option\n")); 12837 usage(B_FALSE); 12838 } 12839 12840 if (!cb.cb_json && cb.cb_json_pool_key_guid) { 12841 (void) fprintf(stderr, gettext("'json-pool-key-guid' only" 12842 " works with '-j' option\n")); 12843 usage(B_FALSE); 12844 } 12845 12846 if (argc < 1) { 12847 (void) fprintf(stderr, gettext("missing property " 12848 "argument\n")); 12849 usage(B_FALSE); 12850 } 12851 12852 /* Properties list is needed later by zprop_get_list() */ 12853 propstr = argv[0]; 12854 12855 argc--; 12856 argv++; 12857 12858 if (argc == 0) { 12859 /* No args, so just print the defaults. 
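 * With no pool arguments, for_each_pool() below is handed an empty
 * list and iterates over every imported pool.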
*/ 12860 } else if (are_all_pools(argc, argv)) { 12861 /* All the args are pool names */ 12862 } else if (are_all_pools(1, argv)) { 12863 /* The first arg is a pool name */ 12864 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) || 12865 (argc == 2 && strcmp(argv[1], "root") == 0) || 12866 are_vdevs_in_pool(argc - 1, argv + 1, argv[0], 12867 &cb.cb_vdevs)) { 12868 12869 if (strcmp(argv[1], "root") == 0) 12870 vdev = strdup("root-0"); 12871 12872 /* ... and the rest are vdev names */ 12873 if (vdev == NULL) 12874 cb.cb_vdevs.cb_names = argv + 1; 12875 else 12876 cb.cb_vdevs.cb_names = &vdev; 12877 12878 cb.cb_vdevs.cb_names_count = argc - 1; 12879 cb.cb_type = ZFS_TYPE_VDEV; 12880 argc = 1; /* One pool to process */ 12881 } else { 12882 if (cb.cb_json) { 12883 nvlist_free(cb.cb_jsobj); 12884 nvlist_free(data); 12885 } 12886 fprintf(stderr, gettext("Expected a list of vdevs in" 12887 " \"%s\", but got:\n"), argv[0]); 12888 error_list_unresolved_vdevs(argc - 1, argv + 1, 12889 argv[0], &cb.cb_vdevs); 12890 fprintf(stderr, "\n"); 12891 usage(B_FALSE); 12892 return (1); 12893 } 12894 } else { 12895 if (cb.cb_json) { 12896 nvlist_free(cb.cb_jsobj); 12897 nvlist_free(data); 12898 } 12899 /* 12900 * The first arg isn't the name of a valid pool. 12901 */ 12902 fprintf(stderr, gettext("Cannot get properties of %s: " 12903 "no such pool available.\n"), argv[0]); 12904 return (1); 12905 } 12906 12907 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist, 12908 cb.cb_type) != 0) { 12909 /* Use correct list of valid properties (pool or vdev) */ 12910 current_prop_type = cb.cb_type; 12911 usage(B_FALSE); 12912 } 12913 12914 if (cb.cb_proplist != NULL) { 12915 fake_name.pl_prop = ZPOOL_PROP_NAME; 12916 fake_name.pl_width = strlen(gettext("NAME")); 12917 fake_name.pl_next = cb.cb_proplist; 12918 cb.cb_proplist = &fake_name; 12919 } 12920 12921 if (cb.cb_json) { 12922 if (cb.cb_type == ZFS_TYPE_VDEV) 12923 fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data); 12924 else 12925 fnvlist_add_nvlist(cb.cb_jsobj, "pools", data); 12926 fnvlist_free(data); 12927 } 12928 12929 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type, 12930 cb.cb_literal, get_callback, &cb); 12931 12932 if (ret == 0 && cb.cb_json) 12933 zcmd_print_json(cb.cb_jsobj); 12934 else if (ret != 0 && cb.cb_json) 12935 nvlist_free(cb.cb_jsobj); 12936 12937 if (cb.cb_proplist == &fake_name) 12938 zprop_free_list(fake_name.pl_next); 12939 else 12940 zprop_free_list(cb.cb_proplist); 12941 12942 if (vdev != NULL) 12943 free(vdev); 12944 12945 return (ret); 12946 } 12947 12948 typedef struct set_cbdata { 12949 char *cb_propname; 12950 char *cb_value; 12951 zfs_type_t cb_type; 12952 vdev_cbdata_t cb_vdevs; 12953 boolean_t cb_any_successful; 12954 } set_cbdata_t; 12955 12956 static int 12957 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb) 12958 { 12959 int error; 12960 12961 /* Check if we have out-of-bounds features */ 12962 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) { 12963 boolean_t features[SPA_FEATURES]; 12964 if (zpool_do_load_compat(cb->cb_value, features) != 12965 ZPOOL_COMPATIBILITY_OK) 12966 return (-1); 12967 12968 nvlist_t *enabled = zpool_get_features(zhp); 12969 spa_feature_t i; 12970 for (i = 0; i < SPA_FEATURES; i++) { 12971 const char *fguid = spa_feature_table[i].fi_guid; 12972 if (nvlist_exists(enabled, fguid) && !features[i]) 12973 break; 12974 } 12975 if (i < SPA_FEATURES) 12976 (void) fprintf(stderr, gettext("Warning: one or " 12977 "more features already enabled on pool '%s'\n" 12978 "are not 
present in this compatibility set.\n"), 12979 zpool_get_name(zhp)); 12980 } 12981 12982 /* if we're setting a feature, check it's in compatibility set */ 12983 if (zpool_prop_feature(cb->cb_propname) && 12984 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) { 12985 char *fname = strchr(cb->cb_propname, '@') + 1; 12986 spa_feature_t f; 12987 12988 if (zfeature_lookup_name(fname, &f) == 0) { 12989 char compat[ZFS_MAXPROPLEN]; 12990 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, 12991 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 12992 compat[0] = '\0'; 12993 12994 boolean_t features[SPA_FEATURES]; 12995 if (zpool_do_load_compat(compat, features) != 12996 ZPOOL_COMPATIBILITY_OK) { 12997 (void) fprintf(stderr, gettext("Error: " 12998 "cannot enable feature '%s' on pool '%s'\n" 12999 "because the pool's 'compatibility' " 13000 "property cannot be parsed.\n"), 13001 fname, zpool_get_name(zhp)); 13002 return (-1); 13003 } 13004 13005 if (!features[f]) { 13006 (void) fprintf(stderr, gettext("Error: " 13007 "cannot enable feature '%s' on pool '%s'\n" 13008 "as it is not specified in this pool's " 13009 "current compatibility set.\n" 13010 "Consider setting 'compatibility' to a " 13011 "less restrictive set, or to 'off'.\n"), 13012 fname, zpool_get_name(zhp)); 13013 return (-1); 13014 } 13015 } 13016 } 13017 13018 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value); 13019 13020 return (error); 13021 } 13022 13023 static int 13024 set_callback(zpool_handle_t *zhp, void *data) 13025 { 13026 int error; 13027 set_cbdata_t *cb = (set_cbdata_t *)data; 13028 13029 if (cb->cb_type == ZFS_TYPE_VDEV) { 13030 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names, 13031 cb->cb_propname, cb->cb_value); 13032 } else { 13033 assert(cb->cb_type == ZFS_TYPE_POOL); 13034 error = set_pool_callback(zhp, cb); 13035 } 13036 13037 cb->cb_any_successful = !error; 13038 return (error); 13039 } 13040 13041 int 13042 zpool_do_set(int argc, char **argv) 13043 { 13044 set_cbdata_t cb = { 0 }; 13045 int error; 13046 char *vdev = NULL; 13047 13048 current_prop_type = ZFS_TYPE_POOL; 13049 if (argc > 1 && argv[1][0] == '-') { 13050 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 13051 argv[1][1]); 13052 usage(B_FALSE); 13053 } 13054 13055 if (argc < 2) { 13056 (void) fprintf(stderr, gettext("missing property=value " 13057 "argument\n")); 13058 usage(B_FALSE); 13059 } 13060 13061 if (argc < 3) { 13062 (void) fprintf(stderr, gettext("missing pool name\n")); 13063 usage(B_FALSE); 13064 } 13065 13066 if (argc > 4) { 13067 (void) fprintf(stderr, gettext("too many pool names\n")); 13068 usage(B_FALSE); 13069 } 13070 13071 cb.cb_propname = argv[1]; 13072 cb.cb_type = ZFS_TYPE_POOL; 13073 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; 13074 cb.cb_value = strchr(cb.cb_propname, '='); 13075 if (cb.cb_value == NULL) { 13076 (void) fprintf(stderr, gettext("missing value in " 13077 "property=value argument\n")); 13078 usage(B_FALSE); 13079 } 13080 13081 *(cb.cb_value) = '\0'; 13082 cb.cb_value++; 13083 argc -= 2; 13084 argv += 2; 13085 13086 /* argv[0] is pool name */ 13087 if (!is_pool(argv[0])) { 13088 (void) fprintf(stderr, 13089 gettext("cannot open '%s': is not a pool\n"), argv[0]); 13090 return (EINVAL); 13091 } 13092 13093 /* argv[1], when supplied, is vdev name */ 13094 if (argc == 2) { 13095 13096 if (strcmp(argv[1], "root") == 0) 13097 vdev = strdup("root-0"); 13098 else 13099 vdev = strdup(argv[1]); 13100 13101 if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) { 13102 (void) fprintf(stderr, gettext( 13103 
"cannot find '%s' in '%s': device not in pool\n"), 13104 vdev, argv[0]); 13105 free(vdev); 13106 return (EINVAL); 13107 } 13108 cb.cb_vdevs.cb_names = &vdev; 13109 cb.cb_vdevs.cb_names_count = 1; 13110 cb.cb_type = ZFS_TYPE_VDEV; 13111 } 13112 13113 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 13114 B_FALSE, set_callback, &cb); 13115 13116 if (vdev != NULL) 13117 free(vdev); 13118 13119 return (error); 13120 } 13121 13122 /* Add up the total number of bytes left to initialize/trim across all vdevs */ 13123 static uint64_t 13124 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity) 13125 { 13126 uint64_t bytes_remaining; 13127 nvlist_t **child; 13128 uint_t c, children; 13129 vdev_stat_t *vs; 13130 13131 assert(activity == ZPOOL_WAIT_INITIALIZE || 13132 activity == ZPOOL_WAIT_TRIM); 13133 13134 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 13135 (uint64_t **)&vs, &c) == 0); 13136 13137 if (activity == ZPOOL_WAIT_INITIALIZE && 13138 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) 13139 bytes_remaining = vs->vs_initialize_bytes_est - 13140 vs->vs_initialize_bytes_done; 13141 else if (activity == ZPOOL_WAIT_TRIM && 13142 vs->vs_trim_state == VDEV_TRIM_ACTIVE) 13143 bytes_remaining = vs->vs_trim_bytes_est - 13144 vs->vs_trim_bytes_done; 13145 else 13146 bytes_remaining = 0; 13147 13148 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 13149 &child, &children) != 0) 13150 children = 0; 13151 13152 for (c = 0; c < children; c++) 13153 bytes_remaining += vdev_activity_remaining(child[c], activity); 13154 13155 return (bytes_remaining); 13156 } 13157 13158 /* Add up the total number of bytes left to rebuild across top-level vdevs */ 13159 static uint64_t 13160 vdev_activity_top_remaining(nvlist_t *nv) 13161 { 13162 uint64_t bytes_remaining = 0; 13163 nvlist_t **child; 13164 uint_t children; 13165 int error; 13166 13167 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 13168 &child, &children) != 0) 13169 children = 0; 13170 13171 for (uint_t c = 0; c < children; c++) { 13172 vdev_rebuild_stat_t *vrs; 13173 uint_t i; 13174 13175 error = nvlist_lookup_uint64_array(child[c], 13176 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i); 13177 if (error == 0) { 13178 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 13179 bytes_remaining += (vrs->vrs_bytes_est - 13180 vrs->vrs_bytes_rebuilt); 13181 } 13182 } 13183 } 13184 13185 return (bytes_remaining); 13186 } 13187 13188 /* Whether any vdevs are 'spare' or 'replacing' vdevs */ 13189 static boolean_t 13190 vdev_any_spare_replacing(nvlist_t *nv) 13191 { 13192 nvlist_t **child; 13193 uint_t c, children; 13194 const char *vdev_type; 13195 13196 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type); 13197 13198 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 || 13199 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 || 13200 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) { 13201 return (B_TRUE); 13202 } 13203 13204 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 13205 &child, &children) != 0) 13206 children = 0; 13207 13208 for (c = 0; c < children; c++) { 13209 if (vdev_any_spare_replacing(child[c])) 13210 return (B_TRUE); 13211 } 13212 13213 return (B_FALSE); 13214 } 13215 13216 typedef struct wait_data { 13217 char *wd_poolname; 13218 boolean_t wd_scripted; 13219 boolean_t wd_exact; 13220 boolean_t wd_headers_once; 13221 boolean_t wd_should_exit; 13222 /* Which activities to wait for */ 13223 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES]; 13224 float wd_interval; 13225 pthread_cond_t 
wd_cv; 13226 pthread_mutex_t wd_mutex; 13227 } wait_data_t; 13228 13229 /* 13230 * Print to stdout a single line, containing one column for each activity that 13231 * we are waiting for specifying how many bytes of work are left for that 13232 * activity. 13233 */ 13234 static void 13235 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row) 13236 { 13237 nvlist_t *config, *nvroot; 13238 uint_t c; 13239 int i; 13240 pool_checkpoint_stat_t *pcs = NULL; 13241 pool_scan_stat_t *pss = NULL; 13242 pool_removal_stat_t *prs = NULL; 13243 pool_raidz_expand_stat_t *pres = NULL; 13244 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE", 13245 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"}; 13246 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES]; 13247 13248 /* Calculate the width of each column */ 13249 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 13250 /* 13251 * Make sure we have enough space in the col for pretty-printed 13252 * numbers and for the column header, and then leave a couple 13253 * spaces between cols for readability. 13254 */ 13255 col_widths[i] = MAX(strlen(headers[i]), 6) + 2; 13256 } 13257 13258 if (timestamp_fmt != NODATE) 13259 print_timestamp(timestamp_fmt); 13260 13261 /* Print header if appropriate */ 13262 int term_height = terminal_height(); 13263 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 && 13264 row % (term_height-1) == 0); 13265 if (!wd->wd_scripted && (row == 0 || reprint_header)) { 13266 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 13267 if (wd->wd_enabled[i]) 13268 (void) printf("%*s", col_widths[i], headers[i]); 13269 } 13270 (void) fputc('\n', stdout); 13271 } 13272 13273 /* Bytes of work remaining in each activity */ 13274 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0}; 13275 13276 bytes_rem[ZPOOL_WAIT_FREE] = 13277 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL); 13278 13279 config = zpool_get_config(zhp, NULL); 13280 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 13281 13282 (void) nvlist_lookup_uint64_array(nvroot, 13283 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 13284 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 13285 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space; 13286 13287 (void) nvlist_lookup_uint64_array(nvroot, 13288 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 13289 if (prs != NULL && prs->prs_state == DSS_SCANNING) 13290 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy - 13291 prs->prs_copied; 13292 13293 (void) nvlist_lookup_uint64_array(nvroot, 13294 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c); 13295 if (pss != NULL && pss->pss_state == DSS_SCANNING && 13296 pss->pss_pass_scrub_pause == 0) { 13297 int64_t rem = pss->pss_to_examine - pss->pss_issued; 13298 if (pss->pss_func == POOL_SCAN_SCRUB) 13299 bytes_rem[ZPOOL_WAIT_SCRUB] = rem; 13300 else 13301 bytes_rem[ZPOOL_WAIT_RESILVER] = rem; 13302 } else if (check_rebuilding(nvroot, NULL)) { 13303 bytes_rem[ZPOOL_WAIT_RESILVER] = 13304 vdev_activity_top_remaining(nvroot); 13305 } 13306 13307 (void) nvlist_lookup_uint64_array(nvroot, 13308 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c); 13309 if (pres != NULL && pres->pres_state == DSS_SCANNING) { 13310 int64_t rem = pres->pres_to_reflow - pres->pres_reflowed; 13311 bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem; 13312 } 13313 13314 bytes_rem[ZPOOL_WAIT_INITIALIZE] = 13315 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE); 13316 bytes_rem[ZPOOL_WAIT_TRIM] = 13317 vdev_activity_remaining(nvroot, 
ZPOOL_WAIT_TRIM); 13318 13319 /* 13320 * A replace finishes after resilvering finishes, so the amount of work 13321 * left for a replace is the same as for resilvering. 13322 * 13323 * It isn't quite correct to say that if we have any 'spare' or 13324 * 'replacing' vdevs and a resilver is happening, then a replace is in 13325 * progress, like we do here. When a hot spare is used, the faulted vdev 13326 * is not removed after the hot spare is resilvered, so parent 'spare' 13327 * vdev is not removed either. So we could have a 'spare' vdev, but be 13328 * resilvering for a different reason. However, we use it as a heuristic 13329 * because we don't have access to the DTLs, which could tell us whether 13330 * or not we have really finished resilvering a hot spare. 13331 */ 13332 if (vdev_any_spare_replacing(nvroot)) 13333 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER]; 13334 13335 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 13336 char buf[64]; 13337 if (!wd->wd_enabled[i]) 13338 continue; 13339 13340 if (wd->wd_exact) { 13341 (void) snprintf(buf, sizeof (buf), "%" PRIi64, 13342 bytes_rem[i]); 13343 } else { 13344 zfs_nicenum(bytes_rem[i], buf, sizeof (buf)); 13345 } 13346 13347 if (wd->wd_scripted) 13348 (void) printf(i == 0 ? "%s" : "\t%s", buf); 13349 else 13350 (void) printf(" %*s", col_widths[i] - 1, buf); 13351 } 13352 (void) printf("\n"); 13353 (void) fflush(stdout); 13354 } 13355 13356 static void * 13357 wait_status_thread(void *arg) 13358 { 13359 wait_data_t *wd = (wait_data_t *)arg; 13360 zpool_handle_t *zhp; 13361 13362 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL) 13363 return (void *)(1); 13364 13365 for (int row = 0; ; row++) { 13366 boolean_t missing; 13367 struct timespec timeout; 13368 int ret = 0; 13369 (void) clock_gettime(CLOCK_REALTIME, &timeout); 13370 13371 if (zpool_refresh_stats(zhp, &missing) != 0 || missing || 13372 zpool_props_refresh(zhp) != 0) { 13373 zpool_close(zhp); 13374 return (void *)(uintptr_t)(missing ? 0 : 1); 13375 } 13376 13377 print_wait_status_row(wd, zhp, row); 13378 13379 timeout.tv_sec += floor(wd->wd_interval); 13380 long nanos = timeout.tv_nsec + 13381 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC; 13382 if (nanos >= NANOSEC) { 13383 timeout.tv_sec++; 13384 timeout.tv_nsec = nanos - NANOSEC; 13385 } else { 13386 timeout.tv_nsec = nanos; 13387 } 13388 pthread_mutex_lock(&wd->wd_mutex); 13389 if (!wd->wd_should_exit) 13390 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex, 13391 &timeout); 13392 pthread_mutex_unlock(&wd->wd_mutex); 13393 if (ret == 0) { 13394 break; /* signaled by main thread */ 13395 } else if (ret != ETIMEDOUT) { 13396 (void) fprintf(stderr, gettext("pthread_cond_timedwait " 13397 "failed: %s\n"), strerror(ret)); 13398 zpool_close(zhp); 13399 return (void *)(uintptr_t)(1); 13400 } 13401 } 13402 13403 zpool_close(zhp); 13404 return (void *)(0); 13405 } 13406 13407 int 13408 zpool_do_wait(int argc, char **argv) 13409 { 13410 boolean_t verbose = B_FALSE; 13411 int c, i; 13412 unsigned long count; 13413 pthread_t status_thr; 13414 int error = 0; 13415 zpool_handle_t *zhp; 13416 13417 wait_data_t wd; 13418 wd.wd_scripted = B_FALSE; 13419 wd.wd_exact = B_FALSE; 13420 wd.wd_headers_once = B_FALSE; 13421 wd.wd_should_exit = B_FALSE; 13422 13423 pthread_mutex_init(&wd.wd_mutex, NULL); 13424 pthread_cond_init(&wd.wd_cv, NULL); 13425 13426 /* By default, wait for all types of activity. 
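 * The -t option below narrows this set (e.g. 'zpool wait -t
 * scrub,resilver tank', where 'tank' is an illustrative pool name).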
*/ 13427 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) 13428 wd.wd_enabled[i] = B_TRUE; 13429 13430 while ((c = getopt(argc, argv, "HnpT:t:")) != -1) { 13431 switch (c) { 13432 case 'H': 13433 wd.wd_scripted = B_TRUE; 13434 break; 13435 case 'n': 13436 wd.wd_headers_once = B_TRUE; 13437 break; 13438 case 'p': 13439 wd.wd_exact = B_TRUE; 13440 break; 13441 case 'T': 13442 get_timestamp_arg(*optarg); 13443 break; 13444 case 't': 13445 /* Reset activities array */ 13446 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled)); 13447 13448 for (char *tok; (tok = strsep(&optarg, ",")); ) { 13449 static const char *const col_opts[] = { 13450 "discard", "free", "initialize", "replace", 13451 "remove", "resilver", "scrub", "trim", 13452 "raidz_expand" }; 13453 13454 for (i = 0; i < ARRAY_SIZE(col_opts); ++i) 13455 if (strcmp(tok, col_opts[i]) == 0) { 13456 wd.wd_enabled[i] = B_TRUE; 13457 goto found; 13458 } 13459 13460 (void) fprintf(stderr, 13461 gettext("invalid activity '%s'\n"), tok); 13462 usage(B_FALSE); 13463 found:; 13464 } 13465 break; 13466 case '?': 13467 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 13468 optopt); 13469 usage(B_FALSE); 13470 } 13471 } 13472 13473 argc -= optind; 13474 argv += optind; 13475 13476 get_interval_count(&argc, argv, &wd.wd_interval, &count); 13477 if (count != 0) { 13478 /* This subcmd only accepts an interval, not a count */ 13479 (void) fprintf(stderr, gettext("too many arguments\n")); 13480 usage(B_FALSE); 13481 } 13482 13483 if (wd.wd_interval != 0) 13484 verbose = B_TRUE; 13485 13486 if (argc < 1) { 13487 (void) fprintf(stderr, gettext("missing 'pool' argument\n")); 13488 usage(B_FALSE); 13489 } 13490 if (argc > 1) { 13491 (void) fprintf(stderr, gettext("too many arguments\n")); 13492 usage(B_FALSE); 13493 } 13494 13495 wd.wd_poolname = argv[0]; 13496 13497 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL) 13498 return (1); 13499 13500 if (verbose) { 13501 /* 13502 * We use a separate thread for printing status updates because 13503 * the main thread will call lzc_wait(), which blocks as long 13504 * as an activity is in progress, which can be a long time. 13505 */ 13506 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd) 13507 != 0) { 13508 (void) fprintf(stderr, gettext("failed to create status " 13509 "thread: %s\n"), strerror(errno)); 13510 zpool_close(zhp); 13511 return (1); 13512 } 13513 } 13514 13515 /* 13516 * Loop over all activities that we are supposed to wait for until none 13517 * of them are in progress. Note that this means we can end up waiting 13518 * for more activities to complete than just those that were in progress 13519 * when we began waiting; if an activity we are interested in begins 13520 * while we are waiting for another activity, we will wait for both to 13521 * complete before exiting.
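 *
 * Each pass of the loop below re-checks every enabled activity, and we
 * only return once a full pass completes without having had to wait.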
13522 */ 13523 for (;;) { 13524 boolean_t missing = B_FALSE; 13525 boolean_t any_waited = B_FALSE; 13526 13527 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 13528 boolean_t waited; 13529 13530 if (!wd.wd_enabled[i]) 13531 continue; 13532 13533 error = zpool_wait_status(zhp, i, &missing, &waited); 13534 if (error != 0 || missing) 13535 break; 13536 13537 any_waited = (any_waited || waited); 13538 } 13539 13540 if (error != 0 || missing || !any_waited) 13541 break; 13542 } 13543 13544 zpool_close(zhp); 13545 13546 if (verbose) { 13547 uintptr_t status; 13548 pthread_mutex_lock(&wd.wd_mutex); 13549 wd.wd_should_exit = B_TRUE; 13550 pthread_cond_signal(&wd.wd_cv); 13551 pthread_mutex_unlock(&wd.wd_mutex); 13552 (void) pthread_join(status_thr, (void *)&status); 13553 if (status != 0) 13554 error = status; 13555 } 13556 13557 pthread_mutex_destroy(&wd.wd_mutex); 13558 pthread_cond_destroy(&wd.wd_cv); 13559 return (error); 13560 } 13561 13562 /* 13563 * zpool ddtprune -d|-p <amount> <pool> 13564 * 13565 * -d <days> Prune entries <days> old and older 13566 * -p <percent> Prune <percent> amount of entries 13567 * 13568 * Prune single reference entries from DDT to satisfy the amount specified. 13569 */ 13570 int 13571 zpool_do_ddt_prune(int argc, char **argv) 13572 { 13573 zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE; 13574 uint64_t amount = 0; 13575 zpool_handle_t *zhp; 13576 char *endptr; 13577 int c; 13578 13579 while ((c = getopt(argc, argv, "d:p:")) != -1) { 13580 switch (c) { 13581 case 'd': 13582 if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) { 13583 (void) fprintf(stderr, gettext("-d cannot be " 13584 "combined with -p option\n")); 13585 usage(B_FALSE); 13586 } 13587 errno = 0; 13588 amount = strtoull(optarg, &endptr, 0); 13589 if (errno != 0 || *endptr != '\0' || amount == 0) { 13590 (void) fprintf(stderr, 13591 gettext("invalid days value\n")); 13592 usage(B_FALSE); 13593 } 13594 amount *= 86400; /* convert days to seconds */ 13595 unit = ZPOOL_DDT_PRUNE_AGE; 13596 break; 13597 case 'p': 13598 if (unit == ZPOOL_DDT_PRUNE_AGE) { 13599 (void) fprintf(stderr, gettext("-p cannot be " 13600 "combined with -d option\n")); 13601 usage(B_FALSE); 13602 } 13603 errno = 0; 13604 amount = strtoull(optarg, &endptr, 0); 13605 if (errno != 0 || *endptr != '\0' || 13606 amount == 0 || amount > 100) { 13607 (void) fprintf(stderr, 13608 gettext("invalid percentage value\n")); 13609 usage(B_FALSE); 13610 } 13611 unit = ZPOOL_DDT_PRUNE_PERCENTAGE; 13612 break; 13613 case '?': 13614 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 13615 optopt); 13616 usage(B_FALSE); 13617 } 13618 } 13619 argc -= optind; 13620 argv += optind; 13621 13622 if (unit == ZPOOL_DDT_PRUNE_NONE) { 13623 (void) fprintf(stderr, 13624 gettext("missing amount option (-d|-p <value>)\n")); 13625 usage(B_FALSE); 13626 } else if (argc < 1) { 13627 (void) fprintf(stderr, gettext("missing pool argument\n")); 13628 usage(B_FALSE); 13629 } else if (argc > 1) { 13630 (void) fprintf(stderr, gettext("too many arguments\n")); 13631 usage(B_FALSE); 13632 } 13633 zhp = zpool_open(g_zfs, argv[0]); 13634 if (zhp == NULL) 13635 return (-1); 13636 13637 int error = zpool_ddt_prune(zhp, unit, amount); 13638 13639 zpool_close(zhp); 13640 13641 return (error); 13642 } 13643 13644 static int 13645 find_command_idx(const char *command, int *idx) 13646 { 13647 for (int i = 0; i < NCOMMAND; ++i) { 13648 if (command_table[i].name == NULL) 13649 continue; 13650 13651 if (strcmp(command, command_table[i].name) == 0) { 13652 *idx = i; 13653 return (0); 
13654 } 13655 } 13656 return (1); 13657 } 13658 13659 /* 13660 * Display version message 13661 */ 13662 static int 13663 zpool_do_version(int argc, char **argv) 13664 { 13665 int c; 13666 nvlist_t *jsobj = NULL, *zfs_ver = NULL; 13667 boolean_t json = B_FALSE; 13668 13669 struct option long_options[] = { 13670 {"json", no_argument, NULL, 'j'}, {0, 0, 0, 0} 13671 }; 13672 13673 while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) { 13674 switch (c) { 13675 case 'j': 13676 json = B_TRUE; 13677 jsobj = zpool_json_schema(0, 1); 13678 break; 13679 case '?': 13680 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 13681 optopt); 13682 usage(B_FALSE); 13683 } 13684 } 13685 13686 argc -= optind; 13687 if (argc != 0) { 13688 (void) fprintf(stderr, "too many arguments\n"); 13689 usage(B_FALSE); 13690 } 13691 13692 if (json) { 13693 zfs_ver = zfs_version_nvlist(); 13694 if (zfs_ver) { 13695 fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver); 13696 zcmd_print_json(jsobj); 13697 fnvlist_free(zfs_ver); 13698 return (0); 13699 } else 13700 return (-1); 13701 } else 13702 return (zfs_version_print() != 0); 13703 } 13704 13705 /* Display documentation */ 13706 static int 13707 zpool_do_help(int argc, char **argv) 13708 { 13709 char page[MAXNAMELEN]; 13710 if (argc < 3 || strcmp(argv[2], "zpool") == 0) 13711 strcpy(page, "zpool"); 13712 else if (strcmp(argv[2], "concepts") == 0 || 13713 strcmp(argv[2], "props") == 0) 13714 snprintf(page, sizeof (page), "zpool%s", argv[2]); 13715 else 13716 snprintf(page, sizeof (page), "zpool-%s", argv[2]); 13717 13718 execlp("man", "man", page, NULL); 13719 13720 fprintf(stderr, "couldn't run man program: %s\n", strerror(errno)); 13721 return (-1); 13722 } 13723 13724 /* 13725 * Do zpool_load_compat() and print error message on failure 13726 */ 13727 static zpool_compat_status_t 13728 zpool_do_load_compat(const char *compat, boolean_t *list) 13729 { 13730 char report[1024]; 13731 13732 zpool_compat_status_t ret; 13733 13734 ret = zpool_load_compat(compat, list, report, 1024); 13735 switch (ret) { 13736 13737 case ZPOOL_COMPATIBILITY_OK: 13738 break; 13739 13740 case ZPOOL_COMPATIBILITY_NOFILES: 13741 case ZPOOL_COMPATIBILITY_BADFILE: 13742 case ZPOOL_COMPATIBILITY_BADTOKEN: 13743 (void) fprintf(stderr, "Error: %s\n", report); 13744 break; 13745 13746 case ZPOOL_COMPATIBILITY_WARNTOKEN: 13747 (void) fprintf(stderr, "Warning: %s\n", report); 13748 ret = ZPOOL_COMPATIBILITY_OK; 13749 break; 13750 } 13751 return (ret); 13752 } 13753 13754 int 13755 main(int argc, char **argv) 13756 { 13757 int ret = 0; 13758 int i = 0; 13759 char *cmdname; 13760 char **newargv; 13761 13762 (void) setlocale(LC_ALL, ""); 13763 (void) setlocale(LC_NUMERIC, "C"); 13764 (void) textdomain(TEXT_DOMAIN); 13765 srand(time(NULL)); 13766 13767 opterr = 0; 13768 13769 /* 13770 * Make sure the user has specified some command. 13771 */ 13772 if (argc < 2) { 13773 (void) fprintf(stderr, gettext("missing command\n")); 13774 usage(B_FALSE); 13775 } 13776 13777 cmdname = argv[1]; 13778 13779 /* 13780 * Special case '-?' and '--help'
13781 */ 13782 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0) 13783 usage(B_TRUE); 13784 13785 /* 13786 * Special case '-V|--version' 13787 */ 13788 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0)) 13789 return (zfs_version_print() != 0); 13790 13791 /* 13792 * Special case 'help' 13793 */ 13794 if (strcmp(cmdname, "help") == 0) 13795 return (zpool_do_help(argc, argv)); 13796 13797 if ((g_zfs = libzfs_init()) == NULL) { 13798 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno)); 13799 return (1); 13800 } 13801 13802 libzfs_print_on_error(g_zfs, B_TRUE); 13803 13804 zfs_save_arguments(argc, argv, history_str, sizeof (history_str)); 13805 13806 /* 13807 * Many commands modify input strings for string parsing reasons. 13808 * We create a copy to protect the original argv. 13809 */ 13810 newargv = safe_malloc((argc + 1) * sizeof (newargv[0])); 13811 for (i = 0; i < argc; i++) 13812 newargv[i] = strdup(argv[i]); 13813 newargv[argc] = NULL; 13814 13815 /* 13816 * Run the appropriate command. 13817 */ 13818 if (find_command_idx(cmdname, &i) == 0) { 13819 current_command = &command_table[i]; 13820 ret = command_table[i].func(argc - 1, newargv + 1); 13821 } else if (strchr(cmdname, '=')) { 13822 verify(find_command_idx("set", &i) == 0); 13823 current_command = &command_table[i]; 13824 ret = command_table[i].func(argc, newargv); 13825 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) { 13826 /* 13827 * 'freeze' is a vile debugging abomination, so we treat 13828 * it as such. 13829 */ 13830 zfs_cmd_t zc = {"\0"}; 13831 13832 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name)); 13833 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc); 13834 if (ret != 0) { 13835 (void) fprintf(stderr, 13836 gettext("failed to freeze pool: %d\n"), errno); 13837 ret = 1; 13838 } 13839 13840 log_history = 0; 13841 } else { 13842 (void) fprintf(stderr, gettext("unrecognized " 13843 "command '%s'\n"), cmdname); 13844 usage(B_FALSE); 13845 ret = 1; 13846 } 13847 13848 for (i = 0; i < argc; i++) 13849 free(newargv[i]); 13850 free(newargv); 13851 13852 if (ret == 0 && log_history) 13853 (void) zpool_log_history(g_zfs, history_str); 13854 13855 libzfs_fini(g_zfs); 13856 13857 /* 13858 * The 'ZFS_ABORT' environment variable causes us to dump core on exit 13859 * for the purposes of running ::findleaks. 13860 */ 13861 if (getenv("ZFS_ABORT") != NULL) { 13862 (void) printf("dumping core by request\n"); 13863 abort(); 13864 } 13865 13866 return (ret); 13867 } 13868