/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2021, Klara Inc.
 * Copyright [2021] Hewlett Packard Enterprise Development LP
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>

#include <math.h>

#include <libzfs.h>
#include <libzutil.h>

#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

#include "statcommon.h"

libzfs_handle_t *g_zfs;

static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif

typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;


/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
	IOS_DEFAULT = 0,
	IOS_LATENCY = 1,
	IOS_QUEUES = 2,
	IOS_L_HISTO = 3,
	IOS_RQ_HISTO = 4,
	IOS_COUNT,	/* always last element */
};

/* iostat_type entries as bitmasks */
#define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
#define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
#define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
#define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
#define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define	IOS_ANYHISTO_M	(IOS_L_HISTO_M | IOS_RQ_HISTO_M)

/*
 * Lookup table for iostat flags to nvlist names. Basically a list
 * of all the nvlists a flag requires. Also specifies the order in
 * which data gets printed in zpool iostat.
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
	[IOS_L_HISTO] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_LATENCY] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_QUEUES] = {
	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    NULL},
	[IOS_RQ_HISTO] = {
	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    NULL},
};


/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at one time, so we can
 * just do a highbit64(a)
 */
#define	IOS_HISTO_IDX(a)	(highbit64(a & IOS_ANYHISTO_M) - 1)
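
/*
 * Worked example (added for illustration, not part of the original source):
 * with cb_flags equal to IOS_L_HISTO_M (1ULL << 3), highbit64() returns 4,
 * so IOS_HISTO_IDX yields 3 == IOS_L_HISTO, which selects the
 * latency-histogram row of vsx_type_to_nvlist above.
 */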
typedef struct zpool_command {
	const char	*name;
	int		(*func)(int, char **);
	zpool_help_t	usage;
} zpool_command_t;

/*
 * Master command table.  Each ZFS command has a name, associated function, and
 * usage message.  The usage messages need to be internationalized, so we have
 * to have a function to return the usage message based on a command index.
 *
 * These commands are organized according to how they are displayed in the usage
 * message.  An empty command (one with a NULL name) indicates an empty line in
 * the generic usage message.
 */
static zpool_command_t command_table[] = {
	{ "version",	zpool_do_version,	HELP_VERSION },
	{ NULL },
	{ "create",	zpool_do_create,	HELP_CREATE },
	{ "destroy",	zpool_do_destroy,	HELP_DESTROY },
	{ NULL },
	{ "add",	zpool_do_add,		HELP_ADD },
	{ "remove",	zpool_do_remove,	HELP_REMOVE },
	{ NULL },
	{ "labelclear",	zpool_do_labelclear,	HELP_LABELCLEAR },
	{ NULL },
	{ "checkpoint",	zpool_do_checkpoint,	HELP_CHECKPOINT },
	{ NULL },
	{ "list",	zpool_do_list,		HELP_LIST },
	{ "iostat",	zpool_do_iostat,	HELP_IOSTAT },
	{ "status",	zpool_do_status,	HELP_STATUS },
	{ NULL },
	{ "online",	zpool_do_online,	HELP_ONLINE },
	{ "offline",	zpool_do_offline,	HELP_OFFLINE },
	{ "clear",	zpool_do_clear,		HELP_CLEAR },
	{ "reopen",	zpool_do_reopen,	HELP_REOPEN },
	{ NULL },
	{ "attach",	zpool_do_attach,	HELP_ATTACH },
	{ "detach",	zpool_do_detach,	HELP_DETACH },
	{ "replace",	zpool_do_replace,	HELP_REPLACE },
	{ "split",	zpool_do_split,		HELP_SPLIT },
	{ NULL },
	{ "initialize",	zpool_do_initialize,	HELP_INITIALIZE },
	{ "resilver",	zpool_do_resilver,	HELP_RESILVER },
	{ "scrub",	zpool_do_scrub,		HELP_SCRUB },
	{ "trim",	zpool_do_trim,		HELP_TRIM },
	{ NULL },
	{ "import",	zpool_do_import,	HELP_IMPORT },
	{ "export",	zpool_do_export,	HELP_EXPORT },
	{ "upgrade",	zpool_do_upgrade,	HELP_UPGRADE },
	{ "reguid",	zpool_do_reguid,	HELP_REGUID },
	{ NULL },
	{ "history",	zpool_do_history,	HELP_HISTORY },
	{ "events",	zpool_do_events,	HELP_EVENTS },
	{ NULL },
	{ "get",	zpool_do_get,		HELP_GET },
	{ "set",	zpool_do_set,		HELP_SET },
	{ "sync",	zpool_do_sync,		HELP_SYNC },
	{ NULL },
	{ "wait",	zpool_do_wait,		HELP_WAIT },
};

#define	NCOMMAND	(ARRAY_SIZE(command_table))

#define	VDEV_ALLOC_CLASS_LOGS	"logs"

static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;

static const char *
get_usage(zpool_help_t idx)
{
	switch (idx) {
	case HELP_ADD:
		return (gettext("\tadd [-fgLnP] [-o property=value] "
		    "<pool> <vdev> ...\n"));
	case HELP_ATTACH:
		return (gettext("\tattach [-fsw] [-o property=value] "
		    "<pool> <device> <new-device>\n"));
	case HELP_CLEAR:
		return (gettext("\tclear [-nF] <pool> [device]\n"));
	case HELP_CREATE:
		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
		    "\t [-O file-system-property=value] ... \n"
		    "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
	case HELP_CHECKPOINT:
		return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
	case HELP_DESTROY:
		return (gettext("\tdestroy [-f] <pool>\n"));
	case HELP_DETACH:
		return (gettext("\tdetach <pool> <device>\n"));
	case HELP_EXPORT:
		return (gettext("\texport [-af] <pool> ...\n"));
	case HELP_HISTORY:
		return (gettext("\thistory [-il] [<pool>] ...\n"));
	case HELP_IMPORT:
		return (gettext("\timport [-d dir] [-D]\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]] -a\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]]\n"
		    "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
	case HELP_IOSTAT:
		return (gettext("\tiostat [[[-c [script1,script2,...]"
		    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
		    "\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
		    " [[-n] interval [count]]\n"));
	case HELP_LABELCLEAR:
		return (gettext("\tlabelclear [-f] <vdev>\n"));
	case HELP_LIST:
		return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
		    "[-T d|u] [pool] ... \n"
		    "\t [interval [count]]\n"));
	case HELP_OFFLINE:
		return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
	case HELP_ONLINE:
		return (gettext("\tonline [-e] <pool> <device> ...\n"));
	case HELP_REPLACE:
		return (gettext("\treplace [-fsw] [-o property=value] "
		    "<pool> <device> [new-device]\n"));
	case HELP_REMOVE:
		return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
	case HELP_REOPEN:
		return (gettext("\treopen [-n] <pool>\n"));
	case HELP_INITIALIZE:
		return (gettext("\tinitialize [-c | -s] [-w] <pool> "
		    "[<device> ...]\n"));
	case HELP_SCRUB:
		return (gettext("\tscrub [-s | -p] [-w] <pool> ...\n"));
	case HELP_RESILVER:
		return (gettext("\tresilver <pool> ...\n"));
	case HELP_TRIM:
		return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
		    "[<device> ...]\n"));
	case HELP_STATUS:
		return (gettext("\tstatus [-c [script1,script2,...]] "
		    "[-igLpPstvxD] [-T d|u] [pool] ... \n"
		    "\t [interval [count]]\n"));
	case HELP_UPGRADE:
		return (gettext("\tupgrade\n"
		    "\tupgrade -v\n"
		    "\tupgrade [-V version] <-a | pool ...>\n"));
	case HELP_EVENTS:
		return (gettext("\tevents [-vHf [pool] | -c]\n"));
	case HELP_GET:
		return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
		    "<\"all\" | property[,...]> <pool> ...\n"));
	case HELP_SET:
		return (gettext("\tset <property=value> <pool>\n"
		    "\tset <vdev_property=value> <pool> <vdev>\n"));
	case HELP_SPLIT:
		return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
		    "\t [-o property=value] <pool> <newpool> "
		    "[<device> ...]\n"));
	case HELP_REGUID:
		return (gettext("\treguid <pool>\n"));
	case HELP_SYNC:
		return (gettext("\tsync [pool] ...\n"));
	case HELP_VERSION:
		return (gettext("\tversion\n"));
	case HELP_WAIT:
		return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
		    "<pool> [interval]\n"));
	default:
		__builtin_unreachable();
	}
}

static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
	uint_t children = 0;
	nvlist_t **child;
	uint_t i;

	(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (children == 0) {
		char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
		    VDEV_NAME_PATH);

		if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
		    strcmp(path, VDEV_TYPE_HOLE) != 0)
			fnvlist_add_boolean(res, path);

		free(path);
		return;
	}

	for (i = 0; i < children; i++) {
		zpool_collect_leaves(zhp, child[i], res);
	}
}
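
/*
 * Illustration (hypothetical device paths, added for clarity and not part of
 * the original source): for a two-disk mirror, zpool_collect_leaves() leaves
 * 'res' holding one boolean entry per leaf, keyed by path, e.g. "/dev/sda1"
 * and "/dev/sdb1"; indirect and hole vdevs are skipped.
 */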
/*
 * Callback routine that will print out a pool property value.
 */
static int
print_pool_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));

	if (zpool_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (zpool_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", zpool_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Callback routine that will print out a vdev property value.
 */
static int
print_vdev_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));

	if (vdev_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (vdev_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", vdev_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Display usage message.  If we're inside a command, display only the usage for
 * that command.  Otherwise, iterate over the entire command table and display
 * a complete usage message.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	FILE *fp = requested ? stdout : stderr;

	if (current_command == NULL) {
		int i;

		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
		(void) fprintf(fp,
		    gettext("where 'command' is one of the following:\n\n"));

		for (i = 0; i < NCOMMAND; i++) {
			if (command_table[i].name == NULL)
				(void) fprintf(fp, "\n");
			else
				(void) fprintf(fp, "%s",
				    get_usage(command_table[i].usage));
		}
	} else {
		(void) fprintf(fp, gettext("usage:\n"));
		(void) fprintf(fp, "%s", get_usage(current_command->usage));
	}

	if (current_command != NULL &&
	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
	    ((strcmp(current_command->name, "set") == 0) ||
	    (strcmp(current_command->name, "get") == 0) ||
	    (strcmp(current_command->name, "list") == 0))) {

		(void) fprintf(fp, "%s",
		    gettext("\nthe following properties are supported:\n"));

		(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
		    "PROPERTY", "EDIT", "VALUES");

		/* Iterate over all properties */
		if (current_prop_type == ZFS_TYPE_POOL) {
			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);

			(void) fprintf(fp, "\t%-19s ", "feature@...");
			(void) fprintf(fp, "YES "
			    "disabled | enabled | active\n");

			(void) fprintf(fp, gettext("\nThe feature@ properties "
			    "must be appended with a feature name.\n"
			    "See zpool-features(7).\n"));
		} else if (current_prop_type == ZFS_TYPE_VDEV) {
			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);
		}
	}

	/*
	 * See comments at end of main().
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	exit(requested ? 0 : 2);
}

/*
 * zpool initialize [-c | -s] [-w] <pool> [<vdev> ...]
 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
 * if none specified.
 *
 *	-c	Cancel. Ends active initializing.
 *	-s	Suspend. Initializing can then be restarted with no flags.
 *	-w	Wait. Blocks until initializing has completed.
 */
int
zpool_do_initialize(int argc, char **argv)
{
	int c;
	char *poolname;
	zpool_handle_t *zhp;
	nvlist_t *vdevs;
	int err = 0;
	boolean_t wait = B_FALSE;

	struct option long_options[] = {
		{"cancel",	no_argument,		NULL, 'c'},
		{"suspend",	no_argument,		NULL, 's'},
		{"wait",	no_argument,		NULL, 'w'},
		{0, 0, 0, 0}
	};

	pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
	while ((c = getopt_long(argc, argv, "csw", long_options, NULL)) != -1) {
		switch (c) {
		case 'c':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_CANCEL;
			break;
		case 's':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_SUSPEND;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
		return (-1);
	}

	if (wait && (cmd_type != POOL_INITIALIZE_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c or "
		    "-s\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];
	zhp = zpool_open(g_zfs, poolname);
	if (zhp == NULL)
		return (-1);

	vdevs = fnvlist_alloc();
	if (argc == 1) {
		/* no individual leaf vdevs specified, so add them all */
		nvlist_t *config = zpool_get_config(zhp, NULL);
		nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
		    ZPOOL_CONFIG_VDEV_TREE);
		zpool_collect_leaves(zhp, nvroot, vdevs);
	} else {
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
	}

	if (wait)
		err = zpool_initialize_wait(zhp, cmd_type, vdevs);
	else
		err = zpool_initialize(zhp, cmd_type, vdevs);

	fnvlist_free(vdevs);
	zpool_close(zhp);

	return (err);
}
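
/*
 * Example invocations (hypothetical pool/device names, added for
 * illustration): "zpool initialize tank" starts initializing every leaf vdev
 * in the pool, "zpool initialize -w tank sdb" waits for a single device to
 * finish, and "zpool initialize -s tank" suspends a run that can later be
 * restarted by invoking the command again with no flags.
 */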
/*
 * print a pool vdev config for dry runs
 */
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
    const char *match, int name_flags)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	boolean_t printed = B_FALSE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (name != NULL)
			(void) printf("\t%*s%s\n", indent, "", name);
		return;
	}

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *class = (char *)"";

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);

		if (is_hole == B_TRUE) {
			continue;
		}

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			class = (char *)VDEV_ALLOC_BIAS_LOG;
		(void) nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
		if (strcmp(match, class) != 0)
			continue;

		if (!printed && name != NULL) {
			(void) printf("\t%*s%s\n", indent, "", name);
			printed = B_TRUE;
		}
		vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
		print_vdev_tree(zhp, vname, child[c], indent + 2, "",
		    name_flags);
		free(vname);
	}
}

/*
 * Print the list of l2cache devices for dry runs.
 */
static void
print_cache_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "cache");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

/*
 * Print the list of spares for dry runs.
 */
static void
print_spare_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "spares");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
	nvpair_t *nvp;
	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
	    nvp = nvlist_next_nvpair(proplist, nvp)) {
		if (zpool_prop_feature(nvpair_name(nvp)))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Add a property pair (name, string-value) into a property nvlist.
 */
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
	zpool_prop_t prop = ZPOOL_PROP_INVAL;
	nvlist_t *proplist;
	const char *normnm;
	char *strval;

	if (*props == NULL &&
	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		return (1);
	}

	proplist = *props;

	if (poolprop) {
		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
		const char *cname =
		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
		    (!zpool_prop_feature(propname) &&
		    !zpool_prop_vdev(propname))) {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid pool or vdev property\n"), propname);
			return (2);
		}

		/*
		 * feature@ properties and version should not be specified
		 * at the same time.
		 */
		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    prop_list_contains_feature(proplist))) {
			(void) fprintf(stderr, gettext("'feature@' and "
			    "'version' properties cannot be specified "
			    "together\n"));
			return (2);
		}

		/*
		 * if version is specified, only "legacy" compatibility
		 * may be requested
		 */
		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    nvlist_exists(proplist, cname) &&
		    strcmp(fnvlist_lookup_string(proplist, cname),
		    ZPOOL_COMPAT_LEGACY) != 0)) {
			(void) fprintf(stderr, gettext("when 'version' is "
			    "specified, the 'compatibility' feature may only "
			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
			return (2);
		}

		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
			normnm = propname;
		else
			normnm = zpool_prop_to_name(prop);
	} else {
		zfs_prop_t fsprop = zfs_name_to_prop(propname);

		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
		    B_FALSE)) {
			normnm = zfs_prop_to_name(fsprop);
		} else if (zfs_prop_user(propname) ||
		    zfs_prop_userquota(propname)) {
			normnm = propname;
		} else {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid filesystem property\n"), propname);
			return (2);
		}
	}

	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
	    prop != ZPOOL_PROP_CACHEFILE) {
		(void) fprintf(stderr, gettext("property '%s' "
		    "specified multiple times\n"), propname);
		return (2);
	}

	if (nvlist_add_string(proplist, normnm, propval) != 0) {
		(void) fprintf(stderr, gettext("internal "
		    "error: out of memory\n"));
		return (1);
	}

	return (0);
}

/*
 * Set a default property pair (name, string-value) in a property nvlist
 */
static int
add_prop_list_default(const char *propname, const char *propval,
    nvlist_t **props)
{
	char *pval;

	if (nvlist_lookup_string(*props, propname, &pval) == 0)
		return (0);

	return (add_prop_list(propname, propval, props, B_TRUE));
}
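
/*
 * Example (hypothetical values, added for illustration): parsing
 * "-o ashift=12" ends up calling add_prop_list("ashift", "12", &props,
 * B_TRUE); the name is normalized via zpool_name_to_prop() and the pair is
 * stored in 'props', and a second "-o ashift=..." would then fail with
 * "specified multiple times".
 */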
/*
 * zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
 *
 *	-f	Force addition of devices, even if they appear in use
 *	-g	Display guid for individual vdev name.
 *	-L	Follow links when resolving vdev path name.
 *	-n	Do not add the devices, but display the resulting layout if
 *		they were to be added.
 *	-o	Set property=value.
 *	-P	Display full path for vdev name.
 *
 * Adds the given vdevs to 'pool'.  As with create, the bulk of this work is
 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
 * libzfs.
 */
int
zpool_do_add(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	boolean_t dryrun = B_FALSE;
	int name_flags = 0;
	int c;
	nvlist_t *nvroot;
	char *poolname;
	int ret;
	zpool_handle_t *zhp;
	nvlist_t *config;
	nvlist_t *props = NULL;
	char *propval;

	/* check options */
	while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'g':
			name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 'P':
			name_flags |= VDEV_NAME_PATH;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	argc--;
	argv++;

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}
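
	/*
	 * Example (hypothetical numbers, added for illustration): if the pool
	 * was created with "-o ashift=12", the block above copies ashift=12
	 * into 'props' so the new vdevs inherit it, unless an explicit
	 * "-o ashift=..." was already passed to this command.
	 */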
	/* pass off to make_root_vdev for processing */
	nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
	    argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		return (1);
	}

	if (dryrun) {
		nvlist_t *poolnvroot;
		nvlist_t **l2child, **sparechild;
		uint_t l2children, sparechildren, c;
		char *vname;
		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &poolnvroot) == 0);

		(void) printf(gettext("would update '%s' to the following "
		    "configuration:\n\n"), zpool_get_name(zhp));

		/* print original main pool and new tree */
		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
		    name_flags | VDEV_NAME_TYPE_ID);
		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

		/* print other classes: 'dedup', 'special', and 'log' */
		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		}

		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
 */
int
zpool_do_remove(int argc, char **argv)
{
	char *poolname;
	int i, ret = 0;
	zpool_handle_t *zhp = NULL;
	boolean_t stop = B_FALSE;
	int c;
	boolean_t noop = B_FALSE;
	boolean_t parsable = B_FALSE;
	boolean_t wait = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "npsw")) != -1) {
		switch (c) {
		case 'n':
			noop = B_TRUE;
			break;
		case 'p':
			parsable = B_TRUE;
			break;
		case 's':
			stop = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if (stop && noop) {
		zpool_close(zhp);
		(void) fprintf(stderr, gettext("stop request ignored\n"));
		return (0);
	}

	if (stop) {
		if (argc > 1) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
		if (zpool_vdev_remove_cancel(zhp) != 0)
			ret = 1;
		if (wait) {
			(void) fprintf(stderr, gettext("invalid option "
			    "combination: -w cannot be used with -s\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc < 2) {
			(void) fprintf(stderr, gettext("missing device\n"));
			usage(B_FALSE);
		}

		for (i = 1; i < argc; i++) {
			if (noop) {
				uint64_t size;

				if (zpool_vdev_indirect_size(zhp, argv[i],
				    &size) != 0) {
					ret = 1;
					break;
				}
				if (parsable) {
					(void) printf("%s %llu\n",
					    argv[i], (unsigned long long)size);
				} else {
					char valstr[32];
					zfs_nicenum(size, valstr,
					    sizeof (valstr));
					(void) printf("Memory that will be "
					    "used after removing %s: %s\n",
					    argv[i], valstr);
				}
			} else {
				if (zpool_vdev_remove(zhp, argv[i]) != 0)
					ret = 1;
			}
		}

		if (ret == 0 && wait)
			ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
	}
	zpool_close(zhp);

	return (ret);
}

/*
 * Return 1 if a vdev is active (being used in a pool)
 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
 *
 * This is useful for checking if a disk in an active pool is offlined or
 * faulted.
 */
static int
vdev_is_active(char *vdev_path)
{
	int fd;
	fd = open(vdev_path, O_EXCL);
	if (fd < 0) {
		return (1);	/* can't open O_EXCL - disk is active */
	}

	close(fd);
	return (0);	/* disk is inactive in the pool */
}
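
/*
 * Note (added for clarity, not in the original source): the O_EXCL probe
 * above relies on the kernel holding an exclusive claim on block devices that
 * are actively in use, so open() is expected to fail (typically with EBUSY)
 * for an online pool member and to succeed once the disk has been offlined
 * or faulted.
 */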
/*
 * zpool labelclear [-f] <vdev>
 *
 *	-f	Force clearing the label for the vdevs which are members of
 *		the exported or foreign pools.
 *
 * Verifies that the vdev is not active and zeros out the label information
 * on the device.
 */
int
zpool_do_labelclear(int argc, char **argv)
{
	char vdev[MAXPATHLEN];
	char *name = NULL;
	struct stat st;
	int c, fd = -1, ret = 0;
	nvlist_t *config;
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	boolean_t force = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		default:
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get vdev name */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing vdev name\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	/*
	 * Check if we were given absolute path and use it as is.
	 * Otherwise if the provided vdev name doesn't point to a file,
	 * try prepending expected disk paths and partition numbers.
	 */
	(void) strlcpy(vdev, argv[0], sizeof (vdev));
	if (vdev[0] != '/' && stat(vdev, &st) != 0) {
		int error;

		error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
		if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
			if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
				error = ENOENT;
		}

		if (error || (stat(vdev, &st) != 0)) {
			(void) fprintf(stderr, gettext(
			    "failed to find device %s, try specifying absolute "
			    "path instead\n"), argv[0]);
			return (1);
		}
	}

	if ((fd = open(vdev, O_RDWR)) < 0) {
		(void) fprintf(stderr, gettext("failed to open %s: %s\n"),
		    vdev, strerror(errno));
		return (1);
	}

	/*
	 * Flush all dirty pages for the block device.  This should not be
	 * fatal when the device does not support BLKFLSBUF as would be the
	 * case for a file vdev.
	 */
	if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
		(void) fprintf(stderr, gettext("failed to invalidate "
		    "cache for %s: %s\n"), vdev, strerror(errno));

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) fprintf(stderr,
		    gettext("failed to read label from %s\n"), vdev);
		ret = 1;
		goto errout;
	}
	nvlist_free(config);

	ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to check state for %s\n"), vdev);
		ret = 1;
		goto errout;
	}

	if (!inuse)
		goto wipe_label;

	switch (state) {
	default:
	case POOL_STATE_ACTIVE:
	case POOL_STATE_SPARE:
	case POOL_STATE_L2CACHE:
		/*
		 * We allow the user to call 'zpool offline -f'
		 * on an offlined disk in an active pool. We can check if
		 * the disk is online by calling vdev_is_active().
		 */
		if (force && !vdev_is_active(vdev))
			break;

		(void) fprintf(stderr, gettext(
		    "%s is a member (%s) of pool \"%s\""),
		    vdev, zpool_pool_state_to_name(state), name);

		if (force) {
			(void) fprintf(stderr, gettext(
			    ". Offline the disk first to clear its label."));
		}
		printf("\n");
		ret = 1;
		goto errout;

	case POOL_STATE_EXPORTED:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of exported pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_POTENTIALLY_ACTIVE:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of potentially active pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_DESTROYED:
		/* inuse should never be set for a destroyed pool */
		assert(0);
		break;
	}

wipe_label:
	ret = zpool_clear_label(fd);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to clear label for %s\n"), vdev);
	}

errout:
	free(name);
	(void) close(fd);

	return (ret);
}

/*
 * zpool create [-fnd] [-o property=value] ...
 *		[-O file-system-property=value] ...
 *		[-R root] [-m mountpoint] <pool> <dev> ...
 *
 *	-f	Force creation, even if devices appear in use
 *	-n	Do not create the pool, but display the resulting layout if it
 *		were to be created.
 *	-R	Create a pool under an alternate root
 *	-m	Set default mountpoint for the root dataset.  By default it's
 *		'/<pool>'
 *	-o	Set property=value.
 *	-o	Set feature@feature=enabled|disabled.
 *	-d	Don't automatically enable all supported pool features
 *		(individual features can be enabled with -o).
 *	-O	Set fsproperty=value in the pool's root file system
 *
 * Creates the named pool according to the given vdev specification.  The
 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
 * Once we get the nvlist back from make_root_vdev(), we either print out the
 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
 */
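
/*
 * Example invocations (hypothetical pool/device names, added for
 * illustration): "zpool create -n tank mirror sda sdb" only prints the layout
 * that would be created, while "zpool create -o ashift=12
 * -O mountpoint=/data tank raidz sda sdb sdc" sets a pool property and a
 * root-filesystem property at creation time.
 */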
int
zpool_do_create(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	boolean_t dryrun = B_FALSE;
	boolean_t enable_pool_features = B_TRUE;

	int c;
	nvlist_t *nvroot = NULL;
	char *poolname;
	char *tname = NULL;
	int ret = 1;
	char *altroot = NULL;
	char *compat = NULL;
	char *mountpoint = NULL;
	nvlist_t *fsprops = NULL;
	nvlist_t *props = NULL;
	char *propval;

	/* check options */
	while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'd':
			enable_pool_features = B_FALSE;
			break;
		case 'R':
			altroot = optarg;
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			break;
		case 'm':
			/* Equivalent to -O mountpoint=optarg */
			mountpoint = optarg;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			if (add_prop_list(optarg, propval, &props, B_TRUE))
				goto errout;

			/*
			 * If the user is creating a pool that doesn't support
			 * feature flags, don't enable any features.
			 */
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
				char *end;
				u_longlong_t ver;

				ver = strtoull(propval, &end, 10);
				if (*end == '\0' &&
				    ver < SPA_VERSION_FEATURES) {
					enable_pool_features = B_FALSE;
				}
			}
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
				altroot = propval;
			if (zpool_name_to_prop(optarg) ==
			    ZPOOL_PROP_COMPATIBILITY)
				compat = propval;
			break;
		case 'O':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -O option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			/*
			 * Mountpoints are checked and then added later.
			 * Uniquely among properties, they can be specified
			 * more than once, to avoid conflict with -m.
			 */
			if (0 == strcmp(optarg,
			    zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
				mountpoint = propval;
			} else if (add_prop_list(optarg, propval, &fsprops,
			    B_FALSE)) {
				goto errout;
			}
			break;
		case 't':
			/*
			 * Sanity check temporary pool name.
			 */
			if (strchr(optarg, '/') != NULL) {
				(void) fprintf(stderr, gettext("cannot create "
				    "'%s': invalid character '/' in temporary "
				    "name\n"), optarg);
				(void) fprintf(stderr, gettext("use 'zfs "
				    "create' to create a dataset\n"));
				goto errout;
			}

			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			tname = optarg;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			goto badusage;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			goto badusage;
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		goto badusage;
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		goto badusage;
	}

	poolname = argv[0];

	/*
	 * As a special case, check for use of '/' in the name, and direct the
	 * user to use 'zfs create' instead.
	 */
	if (strchr(poolname, '/') != NULL) {
		(void) fprintf(stderr, gettext("cannot create '%s': invalid "
		    "character '/' in pool name\n"), poolname);
		(void) fprintf(stderr, gettext("use 'zfs create' to "
		    "create a dataset\n"));
		goto errout;
	}

	/* pass off to make_root_vdev for bulk processing */
	nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
	    argc - 1, argv + 1);
	if (nvroot == NULL)
		goto errout;

	/* make_root_vdev() allows 0 toplevel children if there are spares */
	if (!zfs_allocatable_devs(nvroot)) {
		(void) fprintf(stderr, gettext("invalid vdev "
		    "specification: at least one toplevel vdev must be "
		    "specified\n"));
		goto errout;
	}

	if (altroot != NULL && altroot[0] != '/') {
		(void) fprintf(stderr, gettext("invalid alternate root '%s': "
		    "must be an absolute path\n"), altroot);
		goto errout;
	}

	/*
	 * Check the validity of the mountpoint and direct the user to use the
	 * '-m' mountpoint option if it looks like it's in use.
	 */
	if (mountpoint == NULL ||
	    (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
	    strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
		char buf[MAXPATHLEN];
		DIR *dirp;

		if (mountpoint && mountpoint[0] != '/') {
			(void) fprintf(stderr, gettext("invalid mountpoint "
			    "'%s': must be an absolute path, 'legacy', or "
			    "'none'\n"), mountpoint);
			goto errout;
		}

		if (mountpoint == NULL) {
			if (altroot != NULL)
				(void) snprintf(buf, sizeof (buf), "%s/%s",
				    altroot, poolname);
			else
				(void) snprintf(buf, sizeof (buf), "/%s",
				    poolname);
		} else {
			if (altroot != NULL)
				(void) snprintf(buf, sizeof (buf), "%s%s",
				    altroot, mountpoint);
			else
				(void) snprintf(buf, sizeof (buf), "%s",
				    mountpoint);
		}

		if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
			(void) fprintf(stderr, gettext("mountpoint '%s' : "
			    "%s\n"), buf, strerror(errno));
			(void) fprintf(stderr, gettext("use '-m' "
			    "option to provide a different default\n"));
			goto errout;
		} else if (dirp) {
			int count = 0;

			while (count < 3 && readdir(dirp) != NULL)
				count++;
			(void) closedir(dirp);

			if (count > 2) {
				(void) fprintf(stderr, gettext("mountpoint "
				    "'%s' exists and is not empty\n"), buf);
				(void) fprintf(stderr, gettext("use '-m' "
				    "option to provide a "
				    "different default\n"));
				goto errout;
			}
		}
	}

	/*
	 * Now that the mountpoint's validity has been checked, ensure that
	 * the property is set appropriately prior to creating the pool.
	 */
	if (mountpoint != NULL) {
		ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    mountpoint, &fsprops, B_FALSE);
		if (ret != 0)
			goto errout;
	}

	ret = 1;
	if (dryrun) {
		/*
		 * For a dry run invocation, print out a basic message and run
		 * through all the vdevs in the list and print out in an
		 * appropriate hierarchy.
		 */
		(void) printf(gettext("would create '%s' with the "
		    "following layout:\n\n"), poolname);

		print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
		print_vdev_tree(NULL, "dedup", nvroot, 0,
		    VDEV_ALLOC_BIAS_DEDUP, 0);
		print_vdev_tree(NULL, "special", nvroot, 0,
		    VDEV_ALLOC_BIAS_SPECIAL, 0);
		print_vdev_tree(NULL, "logs", nvroot, 0,
		    VDEV_ALLOC_BIAS_LOG, 0);
		print_cache_list(nvroot, 0);
		print_spare_list(nvroot, 0);

		ret = 0;
	} else {
		/*
		 * Load in feature set.
		 * Note: if compatibility property not given, we'll have
		 * NULL, which means 'all features'.
		 */
		boolean_t requested_features[SPA_FEATURES];
		if (zpool_do_load_compat(compat, requested_features) !=
		    ZPOOL_COMPATIBILITY_OK)
			goto errout;

		/*
		 * props contains list of features to enable.
		 * For each feature:
		 *  - remove it if feature@name=disabled
		 *  - leave it there if feature@name=enabled
		 *  - add it if:
		 *    - enable_pool_features (ie: no '-d' or '-o version')
		 *    - it's supported by the kernel module
		 *    - it's in the requested feature set
		 *  - warn if it's enabled but not in compat
		 */
		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
			char propname[MAXPATHLEN];
			char *propval;
			zfeature_info_t *feat = &spa_feature_table[i];

			(void) snprintf(propname, sizeof (propname),
			    "feature@%s", feat->fi_uname);

			if (!nvlist_lookup_string(props, propname, &propval)) {
				if (strcmp(propval,
				    ZFS_FEATURE_DISABLED) == 0) {
					(void) nvlist_remove_all(props,
					    propname);
				} else if (strcmp(propval,
				    ZFS_FEATURE_ENABLED) == 0 &&
				    !requested_features[i]) {
					(void) fprintf(stderr, gettext(
					    "Warning: feature \"%s\" enabled "
					    "but is not in specified "
					    "'compatibility' feature set.\n"),
					    feat->fi_uname);
				}
			} else if (
			    enable_pool_features &&
			    feat->fi_zfs_mod_supported &&
			    requested_features[i]) {
				ret = add_prop_list(propname,
				    ZFS_FEATURE_ENABLED, &props, B_TRUE);
				if (ret != 0)
					goto errout;
			}
		}

		ret = 1;
		if (zpool_create(g_zfs, poolname,
		    nvroot, props, fsprops) == 0) {
			zfs_handle_t *pool = zfs_open(g_zfs,
			    tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
			if (pool != NULL) {
				if (zfs_mount(pool, NULL, 0) == 0) {
					ret = zfs_share(pool, NULL);
					zfs_commit_shares(NULL);
				}
				zfs_close(pool);
			}
		} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
			(void) fprintf(stderr, gettext("pool name may have "
			    "been omitted\n"));
		}
	}

errout:
	nvlist_free(nvroot);
	nvlist_free(fsprops);
	nvlist_free(props);
	return (ret);
badusage:
	nvlist_free(fsprops);
	nvlist_free(props);
	usage(B_FALSE);
	return (2);
}

/*
 * zpool destroy <pool>
 *
 *	-f	Forcefully unmount any datasets
 *
 * Destroy the given pool.  Automatically unmounts any datasets in the pool.
 */
int
zpool_do_destroy(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	int c;
	char *pool;
	zpool_handle_t *zhp;
	int ret;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* check arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	pool = argv[0];

	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
		/*
		 * As a special case, check for use of '/' in the name, and
		 * direct the user to use 'zfs destroy' instead.
		 */
		if (strchr(pool, '/') != NULL)
			(void) fprintf(stderr, gettext("use 'zfs destroy' to "
			    "destroy a dataset\n"));
		return (1);
	}

	if (zpool_disable_datasets(zhp, force) != 0) {
		(void) fprintf(stderr, gettext("could not destroy '%s': "
		    "could not unmount datasets\n"), zpool_get_name(zhp));
		zpool_close(zhp);
		return (1);
	}

	/* The history must be logged as part of the export */
	log_history = B_FALSE;

	ret = (zpool_destroy(zhp, history_str) != 0);

	zpool_close(zhp);

	return (ret);
}

typedef struct export_cbdata {
	boolean_t force;
	boolean_t hardforce;
} export_cbdata_t;

/*
 * Export one pool
 */
static int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
	export_cbdata_t *cb = data;

	if (zpool_disable_datasets(zhp, cb->force) != 0)
		return (1);

	/* The history must be logged as part of the export */
	log_history = B_FALSE;

	if (cb->hardforce) {
		if (zpool_export_force(zhp, history_str) != 0)
			return (1);
	} else if (zpool_export(zhp, cb->force, history_str) != 0) {
		return (1);
	}

	return (0);
}

/*
 * zpool export [-f] <pool> ...
 *
 *	-a	Export all pools
 *	-f	Forcefully unmount datasets
 *
 * Export the given pools.  By default, the command will attempt to cleanly
 * unmount any active datasets within the pool.  If the '-f' flag is specified,
 * then the datasets will be forcefully unmounted.
 */
int
zpool_do_export(int argc, char **argv)
{
	export_cbdata_t cb;
	boolean_t do_all = B_FALSE;
	boolean_t force = B_FALSE;
	boolean_t hardforce = B_FALSE;
	int c, ret;

	/* check options */
	while ((c = getopt(argc, argv, "afF")) != -1) {
		switch (c) {
		case 'a':
			do_all = B_TRUE;
			break;
		case 'f':
			force = B_TRUE;
			break;
		case 'F':
			hardforce = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	cb.force = force;
	cb.hardforce = hardforce;
	argc -= optind;
	argv += optind;

	if (do_all) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}

		return (for_each_pool(argc, argv, B_TRUE, NULL,
		    ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb));
	}

	/* check arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}

	ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, zpool_export_one, &cb);

	return (ret);
}
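
/*
 * Example invocations (hypothetical pool names, added for illustration):
 * "zpool export tank" cleanly unmounts and exports one pool,
 * "zpool export -f tank" forcefully unmounts its datasets first, and
 * "zpool export -a" iterates over every imported pool via for_each_pool().
 */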
/*
 * Given a vdev configuration, determine the maximum width needed for the device
 * name column.
 */
static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
    int name_flags)
{
	static const char *const subtypes[] =
	    {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};

	char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
	max = MAX(strlen(name) + depth, max);
	free(name);

	nvlist_t **child;
	uint_t children;
	for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
		if (nvlist_lookup_nvlist_array(nv, subtypes[i],
		    &child, &children) == 0)
			for (uint_t c = 0; c < children; ++c)
				max = MAX(max_width(zhp, child[c], depth + 2,
				    max, name_flags), max);

	return (max);
}

typedef struct spare_cbdata {
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} spare_cbdata_t;

static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
	    search == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev(child[c], search))
				return (B_TRUE);
	}

	return (B_FALSE);
}

static int
find_spare(zpool_handle_t *zhp, void *data)
{
	spare_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (find_vdev(nvroot, cbp->cb_guid)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

typedef struct status_cbdata {
	int		cb_count;
	int		cb_name_flags;
	int		cb_namewidth;
	boolean_t	cb_allpools;
	boolean_t	cb_verbose;
	boolean_t	cb_literal;
	boolean_t	cb_explain;
	boolean_t	cb_first;
	boolean_t	cb_dedup_stats;
	boolean_t	cb_print_status;
	boolean_t	cb_print_slow_ios;
	boolean_t	cb_print_vdev_init;
	boolean_t	cb_print_vdev_trim;
	vdev_cmd_data_list_t	*vcdl;
} status_cbdata_t;

/* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
static boolean_t
is_blank_str(const char *str)
{
	for (; str != NULL && *str != '\0'; ++str)
		if (!isblank(*str))
			return (B_FALSE);
	return (B_TRUE);
}

/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path)
{
	vdev_cmd_data_t *data;
	int i, j;
	const char *val;

	for (i = 0; i < vcdl->count; i++) {
		if ((strcmp(vcdl->data[i].path, path) != 0) ||
		    (strcmp(vcdl->data[i].pool, pool) != 0)) {
			/* Not the vdev we're looking for */
			continue;
		}

		data = &vcdl->data[i];
		/* Print out all the output values for this vdev */
		for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
			val = NULL;
			/* Does this vdev have values for this column? */
			for (int k = 0; k < data->cols_cnt; k++) {
				if (strcmp(data->cols[k],
				    vcdl->uniq_cols[j]) == 0) {
					/* yes it does, record the value */
					val = data->lines[k];
					break;
				}
			}
			/*
			 * Mark empty values with dashes to make output
			 * awk-able.
2115 */ 2116 if (val == NULL || is_blank_str(val)) 2117 val = "-"; 2118 2119 printf("%*s", vcdl->uniq_cols_width[j], val); 2120 if (j < vcdl->uniq_cols_cnt - 1) 2121 fputs(" ", stdout); 2122 } 2123 2124 /* Print out any values that aren't in a column at the end */ 2125 for (j = data->cols_cnt; j < data->lines_cnt; j++) { 2126 /* Did we have any columns? If so print a spacer. */ 2127 if (vcdl->uniq_cols_cnt > 0) 2128 fputs(" ", stdout); 2129 2130 val = data->lines[j]; 2131 fputs(val ?: "", stdout); 2132 } 2133 break; 2134 } 2135 } 2136 2137 /* 2138 * Print vdev initialization status for leaves 2139 */ 2140 static void 2141 print_status_initialize(vdev_stat_t *vs, boolean_t verbose) 2142 { 2143 if (verbose) { 2144 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE || 2145 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED || 2146 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) && 2147 !vs->vs_scan_removing) { 2148 char zbuf[1024]; 2149 char tbuf[256]; 2150 struct tm zaction_ts; 2151 2152 time_t t = vs->vs_initialize_action_time; 2153 int initialize_pct = 100; 2154 if (vs->vs_initialize_state != 2155 VDEV_INITIALIZE_COMPLETE) { 2156 initialize_pct = (vs->vs_initialize_bytes_done * 2157 100 / (vs->vs_initialize_bytes_est + 1)); 2158 } 2159 2160 (void) localtime_r(&t, &zaction_ts); 2161 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts); 2162 2163 switch (vs->vs_initialize_state) { 2164 case VDEV_INITIALIZE_SUSPENDED: 2165 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2166 gettext("suspended, started at"), tbuf); 2167 break; 2168 case VDEV_INITIALIZE_ACTIVE: 2169 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2170 gettext("started at"), tbuf); 2171 break; 2172 case VDEV_INITIALIZE_COMPLETE: 2173 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2174 gettext("completed at"), tbuf); 2175 break; 2176 } 2177 2178 (void) printf(gettext(" (%d%% initialized%s)"), 2179 initialize_pct, zbuf); 2180 } else { 2181 (void) printf(gettext(" (uninitialized)")); 2182 } 2183 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) { 2184 (void) printf(gettext(" (initializing)")); 2185 } 2186 } 2187 2188 /* 2189 * Print vdev TRIM status for leaves 2190 */ 2191 static void 2192 print_status_trim(vdev_stat_t *vs, boolean_t verbose) 2193 { 2194 if (verbose) { 2195 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE || 2196 vs->vs_trim_state == VDEV_TRIM_SUSPENDED || 2197 vs->vs_trim_state == VDEV_TRIM_COMPLETE) && 2198 !vs->vs_scan_removing) { 2199 char zbuf[1024]; 2200 char tbuf[256]; 2201 struct tm zaction_ts; 2202 2203 time_t t = vs->vs_trim_action_time; 2204 int trim_pct = 100; 2205 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) { 2206 trim_pct = (vs->vs_trim_bytes_done * 2207 100 / (vs->vs_trim_bytes_est + 1)); 2208 } 2209 2210 (void) localtime_r(&t, &zaction_ts); 2211 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts); 2212 2213 switch (vs->vs_trim_state) { 2214 case VDEV_TRIM_SUSPENDED: 2215 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2216 gettext("suspended, started at"), tbuf); 2217 break; 2218 case VDEV_TRIM_ACTIVE: 2219 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2220 gettext("started at"), tbuf); 2221 break; 2222 case VDEV_TRIM_COMPLETE: 2223 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2224 gettext("completed at"), tbuf); 2225 break; 2226 } 2227 2228 (void) printf(gettext(" (%d%% trimmed%s)"), 2229 trim_pct, zbuf); 2230 } else if (vs->vs_trim_notsup) { 2231 (void) printf(gettext(" (trim unsupported)")); 2232 } else { 2233 (void) printf(gettext(" (untrimmed)")); 2234 } 2235 } 
else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) { 2236 (void) printf(gettext(" (trimming)")); 2237 } 2238 } 2239 2240 /* 2241 * Return the color associated with a health string. This includes returning 2242 * NULL for no color change. 2243 */ 2244 static const char * 2245 health_str_to_color(const char *health) 2246 { 2247 if (strcmp(health, gettext("FAULTED")) == 0 || 2248 strcmp(health, gettext("SUSPENDED")) == 0 || 2249 strcmp(health, gettext("UNAVAIL")) == 0) { 2250 return (ANSI_RED); 2251 } 2252 2253 if (strcmp(health, gettext("OFFLINE")) == 0 || 2254 strcmp(health, gettext("DEGRADED")) == 0 || 2255 strcmp(health, gettext("REMOVED")) == 0) { 2256 return (ANSI_YELLOW); 2257 } 2258 2259 return (NULL); 2260 } 2261 2262 /* 2263 * Print out configuration state as requested by status_callback. 2264 */ 2265 static void 2266 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name, 2267 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs) 2268 { 2269 nvlist_t **child, *root; 2270 uint_t c, i, vsc, children; 2271 pool_scan_stat_t *ps = NULL; 2272 vdev_stat_t *vs; 2273 char rbuf[6], wbuf[6], cbuf[6]; 2274 char *vname; 2275 uint64_t notpresent; 2276 spare_cbdata_t spare_cb; 2277 const char *state; 2278 char *type; 2279 char *path = NULL; 2280 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL; 2281 2282 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2283 &child, &children) != 0) 2284 children = 0; 2285 2286 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2287 (uint64_t **)&vs, &vsc) == 0); 2288 2289 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 2290 2291 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) 2292 return; 2293 2294 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 2295 2296 if (isspare) { 2297 /* 2298 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for 2299 * online drives. 
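 * (The state column prints the abbreviated strings assigned below: a
 * spare that is currently in use reports "INUSE", an idle healthy
 * spare reports "AVAIL".)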
 */
		if (vs->vs_aux == VDEV_AUX_SPARED)
			state = gettext("INUSE");
		else if (vs->vs_state == VDEV_STATE_HEALTHY)
			state = gettext("AVAIL");
	}

	printf_color(health_str_to_color(state),
	    "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
	    name, state);

	if (!isspare) {
		if (vs->vs_read_errors)
			rcolor = ANSI_RED;

		if (vs->vs_write_errors)
			wcolor = ANSI_RED;

		if (vs->vs_checksum_errors)
			ccolor = ANSI_RED;

		if (cb->cb_literal) {
			fputc(' ', stdout);
			printf_color(rcolor, "%5llu",
			    (u_longlong_t)vs->vs_read_errors);
			fputc(' ', stdout);
			printf_color(wcolor, "%5llu",
			    (u_longlong_t)vs->vs_write_errors);
			fputc(' ', stdout);
			printf_color(ccolor, "%5llu",
			    (u_longlong_t)vs->vs_checksum_errors);
		} else {
			zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
			zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
			zfs_nicenum(vs->vs_checksum_errors, cbuf,
			    sizeof (cbuf));
			fputc(' ', stdout);
			printf_color(rcolor, "%5s", rbuf);
			fputc(' ', stdout);
			printf_color(wcolor, "%5s", wbuf);
			fputc(' ', stdout);
			printf_color(ccolor, "%5s", cbuf);
		}
		if (cb->cb_print_slow_ios) {
			if (children == 0) {
				/* Only leaf vdevs have slow IOs */
				zfs_nicenum(vs->vs_slow_ios, rbuf,
				    sizeof (rbuf));
			} else {
				snprintf(rbuf, sizeof (rbuf), "-");
			}

			if (cb->cb_literal)
				printf(" %5llu", (u_longlong_t)vs->vs_slow_ios);
			else
				printf(" %5s", rbuf);
		}
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &notpresent) == 0) {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
		(void) printf(" %s %s", gettext("was"), path);
	} else if (vs->vs_aux != 0) {
		(void) printf(" ");
		color_start(ANSI_RED);
		switch (vs->vs_aux) {
		case VDEV_AUX_OPEN_FAILED:
			(void) printf(gettext("cannot open"));
			break;

		case VDEV_AUX_BAD_GUID_SUM:
			(void) printf(gettext("missing device"));
			break;

		case VDEV_AUX_NO_REPLICAS:
			(void) printf(gettext("insufficient replicas"));
			break;

		case VDEV_AUX_VERSION_NEWER:
			(void) printf(gettext("newer version"));
			break;

		case VDEV_AUX_UNSUP_FEAT:
			(void) printf(gettext("unsupported feature(s)"));
			break;

		case VDEV_AUX_ASHIFT_TOO_BIG:
			(void) printf(gettext("unsupported minimum blocksize"));
			break;

		case VDEV_AUX_SPARED:
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &spare_cb.cb_guid) == 0);
			if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
				if (strcmp(zpool_get_name(spare_cb.cb_zhp),
				    zpool_get_name(zhp)) == 0)
					(void) printf(gettext("currently in "
					    "use"));
				else
					(void) printf(gettext("in use by "
					    "pool '%s'"),
					    zpool_get_name(spare_cb.cb_zhp));
				zpool_close(spare_cb.cb_zhp);
			} else {
				(void) printf(gettext("currently in use"));
			}
			break;

		case VDEV_AUX_ERR_EXCEEDED:
			(void) printf(gettext("too many errors"));
			break;

		case VDEV_AUX_IO_FAILURE:
			(void) printf(gettext("experienced I/O failures"));
			break;

		case VDEV_AUX_BAD_LOG:
			(void) printf(gettext("bad intent log"));
			break;

		case VDEV_AUX_EXTERNAL:
			(void) printf(gettext("external device fault"));
			break;

		case VDEV_AUX_SPLIT_POOL:
			(void) printf(gettext("split into new pool"));
break; 2428 2429 case VDEV_AUX_ACTIVE: 2430 (void) printf(gettext("currently in use")); 2431 break; 2432 2433 case VDEV_AUX_CHILDREN_OFFLINE: 2434 (void) printf(gettext("all children offline")); 2435 break; 2436 2437 case VDEV_AUX_BAD_LABEL: 2438 (void) printf(gettext("invalid label")); 2439 break; 2440 2441 default: 2442 (void) printf(gettext("corrupted data")); 2443 break; 2444 } 2445 color_end(); 2446 } else if (children == 0 && !isspare && 2447 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL && 2448 VDEV_STAT_VALID(vs_physical_ashift, vsc) && 2449 vs->vs_configured_ashift < vs->vs_physical_ashift) { 2450 (void) printf( 2451 gettext(" block size: %dB configured, %dB native"), 2452 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift); 2453 } 2454 2455 if (vs->vs_scan_removing != 0) { 2456 (void) printf(gettext(" (removing)")); 2457 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) { 2458 (void) printf(gettext(" (non-allocating)")); 2459 } 2460 2461 /* The root vdev has the scrub/resilver stats */ 2462 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2463 ZPOOL_CONFIG_VDEV_TREE); 2464 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS, 2465 (uint64_t **)&ps, &c); 2466 2467 /* 2468 * If you force fault a drive that's resilvering, its scan stats can 2469 * get frozen in time, giving the false impression that it's 2470 * being resilvered. That's why we check the state to see if the vdev 2471 * is healthy before reporting "resilvering" or "repairing". 2472 */ 2473 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 && 2474 vs->vs_state == VDEV_STATE_HEALTHY) { 2475 if (vs->vs_scan_processed != 0) { 2476 (void) printf(gettext(" (%s)"), 2477 (ps->pss_func == POOL_SCAN_RESILVER) ? 2478 "resilvering" : "repairing"); 2479 } else if (vs->vs_resilver_deferred) { 2480 (void) printf(gettext(" (awaiting resilver)")); 2481 } 2482 } 2483 2484 /* The top-level vdevs have the rebuild stats */ 2485 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE && 2486 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) { 2487 if (vs->vs_rebuild_processed != 0) { 2488 (void) printf(gettext(" (resilvering)")); 2489 } 2490 } 2491 2492 if (cb->vcdl != NULL) { 2493 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 2494 printf(" "); 2495 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 2496 } 2497 } 2498 2499 /* Display vdev initialization and trim status for leaves. 
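 * For example (illustrative suffixes), a leaf row may be extended with
 * " (25% initialized, started at <time>)" or " (trimming)" depending on
 * the cb_print_vdev_init and cb_print_vdev_trim flags.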
*/ 2500 if (children == 0) { 2501 print_status_initialize(vs, cb->cb_print_vdev_init); 2502 print_status_trim(vs, cb->cb_print_vdev_trim); 2503 } 2504 2505 (void) printf("\n"); 2506 2507 for (c = 0; c < children; c++) { 2508 uint64_t islog = B_FALSE, ishole = B_FALSE; 2509 2510 /* Don't print logs or holes here */ 2511 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2512 &islog); 2513 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2514 &ishole); 2515 if (islog || ishole) 2516 continue; 2517 /* Only print normal classes here */ 2518 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 2519 continue; 2520 2521 /* Provide vdev_rebuild_stats to children if available */ 2522 if (vrs == NULL) { 2523 (void) nvlist_lookup_uint64_array(nv, 2524 ZPOOL_CONFIG_REBUILD_STATS, 2525 (uint64_t **)&vrs, &i); 2526 } 2527 2528 vname = zpool_vdev_name(g_zfs, zhp, child[c], 2529 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2530 print_status_config(zhp, cb, vname, child[c], depth + 2, 2531 isspare, vrs); 2532 free(vname); 2533 } 2534 } 2535 2536 /* 2537 * Print the configuration of an exported pool. Iterate over all vdevs in the 2538 * pool, printing out the name and status for each one. 2539 */ 2540 static void 2541 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv, 2542 int depth) 2543 { 2544 nvlist_t **child; 2545 uint_t c, children; 2546 vdev_stat_t *vs; 2547 char *type, *vname; 2548 2549 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 2550 if (strcmp(type, VDEV_TYPE_MISSING) == 0 || 2551 strcmp(type, VDEV_TYPE_HOLE) == 0) 2552 return; 2553 2554 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2555 (uint64_t **)&vs, &c) == 0); 2556 2557 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name); 2558 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux)); 2559 2560 if (vs->vs_aux != 0) { 2561 (void) printf(" "); 2562 2563 switch (vs->vs_aux) { 2564 case VDEV_AUX_OPEN_FAILED: 2565 (void) printf(gettext("cannot open")); 2566 break; 2567 2568 case VDEV_AUX_BAD_GUID_SUM: 2569 (void) printf(gettext("missing device")); 2570 break; 2571 2572 case VDEV_AUX_NO_REPLICAS: 2573 (void) printf(gettext("insufficient replicas")); 2574 break; 2575 2576 case VDEV_AUX_VERSION_NEWER: 2577 (void) printf(gettext("newer version")); 2578 break; 2579 2580 case VDEV_AUX_UNSUP_FEAT: 2581 (void) printf(gettext("unsupported feature(s)")); 2582 break; 2583 2584 case VDEV_AUX_ERR_EXCEEDED: 2585 (void) printf(gettext("too many errors")); 2586 break; 2587 2588 case VDEV_AUX_ACTIVE: 2589 (void) printf(gettext("currently in use")); 2590 break; 2591 2592 case VDEV_AUX_CHILDREN_OFFLINE: 2593 (void) printf(gettext("all children offline")); 2594 break; 2595 2596 case VDEV_AUX_BAD_LABEL: 2597 (void) printf(gettext("invalid label")); 2598 break; 2599 2600 default: 2601 (void) printf(gettext("corrupted data")); 2602 break; 2603 } 2604 } 2605 (void) printf("\n"); 2606 2607 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2608 &child, &children) != 0) 2609 return; 2610 2611 for (c = 0; c < children; c++) { 2612 uint64_t is_log = B_FALSE; 2613 2614 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2615 &is_log); 2616 if (is_log) 2617 continue; 2618 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 2619 continue; 2620 2621 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2622 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2623 print_import_config(cb, vname, child[c], depth + 2); 2624 free(vname); 2625 } 2626 2627 if 
(nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2628 &child, &children) == 0) { 2629 (void) printf(gettext("\tcache\n")); 2630 for (c = 0; c < children; c++) { 2631 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2632 cb->cb_name_flags); 2633 (void) printf("\t %s\n", vname); 2634 free(vname); 2635 } 2636 } 2637 2638 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2639 &child, &children) == 0) { 2640 (void) printf(gettext("\tspares\n")); 2641 for (c = 0; c < children; c++) { 2642 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2643 cb->cb_name_flags); 2644 (void) printf("\t %s\n", vname); 2645 free(vname); 2646 } 2647 } 2648 } 2649 2650 /* 2651 * Print specialized class vdevs. 2652 * 2653 * These are recorded as top level vdevs in the main pool child array 2654 * but with "is_log" set to 1 or an "alloc_bias" string. We use either 2655 * print_status_config() or print_import_config() to print the top level 2656 * class vdevs then any of their children (eg mirrored slogs) are printed 2657 * recursively - which works because only the top level vdev is marked. 2658 */ 2659 static void 2660 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 2661 const char *class) 2662 { 2663 uint_t c, children; 2664 nvlist_t **child; 2665 boolean_t printed = B_FALSE; 2666 2667 assert(zhp != NULL || !cb->cb_verbose); 2668 2669 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, 2670 &children) != 0) 2671 return; 2672 2673 for (c = 0; c < children; c++) { 2674 uint64_t is_log = B_FALSE; 2675 char *bias = NULL; 2676 char *type = NULL; 2677 2678 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2679 &is_log); 2680 2681 if (is_log) { 2682 bias = (char *)VDEV_ALLOC_CLASS_LOGS; 2683 } else { 2684 (void) nvlist_lookup_string(child[c], 2685 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 2686 (void) nvlist_lookup_string(child[c], 2687 ZPOOL_CONFIG_TYPE, &type); 2688 } 2689 2690 if (bias == NULL || strcmp(bias, class) != 0) 2691 continue; 2692 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 2693 continue; 2694 2695 if (!printed) { 2696 (void) printf("\t%s\t\n", gettext(class)); 2697 printed = B_TRUE; 2698 } 2699 2700 char *name = zpool_vdev_name(g_zfs, zhp, child[c], 2701 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2702 if (cb->cb_print_status) 2703 print_status_config(zhp, cb, name, child[c], 2, 2704 B_FALSE, NULL); 2705 else 2706 print_import_config(cb, name, child[c], 2); 2707 free(name); 2708 } 2709 } 2710 2711 /* 2712 * Display the status for the given pool. 
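 * Abbreviated example of the header printed below (hypothetical pool
 * name and id; spacing condensed):
 *
 *	pool: tank
 *	  id: 9876543210987654321
 *	state: ONLINE
 *	config: ...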
2713 */ 2714 static int 2715 show_import(nvlist_t *config, boolean_t report_error) 2716 { 2717 uint64_t pool_state; 2718 vdev_stat_t *vs; 2719 char *name; 2720 uint64_t guid; 2721 uint64_t hostid = 0; 2722 const char *msgid; 2723 const char *hostname = "unknown"; 2724 nvlist_t *nvroot, *nvinfo; 2725 zpool_status_t reason; 2726 zpool_errata_t errata; 2727 const char *health; 2728 uint_t vsc; 2729 char *comment; 2730 status_cbdata_t cb = { 0 }; 2731 2732 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 2733 &name) == 0); 2734 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 2735 &guid) == 0); 2736 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2737 &pool_state) == 0); 2738 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2739 &nvroot) == 0); 2740 2741 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 2742 (uint64_t **)&vs, &vsc) == 0); 2743 health = zpool_state_to_name(vs->vs_state, vs->vs_aux); 2744 2745 reason = zpool_import_status(config, &msgid, &errata); 2746 2747 /* 2748 * If we're importing using a cachefile, then we won't report any 2749 * errors unless we are in the scan phase of the import. 2750 */ 2751 if (reason != ZPOOL_STATUS_OK && !report_error) 2752 return (reason); 2753 2754 (void) printf(gettext(" pool: %s\n"), name); 2755 (void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid); 2756 (void) printf(gettext(" state: %s"), health); 2757 if (pool_state == POOL_STATE_DESTROYED) 2758 (void) printf(gettext(" (DESTROYED)")); 2759 (void) printf("\n"); 2760 2761 switch (reason) { 2762 case ZPOOL_STATUS_MISSING_DEV_R: 2763 case ZPOOL_STATUS_MISSING_DEV_NR: 2764 case ZPOOL_STATUS_BAD_GUID_SUM: 2765 printf_color(ANSI_BOLD, gettext("status: ")); 2766 printf_color(ANSI_YELLOW, gettext("One or more devices are " 2767 "missing from the system.\n")); 2768 break; 2769 2770 case ZPOOL_STATUS_CORRUPT_LABEL_R: 2771 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 2772 printf_color(ANSI_BOLD, gettext("status: ")); 2773 printf_color(ANSI_YELLOW, gettext("One or more devices contains" 2774 " corrupted data.\n")); 2775 break; 2776 2777 case ZPOOL_STATUS_CORRUPT_DATA: 2778 (void) printf( 2779 gettext(" status: The pool data is corrupted.\n")); 2780 break; 2781 2782 case ZPOOL_STATUS_OFFLINE_DEV: 2783 printf_color(ANSI_BOLD, gettext("status: ")); 2784 printf_color(ANSI_YELLOW, gettext("One or more devices " 2785 "are offlined.\n")); 2786 break; 2787 2788 case ZPOOL_STATUS_CORRUPT_POOL: 2789 printf_color(ANSI_BOLD, gettext("status: ")); 2790 printf_color(ANSI_YELLOW, gettext("The pool metadata is " 2791 "corrupted.\n")); 2792 break; 2793 2794 case ZPOOL_STATUS_VERSION_OLDER: 2795 printf_color(ANSI_BOLD, gettext("status: ")); 2796 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 2797 "a legacy on-disk version.\n")); 2798 break; 2799 2800 case ZPOOL_STATUS_VERSION_NEWER: 2801 printf_color(ANSI_BOLD, gettext("status: ")); 2802 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 2803 "an incompatible version.\n")); 2804 break; 2805 2806 case ZPOOL_STATUS_FEAT_DISABLED: 2807 printf_color(ANSI_BOLD, gettext("status: ")); 2808 printf_color(ANSI_YELLOW, gettext("Some supported " 2809 "features are not enabled on the pool.\n\t" 2810 "(Note that they may be intentionally disabled " 2811 "if the\n\t'compatibility' property is set.)\n")); 2812 break; 2813 2814 case ZPOOL_STATUS_COMPATIBILITY_ERR: 2815 printf_color(ANSI_BOLD, gettext("status: ")); 2816 printf_color(ANSI_YELLOW, gettext("Error reading or parsing " 2817 "the file(s) 
indicated by the 'compatibility'\n" 2818 "property.\n")); 2819 break; 2820 2821 case ZPOOL_STATUS_INCOMPATIBLE_FEAT: 2822 printf_color(ANSI_BOLD, gettext("status: ")); 2823 printf_color(ANSI_YELLOW, gettext("One or more features " 2824 "are enabled on the pool despite not being\n" 2825 "requested by the 'compatibility' property.\n")); 2826 break; 2827 2828 case ZPOOL_STATUS_UNSUP_FEAT_READ: 2829 printf_color(ANSI_BOLD, gettext("status: ")); 2830 printf_color(ANSI_YELLOW, gettext("The pool uses the following " 2831 "feature(s) not supported on this system:\n")); 2832 color_start(ANSI_YELLOW); 2833 zpool_print_unsup_feat(config); 2834 color_end(); 2835 break; 2836 2837 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 2838 printf_color(ANSI_BOLD, gettext("status: ")); 2839 printf_color(ANSI_YELLOW, gettext("The pool can only be " 2840 "accessed in read-only mode on this system. It\n\tcannot be" 2841 " accessed in read-write mode because it uses the " 2842 "following\n\tfeature(s) not supported on this system:\n")); 2843 color_start(ANSI_YELLOW); 2844 zpool_print_unsup_feat(config); 2845 color_end(); 2846 break; 2847 2848 case ZPOOL_STATUS_HOSTID_ACTIVE: 2849 printf_color(ANSI_BOLD, gettext("status: ")); 2850 printf_color(ANSI_YELLOW, gettext("The pool is currently " 2851 "imported by another system.\n")); 2852 break; 2853 2854 case ZPOOL_STATUS_HOSTID_REQUIRED: 2855 printf_color(ANSI_BOLD, gettext("status: ")); 2856 printf_color(ANSI_YELLOW, gettext("The pool has the " 2857 "multihost property on. It cannot\n\tbe safely imported " 2858 "when the system hostid is not set.\n")); 2859 break; 2860 2861 case ZPOOL_STATUS_HOSTID_MISMATCH: 2862 printf_color(ANSI_BOLD, gettext("status: ")); 2863 printf_color(ANSI_YELLOW, gettext("The pool was last accessed " 2864 "by another system.\n")); 2865 break; 2866 2867 case ZPOOL_STATUS_FAULTED_DEV_R: 2868 case ZPOOL_STATUS_FAULTED_DEV_NR: 2869 printf_color(ANSI_BOLD, gettext("status: ")); 2870 printf_color(ANSI_YELLOW, gettext("One or more devices are " 2871 "faulted.\n")); 2872 break; 2873 2874 case ZPOOL_STATUS_BAD_LOG: 2875 printf_color(ANSI_BOLD, gettext("status: ")); 2876 printf_color(ANSI_YELLOW, gettext("An intent log record cannot " 2877 "be read.\n")); 2878 break; 2879 2880 case ZPOOL_STATUS_RESILVERING: 2881 case ZPOOL_STATUS_REBUILDING: 2882 printf_color(ANSI_BOLD, gettext("status: ")); 2883 printf_color(ANSI_YELLOW, gettext("One or more devices were " 2884 "being resilvered.\n")); 2885 break; 2886 2887 case ZPOOL_STATUS_ERRATA: 2888 printf_color(ANSI_BOLD, gettext("status: ")); 2889 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"), 2890 errata); 2891 break; 2892 2893 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 2894 printf_color(ANSI_BOLD, gettext("status: ")); 2895 printf_color(ANSI_YELLOW, gettext("One or more devices are " 2896 "configured to use a non-native block size.\n" 2897 "\tExpect reduced performance.\n")); 2898 break; 2899 2900 default: 2901 /* 2902 * No other status can be seen when importing pools. 2903 */ 2904 assert(reason == ZPOOL_STATUS_OK); 2905 } 2906 2907 /* 2908 * Print out an action according to the overall state of the pool. 
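 * Roughly: a HEALTHY pool gets "action: The pool can be imported using
 * its name or numeric identifier.", a DEGRADED pool gets "can be
 * imported despite missing or damaged devices", and any other state
 * falls through to a reason-specific "cannot be imported" message.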
2909 */ 2910 if (vs->vs_state == VDEV_STATE_HEALTHY) { 2911 if (reason == ZPOOL_STATUS_VERSION_OLDER || 2912 reason == ZPOOL_STATUS_FEAT_DISABLED) { 2913 (void) printf(gettext(" action: The pool can be " 2914 "imported using its name or numeric identifier, " 2915 "though\n\tsome features will not be available " 2916 "without an explicit 'zpool upgrade'.\n")); 2917 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) { 2918 (void) printf(gettext(" action: The pool can be " 2919 "imported using its name or numeric\n\tidentifier, " 2920 "though the file(s) indicated by its " 2921 "'compatibility'\n\tproperty cannot be parsed at " 2922 "this time.\n")); 2923 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) { 2924 (void) printf(gettext(" action: The pool can be " 2925 "imported using its name or numeric " 2926 "identifier and\n\tthe '-f' flag.\n")); 2927 } else if (reason == ZPOOL_STATUS_ERRATA) { 2928 switch (errata) { 2929 case ZPOOL_ERRATA_NONE: 2930 break; 2931 2932 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 2933 (void) printf(gettext(" action: The pool can " 2934 "be imported using its name or numeric " 2935 "identifier,\n\thowever there is a compat" 2936 "ibility issue which should be corrected" 2937 "\n\tby running 'zpool scrub'\n")); 2938 break; 2939 2940 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY: 2941 (void) printf(gettext(" action: The pool can" 2942 "not be imported with this version of ZFS " 2943 "due to\n\tan active asynchronous destroy. " 2944 "Revert to an earlier version\n\tand " 2945 "allow the destroy to complete before " 2946 "updating.\n")); 2947 break; 2948 2949 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 2950 (void) printf(gettext(" action: Existing " 2951 "encrypted datasets contain an on-disk " 2952 "incompatibility, which\n\tneeds to be " 2953 "corrected. Backup these datasets to new " 2954 "encrypted datasets\n\tand destroy the " 2955 "old ones.\n")); 2956 break; 2957 2958 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 2959 (void) printf(gettext(" action: Existing " 2960 "encrypted snapshots and bookmarks contain " 2961 "an on-disk\n\tincompatibility. This may " 2962 "cause on-disk corruption if they are used" 2963 "\n\twith 'zfs recv'. To correct the " 2964 "issue, enable the bookmark_v2 feature.\n\t" 2965 "No additional action is needed if there " 2966 "are no encrypted snapshots or\n\t" 2967 "bookmarks. If preserving the encrypted " 2968 "snapshots and bookmarks is\n\trequired, " 2969 "use a non-raw send to backup and restore " 2970 "them. Alternately,\n\tthey may be removed" 2971 " to resolve the incompatibility.\n")); 2972 break; 2973 default: 2974 /* 2975 * All errata must contain an action message. 2976 */ 2977 assert(0); 2978 } 2979 } else { 2980 (void) printf(gettext(" action: The pool can be " 2981 "imported using its name or numeric " 2982 "identifier.\n")); 2983 } 2984 } else if (vs->vs_state == VDEV_STATE_DEGRADED) { 2985 (void) printf(gettext(" action: The pool can be imported " 2986 "despite missing or damaged devices. The\n\tfault " 2987 "tolerance of the pool may be compromised if imported.\n")); 2988 } else { 2989 switch (reason) { 2990 case ZPOOL_STATUS_VERSION_NEWER: 2991 (void) printf(gettext(" action: The pool cannot be " 2992 "imported. Access the pool on a system running " 2993 "newer\n\tsoftware, or recreate the pool from " 2994 "backup.\n")); 2995 break; 2996 case ZPOOL_STATUS_UNSUP_FEAT_READ: 2997 printf_color(ANSI_BOLD, gettext("action: ")); 2998 printf_color(ANSI_YELLOW, gettext("The pool cannot be " 2999 "imported. 
Access the pool on a system that " 3000 "supports\n\tthe required feature(s), or recreate " 3001 "the pool from backup.\n")); 3002 break; 3003 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 3004 printf_color(ANSI_BOLD, gettext("action: ")); 3005 printf_color(ANSI_YELLOW, gettext("The pool cannot be " 3006 "imported in read-write mode. Import the pool " 3007 "with\n" 3008 "\t\"-o readonly=on\", access the pool on a system " 3009 "that supports the\n\trequired feature(s), or " 3010 "recreate the pool from backup.\n")); 3011 break; 3012 case ZPOOL_STATUS_MISSING_DEV_R: 3013 case ZPOOL_STATUS_MISSING_DEV_NR: 3014 case ZPOOL_STATUS_BAD_GUID_SUM: 3015 (void) printf(gettext(" action: The pool cannot be " 3016 "imported. Attach the missing\n\tdevices and try " 3017 "again.\n")); 3018 break; 3019 case ZPOOL_STATUS_HOSTID_ACTIVE: 3020 VERIFY0(nvlist_lookup_nvlist(config, 3021 ZPOOL_CONFIG_LOAD_INFO, &nvinfo)); 3022 3023 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 3024 hostname = fnvlist_lookup_string(nvinfo, 3025 ZPOOL_CONFIG_MMP_HOSTNAME); 3026 3027 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3028 hostid = fnvlist_lookup_uint64(nvinfo, 3029 ZPOOL_CONFIG_MMP_HOSTID); 3030 3031 (void) printf(gettext(" action: The pool must be " 3032 "exported from %s (hostid=%"PRIx64")\n\tbefore it " 3033 "can be safely imported.\n"), hostname, hostid); 3034 break; 3035 case ZPOOL_STATUS_HOSTID_REQUIRED: 3036 (void) printf(gettext(" action: Set a unique system " 3037 "hostid with the zgenhostid(8) command.\n")); 3038 break; 3039 default: 3040 (void) printf(gettext(" action: The pool cannot be " 3041 "imported due to damaged devices or data.\n")); 3042 } 3043 } 3044 3045 /* Print the comment attached to the pool. */ 3046 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 3047 (void) printf(gettext("comment: %s\n"), comment); 3048 3049 /* 3050 * If the state is "closed" or "can't open", and the aux state 3051 * is "corrupt data": 3052 */ 3053 if (((vs->vs_state == VDEV_STATE_CLOSED) || 3054 (vs->vs_state == VDEV_STATE_CANT_OPEN)) && 3055 (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) { 3056 if (pool_state == POOL_STATE_DESTROYED) 3057 (void) printf(gettext("\tThe pool was destroyed, " 3058 "but can be imported using the '-Df' flags.\n")); 3059 else if (pool_state != POOL_STATE_EXPORTED) 3060 (void) printf(gettext("\tThe pool may be active on " 3061 "another system, but can be imported using\n\t" 3062 "the '-f' flag.\n")); 3063 } 3064 3065 if (msgid != NULL) { 3066 (void) printf(gettext( 3067 " see: https://openzfs.github.io/openzfs-docs/msg/%s\n"), 3068 msgid); 3069 } 3070 3071 (void) printf(gettext(" config:\n\n")); 3072 3073 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name), 3074 VDEV_NAME_TYPE_ID); 3075 if (cb.cb_namewidth < 10) 3076 cb.cb_namewidth = 10; 3077 3078 print_import_config(&cb, name, nvroot, 0); 3079 3080 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP); 3081 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 3082 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS); 3083 3084 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) { 3085 (void) printf(gettext("\n\tAdditional devices are known to " 3086 "be part of this pool, though their\n\texact " 3087 "configuration cannot be determined.\n")); 3088 } 3089 return (0); 3090 } 3091 3092 static boolean_t 3093 zfs_force_import_required(nvlist_t *config) 3094 { 3095 uint64_t state; 3096 uint64_t hostid = 0; 3097 nvlist_t *nvinfo; 3098 3099 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE); 3100 (void) 
nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid); 3101 3102 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid()) 3103 return (B_TRUE); 3104 3105 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3106 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) { 3107 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo, 3108 ZPOOL_CONFIG_MMP_STATE); 3109 3110 if (mmp_state != MMP_STATE_INACTIVE) 3111 return (B_TRUE); 3112 } 3113 3114 return (B_FALSE); 3115 } 3116 3117 /* 3118 * Perform the import for the given configuration. This passes the heavy 3119 * lifting off to zpool_import_props(), and then mounts the datasets contained 3120 * within the pool. 3121 */ 3122 static int 3123 do_import(nvlist_t *config, const char *newname, const char *mntopts, 3124 nvlist_t *props, int flags) 3125 { 3126 int ret = 0; 3127 zpool_handle_t *zhp; 3128 const char *name; 3129 uint64_t version; 3130 3131 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME); 3132 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION); 3133 3134 if (!SPA_VERSION_IS_SUPPORTED(version)) { 3135 (void) fprintf(stderr, gettext("cannot import '%s': pool " 3136 "is formatted using an unsupported ZFS version\n"), name); 3137 return (1); 3138 } else if (zfs_force_import_required(config) && 3139 !(flags & ZFS_IMPORT_ANY_HOST)) { 3140 mmp_state_t mmp_state = MMP_STATE_INACTIVE; 3141 nvlist_t *nvinfo; 3142 3143 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3144 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) 3145 mmp_state = fnvlist_lookup_uint64(nvinfo, 3146 ZPOOL_CONFIG_MMP_STATE); 3147 3148 if (mmp_state == MMP_STATE_ACTIVE) { 3149 const char *hostname = "<unknown>"; 3150 uint64_t hostid = 0; 3151 3152 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 3153 hostname = fnvlist_lookup_string(nvinfo, 3154 ZPOOL_CONFIG_MMP_HOSTNAME); 3155 3156 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3157 hostid = fnvlist_lookup_uint64(nvinfo, 3158 ZPOOL_CONFIG_MMP_HOSTID); 3159 3160 (void) fprintf(stderr, gettext("cannot import '%s': " 3161 "pool is imported on %s (hostid: " 3162 "0x%"PRIx64")\nExport the pool on the other " 3163 "system, then run 'zpool import'.\n"), 3164 name, hostname, hostid); 3165 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 3166 (void) fprintf(stderr, gettext("Cannot import '%s': " 3167 "pool has the multihost property on and the\n" 3168 "system's hostid is not set. 
Set a unique hostid "
			    "with the zgenhostid(8) command.\n"), name);
		} else {
			const char *hostname = "<unknown>";
			time_t timestamp = 0;
			uint64_t hostid = 0;

			if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
				hostname = fnvlist_lookup_string(config,
				    ZPOOL_CONFIG_HOSTNAME);

			if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
				timestamp = fnvlist_lookup_uint64(config,
				    ZPOOL_CONFIG_TIMESTAMP);

			if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
				hostid = fnvlist_lookup_uint64(config,
				    ZPOOL_CONFIG_HOSTID);

			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "pool was previously in use from another system.\n"
			    "Last accessed by %s (hostid=%"PRIx64") at %s"
			    "The pool can be imported, use 'zpool import -f' "
			    "to import the pool.\n"), name, hostname,
			    hostid, ctime(&timestamp));
		}

		return (1);
	}

	if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
		return (1);

	if (newname != NULL)
		name = newname;

	if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
		return (1);

	/*
	 * Loading keys is best effort. We don't want to return immediately
	 * if it fails but we do want to give the error to the caller.
	 */
	if (flags & ZFS_IMPORT_LOAD_KEYS &&
	    zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
		ret = 1;

	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
	    !(flags & ZFS_IMPORT_ONLY) &&
	    zpool_enable_datasets(zhp, mntopts, 0) != 0) {
		zpool_close(zhp);
		return (1);
	}

	zpool_close(zhp);
	return (ret);
}

static int
import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
    char *orig_name, char *new_name,
    boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
    importargs_t *import)
{
	nvlist_t *config = NULL;
	nvlist_t *found_config = NULL;
	uint64_t pool_state;

	/*
	 * At this point we have a list of import candidate configs. Even if
	 * we were searching by pool name or guid, we still need to
	 * post-process the list to deal with pool state and possible
	 * duplicate names.
	 */
	int err = 0;
	nvpair_t *elem = NULL;
	boolean_t first = B_TRUE;
	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {

		verify(nvpair_value_nvlist(elem, &config) == 0);

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &pool_state) == 0);
		if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
			continue;
		if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
			continue;

		verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
		    import->policy) == 0);

		if (!pool_specified) {
			if (first)
				first = B_FALSE;
			else if (!do_all)
				(void) fputc('\n', stdout);

			if (do_all) {
				err |= do_import(config, NULL, mntopts,
				    props, flags);
			} else {
				/*
				 * If we're importing from cachefile, then
				 * we don't want to report errors until we
				 * are in the scan phase of the import. If
				 * we get an error, then we return that error
				 * to invoke the scan phase.
				 */
				if (import->cachefile && !import->scan)
					err = show_import(config, B_FALSE);
				else
					(void) show_import(config, B_TRUE);
			}
		} else if (import->poolname != NULL) {
			char *name;

			/*
			 * We are searching for a pool based on name.
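			 * (e.g. "zpool import tank" arrives here; a purely
			 * numeric argument such as "zpool import 9876543210"
			 * is parsed as a guid in zpool_do_import() and is
			 * handled by the else branch below instead.)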
3286 */ 3287 verify(nvlist_lookup_string(config, 3288 ZPOOL_CONFIG_POOL_NAME, &name) == 0); 3289 3290 if (strcmp(name, import->poolname) == 0) { 3291 if (found_config != NULL) { 3292 (void) fprintf(stderr, gettext( 3293 "cannot import '%s': more than " 3294 "one matching pool\n"), 3295 import->poolname); 3296 (void) fprintf(stderr, gettext( 3297 "import by numeric ID instead\n")); 3298 err = B_TRUE; 3299 } 3300 found_config = config; 3301 } 3302 } else { 3303 uint64_t guid; 3304 3305 /* 3306 * Search for a pool by guid. 3307 */ 3308 verify(nvlist_lookup_uint64(config, 3309 ZPOOL_CONFIG_POOL_GUID, &guid) == 0); 3310 3311 if (guid == import->guid) 3312 found_config = config; 3313 } 3314 } 3315 3316 /* 3317 * If we were searching for a specific pool, verify that we found a 3318 * pool, and then do the import. 3319 */ 3320 if (pool_specified && err == 0) { 3321 if (found_config == NULL) { 3322 (void) fprintf(stderr, gettext("cannot import '%s': " 3323 "no such pool available\n"), orig_name); 3324 err = B_TRUE; 3325 } else { 3326 err |= do_import(found_config, new_name, 3327 mntopts, props, flags); 3328 } 3329 } 3330 3331 /* 3332 * If we were just looking for pools, report an error if none were 3333 * found. 3334 */ 3335 if (!pool_specified && first) 3336 (void) fprintf(stderr, 3337 gettext("no pools available to import\n")); 3338 return (err); 3339 } 3340 3341 typedef struct target_exists_args { 3342 const char *poolname; 3343 uint64_t poolguid; 3344 } target_exists_args_t; 3345 3346 static int 3347 name_or_guid_exists(zpool_handle_t *zhp, void *data) 3348 { 3349 target_exists_args_t *args = data; 3350 nvlist_t *config = zpool_get_config(zhp, NULL); 3351 int found = 0; 3352 3353 if (config == NULL) 3354 return (0); 3355 3356 if (args->poolname != NULL) { 3357 char *pool_name; 3358 3359 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3360 &pool_name) == 0); 3361 if (strcmp(pool_name, args->poolname) == 0) 3362 found = 1; 3363 } else { 3364 uint64_t pool_guid; 3365 3366 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3367 &pool_guid) == 0); 3368 if (pool_guid == args->poolguid) 3369 found = 1; 3370 } 3371 zpool_close(zhp); 3372 3373 return (found); 3374 } 3375 /* 3376 * zpool checkpoint <pool> 3377 * checkpoint --discard <pool> 3378 * 3379 * -d Discard the checkpoint from a checkpointed 3380 * --discard pool. 3381 * 3382 * -w Wait for discarding a checkpoint to complete. 3383 * --wait 3384 * 3385 * Checkpoints the specified pool, by taking a "snapshot" of its 3386 * current state. A pool can only have one checkpoint at a time. 
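 *
 * Illustrative invocations (hypothetical pool name):
 *
 *	# zpool checkpoint tank		(take a checkpoint of 'tank')
 *	# zpool checkpoint -d tank	(discard the existing checkpoint)
 *	# zpool checkpoint -dw tank	(discard and wait for completion)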
3387 */ 3388 int 3389 zpool_do_checkpoint(int argc, char **argv) 3390 { 3391 boolean_t discard, wait; 3392 char *pool; 3393 zpool_handle_t *zhp; 3394 int c, err; 3395 3396 struct option long_options[] = { 3397 {"discard", no_argument, NULL, 'd'}, 3398 {"wait", no_argument, NULL, 'w'}, 3399 {0, 0, 0, 0} 3400 }; 3401 3402 discard = B_FALSE; 3403 wait = B_FALSE; 3404 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) { 3405 switch (c) { 3406 case 'd': 3407 discard = B_TRUE; 3408 break; 3409 case 'w': 3410 wait = B_TRUE; 3411 break; 3412 case '?': 3413 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 3414 optopt); 3415 usage(B_FALSE); 3416 } 3417 } 3418 3419 if (wait && !discard) { 3420 (void) fprintf(stderr, gettext("--wait only valid when " 3421 "--discard also specified\n")); 3422 usage(B_FALSE); 3423 } 3424 3425 argc -= optind; 3426 argv += optind; 3427 3428 if (argc < 1) { 3429 (void) fprintf(stderr, gettext("missing pool argument\n")); 3430 usage(B_FALSE); 3431 } 3432 3433 if (argc > 1) { 3434 (void) fprintf(stderr, gettext("too many arguments\n")); 3435 usage(B_FALSE); 3436 } 3437 3438 pool = argv[0]; 3439 3440 if ((zhp = zpool_open(g_zfs, pool)) == NULL) { 3441 /* As a special case, check for use of '/' in the name */ 3442 if (strchr(pool, '/') != NULL) 3443 (void) fprintf(stderr, gettext("'zpool checkpoint' " 3444 "doesn't work on datasets. To save the state " 3445 "of a dataset from a specific point in time " 3446 "please use 'zfs snapshot'\n")); 3447 return (1); 3448 } 3449 3450 if (discard) { 3451 err = (zpool_discard_checkpoint(zhp) != 0); 3452 if (err == 0 && wait) 3453 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD); 3454 } else { 3455 err = (zpool_checkpoint(zhp) != 0); 3456 } 3457 3458 zpool_close(zhp); 3459 3460 return (err); 3461 } 3462 3463 #define CHECKPOINT_OPT 1024 3464 3465 /* 3466 * zpool import [-d dir] [-D] 3467 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l] 3468 * [-d dir | -c cachefile | -s] [-f] -a 3469 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l] 3470 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id> 3471 * [newpool] 3472 * 3473 * -c Read pool information from a cachefile instead of searching 3474 * devices. If importing from a cachefile config fails, then 3475 * fallback to searching for devices only in the directories that 3476 * exist in the cachefile. 3477 * 3478 * -d Scan in a specific directory, other than /dev/. More than 3479 * one directory can be specified using multiple '-d' options. 3480 * 3481 * -D Scan for previously destroyed pools or import all or only 3482 * specified destroyed pools. 3483 * 3484 * -R Temporarily import the pool, with all mountpoints relative to 3485 * the given root. The pool will remain exported when the machine 3486 * is rebooted. 3487 * 3488 * -V Import even in the presence of faulted vdevs. This is an 3489 * intentionally undocumented option for testing purposes, and 3490 * treats the pool configuration as complete, leaving any bad 3491 * vdevs in the FAULTED state. In other words, it does verbatim 3492 * import. 3493 * 3494 * -f Force import, even if it appears that the pool is active. 3495 * 3496 * -F Attempt rewind if necessary. 3497 * 3498 * -n See if rewind would work, but don't actually rewind. 3499 * 3500 * -N Import the pool but don't mount datasets. 3501 * 3502 * -T Specify a starting txg to use for import. This option is 3503 * intentionally undocumented option for testing purposes. 3504 * 3505 * -a Import all pools found. 
3506 * 3507 * -l Load encryption keys while importing. 3508 * 3509 * -o Set property=value and/or temporary mount options (without '='). 3510 * 3511 * -s Scan using the default search path, the libblkid cache will 3512 * not be consulted. 3513 * 3514 * --rewind-to-checkpoint 3515 * Import the pool and revert back to the checkpoint. 3516 * 3517 * The import command scans for pools to import, and import pools based on pool 3518 * name and GUID. The pool can also be renamed as part of the import process. 3519 */ 3520 int 3521 zpool_do_import(int argc, char **argv) 3522 { 3523 char **searchdirs = NULL; 3524 char *env, *envdup = NULL; 3525 int nsearch = 0; 3526 int c; 3527 int err = 0; 3528 nvlist_t *pools = NULL; 3529 boolean_t do_all = B_FALSE; 3530 boolean_t do_destroyed = B_FALSE; 3531 char *mntopts = NULL; 3532 uint64_t searchguid = 0; 3533 char *searchname = NULL; 3534 char *propval; 3535 nvlist_t *policy = NULL; 3536 nvlist_t *props = NULL; 3537 int flags = ZFS_IMPORT_NORMAL; 3538 uint32_t rewind_policy = ZPOOL_NO_REWIND; 3539 boolean_t dryrun = B_FALSE; 3540 boolean_t do_rewind = B_FALSE; 3541 boolean_t xtreme_rewind = B_FALSE; 3542 boolean_t do_scan = B_FALSE; 3543 boolean_t pool_exists = B_FALSE; 3544 boolean_t pool_specified = B_FALSE; 3545 uint64_t txg = -1ULL; 3546 char *cachefile = NULL; 3547 importargs_t idata = { 0 }; 3548 char *endptr; 3549 3550 struct option long_options[] = { 3551 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT}, 3552 {0, 0, 0, 0} 3553 }; 3554 3555 /* check options */ 3556 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX", 3557 long_options, NULL)) != -1) { 3558 switch (c) { 3559 case 'a': 3560 do_all = B_TRUE; 3561 break; 3562 case 'c': 3563 cachefile = optarg; 3564 break; 3565 case 'd': 3566 searchdirs = safe_realloc(searchdirs, 3567 (nsearch + 1) * sizeof (char *)); 3568 searchdirs[nsearch++] = optarg; 3569 break; 3570 case 'D': 3571 do_destroyed = B_TRUE; 3572 break; 3573 case 'f': 3574 flags |= ZFS_IMPORT_ANY_HOST; 3575 break; 3576 case 'F': 3577 do_rewind = B_TRUE; 3578 break; 3579 case 'l': 3580 flags |= ZFS_IMPORT_LOAD_KEYS; 3581 break; 3582 case 'm': 3583 flags |= ZFS_IMPORT_MISSING_LOG; 3584 break; 3585 case 'n': 3586 dryrun = B_TRUE; 3587 break; 3588 case 'N': 3589 flags |= ZFS_IMPORT_ONLY; 3590 break; 3591 case 'o': 3592 if ((propval = strchr(optarg, '=')) != NULL) { 3593 *propval = '\0'; 3594 propval++; 3595 if (add_prop_list(optarg, propval, 3596 &props, B_TRUE)) 3597 goto error; 3598 } else { 3599 mntopts = optarg; 3600 } 3601 break; 3602 case 'R': 3603 if (add_prop_list(zpool_prop_to_name( 3604 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE)) 3605 goto error; 3606 if (add_prop_list_default(zpool_prop_to_name( 3607 ZPOOL_PROP_CACHEFILE), "none", &props)) 3608 goto error; 3609 break; 3610 case 's': 3611 do_scan = B_TRUE; 3612 break; 3613 case 't': 3614 flags |= ZFS_IMPORT_TEMP_NAME; 3615 if (add_prop_list_default(zpool_prop_to_name( 3616 ZPOOL_PROP_CACHEFILE), "none", &props)) 3617 goto error; 3618 break; 3619 3620 case 'T': 3621 errno = 0; 3622 txg = strtoull(optarg, &endptr, 0); 3623 if (errno != 0 || *endptr != '\0') { 3624 (void) fprintf(stderr, 3625 gettext("invalid txg value\n")); 3626 usage(B_FALSE); 3627 } 3628 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND; 3629 break; 3630 case 'V': 3631 flags |= ZFS_IMPORT_VERBATIM; 3632 break; 3633 case 'X': 3634 xtreme_rewind = B_TRUE; 3635 break; 3636 case CHECKPOINT_OPT: 3637 flags |= ZFS_IMPORT_CHECKPOINT; 3638 break; 3639 case ':': 3640 (void) fprintf(stderr, 
gettext("missing argument for " 3641 "'%c' option\n"), optopt); 3642 usage(B_FALSE); 3643 break; 3644 case '?': 3645 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 3646 optopt); 3647 usage(B_FALSE); 3648 } 3649 } 3650 3651 argc -= optind; 3652 argv += optind; 3653 3654 if (cachefile && nsearch != 0) { 3655 (void) fprintf(stderr, gettext("-c is incompatible with -d\n")); 3656 usage(B_FALSE); 3657 } 3658 3659 if (cachefile && do_scan) { 3660 (void) fprintf(stderr, gettext("-c is incompatible with -s\n")); 3661 usage(B_FALSE); 3662 } 3663 3664 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) { 3665 (void) fprintf(stderr, gettext("-l is incompatible with -N\n")); 3666 usage(B_FALSE); 3667 } 3668 3669 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) { 3670 (void) fprintf(stderr, gettext("-l is only meaningful during " 3671 "an import\n")); 3672 usage(B_FALSE); 3673 } 3674 3675 if ((dryrun || xtreme_rewind) && !do_rewind) { 3676 (void) fprintf(stderr, 3677 gettext("-n or -X only meaningful with -F\n")); 3678 usage(B_FALSE); 3679 } 3680 if (dryrun) 3681 rewind_policy = ZPOOL_TRY_REWIND; 3682 else if (do_rewind) 3683 rewind_policy = ZPOOL_DO_REWIND; 3684 if (xtreme_rewind) 3685 rewind_policy |= ZPOOL_EXTREME_REWIND; 3686 3687 /* In the future, we can capture further policy and include it here */ 3688 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 3689 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 || 3690 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, 3691 rewind_policy) != 0) 3692 goto error; 3693 3694 /* check argument count */ 3695 if (do_all) { 3696 if (argc != 0) { 3697 (void) fprintf(stderr, gettext("too many arguments\n")); 3698 usage(B_FALSE); 3699 } 3700 } else { 3701 if (argc > 2) { 3702 (void) fprintf(stderr, gettext("too many arguments\n")); 3703 usage(B_FALSE); 3704 } 3705 } 3706 3707 /* 3708 * Check for the effective uid. We do this explicitly here because 3709 * otherwise any attempt to discover pools will silently fail. 3710 */ 3711 if (argc == 0 && geteuid() != 0) { 3712 (void) fprintf(stderr, gettext("cannot " 3713 "discover pools: permission denied\n")); 3714 3715 free(searchdirs); 3716 nvlist_free(props); 3717 nvlist_free(policy); 3718 return (1); 3719 } 3720 3721 /* 3722 * Depending on the arguments given, we do one of the following: 3723 * 3724 * <none> Iterate through all pools and display information about 3725 * each one. 3726 * 3727 * -a Iterate through all pools and try to import each one. 3728 * 3729 * <id> Find the pool that corresponds to the given GUID/pool 3730 * name and import that one. 3731 * 3732 * -D Above options applies only to destroyed pools. 3733 */ 3734 if (argc != 0) { 3735 char *endptr; 3736 3737 errno = 0; 3738 searchguid = strtoull(argv[0], &endptr, 10); 3739 if (errno != 0 || *endptr != '\0') { 3740 searchname = argv[0]; 3741 searchguid = 0; 3742 } 3743 pool_specified = B_TRUE; 3744 3745 /* 3746 * User specified a name or guid. Ensure it's unique. 3747 */ 3748 target_exists_args_t search = {searchname, searchguid}; 3749 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search); 3750 } 3751 3752 /* 3753 * Check the environment for the preferred search path. 
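 * For example (illustrative directories):
 *
 *	# ZPOOL_IMPORT_PATH=/dev/disk/by-id:/dev/disk/by-path zpool import
 *
 * The value is split on ':' below and used as the device search path
 * when no -d options were given on the command line.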
3754 */ 3755 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) { 3756 char *dir, *tmp = NULL; 3757 3758 envdup = strdup(env); 3759 3760 for (dir = strtok_r(envdup, ":", &tmp); 3761 dir != NULL; 3762 dir = strtok_r(NULL, ":", &tmp)) { 3763 searchdirs = safe_realloc(searchdirs, 3764 (nsearch + 1) * sizeof (char *)); 3765 searchdirs[nsearch++] = dir; 3766 } 3767 } 3768 3769 idata.path = searchdirs; 3770 idata.paths = nsearch; 3771 idata.poolname = searchname; 3772 idata.guid = searchguid; 3773 idata.cachefile = cachefile; 3774 idata.scan = do_scan; 3775 idata.policy = policy; 3776 3777 libpc_handle_t lpch = { 3778 .lpc_lib_handle = g_zfs, 3779 .lpc_ops = &libzfs_config_ops, 3780 .lpc_printerr = B_TRUE 3781 }; 3782 pools = zpool_search_import(&lpch, &idata); 3783 3784 if (pools != NULL && pool_exists && 3785 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) { 3786 (void) fprintf(stderr, gettext("cannot import '%s': " 3787 "a pool with that name already exists\n"), 3788 argv[0]); 3789 (void) fprintf(stderr, gettext("use the form '%s " 3790 "<pool | id> <newpool>' to give it a new name\n"), 3791 "zpool import"); 3792 err = 1; 3793 } else if (pools == NULL && pool_exists) { 3794 (void) fprintf(stderr, gettext("cannot import '%s': " 3795 "a pool with that name is already created/imported,\n"), 3796 argv[0]); 3797 (void) fprintf(stderr, gettext("and no additional pools " 3798 "with that name were found\n")); 3799 err = 1; 3800 } else if (pools == NULL) { 3801 if (argc != 0) { 3802 (void) fprintf(stderr, gettext("cannot import '%s': " 3803 "no such pool available\n"), argv[0]); 3804 } 3805 err = 1; 3806 } 3807 3808 if (err == 1) { 3809 free(searchdirs); 3810 free(envdup); 3811 nvlist_free(policy); 3812 nvlist_free(pools); 3813 nvlist_free(props); 3814 return (1); 3815 } 3816 3817 err = import_pools(pools, props, mntopts, flags, 3818 argc >= 1 ? argv[0] : NULL, 3819 argc >= 2 ? argv[1] : NULL, 3820 do_destroyed, pool_specified, do_all, &idata); 3821 3822 /* 3823 * If we're using the cachefile and we failed to import, then 3824 * fallback to scanning the directory for pools that match 3825 * those in the cachefile. 3826 */ 3827 if (err != 0 && cachefile != NULL) { 3828 (void) printf(gettext("cachefile import failed, retrying\n")); 3829 3830 /* 3831 * We use the scan flag to gather the directories that exist 3832 * in the cachefile. If we need to fallback to searching for 3833 * the pool config, we will only search devices in these 3834 * directories. 3835 */ 3836 idata.scan = B_TRUE; 3837 nvlist_free(pools); 3838 pools = zpool_search_import(&lpch, &idata); 3839 3840 err = import_pools(pools, props, mntopts, flags, 3841 argc >= 1 ? argv[0] : NULL, 3842 argc >= 2 ? argv[1] : NULL, 3843 do_destroyed, pool_specified, do_all, &idata); 3844 } 3845 3846 error: 3847 nvlist_free(props); 3848 nvlist_free(pools); 3849 nvlist_free(policy); 3850 free(searchdirs); 3851 free(envdup); 3852 3853 return (err ? 1 : 0); 3854 } 3855 3856 /* 3857 * zpool sync [-f] [pool] ... 3858 * 3859 * -f (undocumented) force uberblock (and config including zpool cache file) 3860 * update. 3861 * 3862 * Sync the specified pool(s). 3863 * Without arguments "zpool sync" will sync all pools. 3864 * This command initiates TXG sync(s) and will return after the TXG(s) commit. 
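 * Illustrative invocations (hypothetical pool name):
 *
 *	# zpool sync		(sync every imported pool)
 *	# zpool sync tank	(sync only the pool 'tank')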
3865 * 3866 */ 3867 static int 3868 zpool_do_sync(int argc, char **argv) 3869 { 3870 int ret; 3871 boolean_t force = B_FALSE; 3872 3873 /* check options */ 3874 while ((ret = getopt(argc, argv, "f")) != -1) { 3875 switch (ret) { 3876 case 'f': 3877 force = B_TRUE; 3878 break; 3879 case '?': 3880 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 3881 optopt); 3882 usage(B_FALSE); 3883 } 3884 } 3885 3886 argc -= optind; 3887 argv += optind; 3888 3889 /* if argc == 0 we will execute zpool_sync_one on all pools */ 3890 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 3891 B_FALSE, zpool_sync_one, &force); 3892 3893 return (ret); 3894 } 3895 3896 typedef struct iostat_cbdata { 3897 uint64_t cb_flags; 3898 int cb_namewidth; 3899 int cb_iteration; 3900 boolean_t cb_verbose; 3901 boolean_t cb_literal; 3902 boolean_t cb_scripted; 3903 zpool_list_t *cb_list; 3904 vdev_cmd_data_list_t *vcdl; 3905 vdev_cbdata_t cb_vdevs; 3906 } iostat_cbdata_t; 3907 3908 /* iostat labels */ 3909 typedef struct name_and_columns { 3910 const char *name; /* Column name */ 3911 unsigned int columns; /* Center name to this number of columns */ 3912 } name_and_columns_t; 3913 3914 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */ 3915 3916 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] = 3917 { 3918 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2}, 3919 {NULL}}, 3920 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 3921 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1}, 3922 {NULL}}, 3923 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2}, 3924 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2}, 3925 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}}, 3926 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 3927 {"asyncq_wait", 2}, {NULL}}, 3928 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2}, 3929 {"async_read", 2}, {"async_write", 2}, {"scrub", 2}, 3930 {"trim", 2}, {"rebuild", 2}, {NULL}}, 3931 }; 3932 3933 /* Shorthand - if "columns" field not set, default to 1 column */ 3934 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] = 3935 { 3936 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"}, 3937 {"write"}, {NULL}}, 3938 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 3939 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"}, 3940 {NULL}}, 3941 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"}, 3942 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"}, 3943 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}}, 3944 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 3945 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"}, 3946 {NULL}}, 3947 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 3948 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 3949 {"ind"}, {"agg"}, {NULL}}, 3950 }; 3951 3952 static const char *histo_to_title[] = { 3953 [IOS_L_HISTO] = "latency", 3954 [IOS_RQ_HISTO] = "req_size", 3955 }; 3956 3957 /* 3958 * Return the number of labels in a null-terminated name_and_columns_t 3959 * array. 3960 * 3961 */ 3962 static unsigned int 3963 label_array_len(const name_and_columns_t *labels) 3964 { 3965 int i = 0; 3966 3967 while (labels[i].name) 3968 i++; 3969 3970 return (i); 3971 } 3972 3973 /* 3974 * Return the number of strings in a null-terminated string array. 
3975 * For example: 3976 * 3977 * const char foo[] = {"bar", "baz", NULL} 3978 * 3979 * returns 2 3980 */ 3981 static uint64_t 3982 str_array_len(const char *array[]) 3983 { 3984 uint64_t i = 0; 3985 while (array[i]) 3986 i++; 3987 3988 return (i); 3989 } 3990 3991 3992 /* 3993 * Return a default column width for default/latency/queue columns. This does 3994 * not include histograms, which have their columns autosized. 3995 */ 3996 static unsigned int 3997 default_column_width(iostat_cbdata_t *cb, enum iostat_type type) 3998 { 3999 unsigned long column_width = 5; /* Normal niceprint */ 4000 static unsigned long widths[] = { 4001 /* 4002 * Choose some sane default column sizes for printing the 4003 * raw numbers. 4004 */ 4005 [IOS_DEFAULT] = 15, /* 1PB capacity */ 4006 [IOS_LATENCY] = 10, /* 1B ns = 10sec */ 4007 [IOS_QUEUES] = 6, /* 1M queue entries */ 4008 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */ 4009 [IOS_RQ_HISTO] = 6, /* 1M queue entries */ 4010 }; 4011 4012 if (cb->cb_literal) 4013 column_width = widths[type]; 4014 4015 return (column_width); 4016 } 4017 4018 /* 4019 * Print the column labels, i.e: 4020 * 4021 * capacity operations bandwidth 4022 * alloc free read write read write ... 4023 * 4024 * If force_column_width is set, use it for the column width. If not set, use 4025 * the default column width. 4026 */ 4027 static void 4028 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width, 4029 const name_and_columns_t labels[][IOSTAT_MAX_LABELS]) 4030 { 4031 int i, idx, s; 4032 int text_start, rw_column_width, spaces_to_end; 4033 uint64_t flags = cb->cb_flags; 4034 uint64_t f; 4035 unsigned int column_width = force_column_width; 4036 4037 /* For each bit set in flags */ 4038 for (f = flags; f; f &= ~(1ULL << idx)) { 4039 idx = lowbit64(f) - 1; 4040 if (!force_column_width) 4041 column_width = default_column_width(cb, idx); 4042 /* Print our top labels centered over "read write" label. */ 4043 for (i = 0; i < label_array_len(labels[idx]); i++) { 4044 const char *name = labels[idx][i].name; 4045 /* 4046 * We treat labels[][].columns == 0 as shorthand 4047 * for one column. It makes writing out the label 4048 * tables more concise. 4049 */ 4050 unsigned int columns = MAX(1, labels[idx][i].columns); 4051 unsigned int slen = strlen(name); 4052 4053 rw_column_width = (column_width * columns) + 4054 (2 * (columns - 1)); 4055 4056 text_start = (int)((rw_column_width) / columns - 4057 slen / columns); 4058 if (text_start < 0) 4059 text_start = 0; 4060 4061 printf(" "); /* Two spaces between columns */ 4062 4063 /* Space from beginning of column to label */ 4064 for (s = 0; s < text_start; s++) 4065 printf(" "); 4066 4067 printf("%s", name); 4068 4069 /* Print space after label to end of column */ 4070 spaces_to_end = rw_column_width - text_start - slen; 4071 if (spaces_to_end < 0) 4072 spaces_to_end = 0; 4073 4074 for (s = 0; s < spaces_to_end; s++) 4075 printf(" "); 4076 } 4077 } 4078 } 4079 4080 4081 /* 4082 * print_cmd_columns - Print custom column titles from -c 4083 * 4084 * If the user specified the "zpool status|iostat -c" then print their custom 4085 * column titles in the header. For example, print_cmd_columns() would print 4086 * the " col1 col2" part of this: 4087 * 4088 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2' 4089 * ... 
4090 * capacity operations bandwidth 4091 * pool alloc free read write read write col1 col2 4092 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 4093 * mypool 269K 1008M 0 0 107 946 4094 * mirror 269K 1008M 0 0 107 946 4095 * sdb - - 0 0 102 473 val1 val2 4096 * sdc - - 0 0 5 473 val1 val2 4097 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 4098 */ 4099 static void 4100 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes) 4101 { 4102 int i, j; 4103 vdev_cmd_data_t *data = &vcdl->data[0]; 4104 4105 if (vcdl->count == 0 || data == NULL) 4106 return; 4107 4108 /* 4109 * Each vdev cmd should have the same column names unless the user did 4110 * something weird with their cmd. Just take the column names from the 4111 * first vdev and assume it works for all of them. 4112 */ 4113 for (i = 0; i < vcdl->uniq_cols_cnt; i++) { 4114 printf(" "); 4115 if (use_dashes) { 4116 for (j = 0; j < vcdl->uniq_cols_width[i]; j++) 4117 printf("-"); 4118 } else { 4119 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i], 4120 vcdl->uniq_cols[i]); 4121 } 4122 } 4123 } 4124 4125 4126 /* 4127 * Utility function to print out a line of dashes like: 4128 * 4129 * -------------------------------- ----- ----- ----- ----- ----- 4130 * 4131 * ...or a dashed named-row line like: 4132 * 4133 * logs - - - - - 4134 * 4135 * @cb: iostat data 4136 * 4137 * @force_column_width If non-zero, use the value as the column width. 4138 * Otherwise use the default column widths. 4139 * 4140 * @name: Print a dashed named-row line starting 4141 * with @name. Otherwise, print a regular 4142 * dashed line. 4143 */ 4144 static void 4145 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width, 4146 const char *name) 4147 { 4148 int i; 4149 unsigned int namewidth; 4150 uint64_t flags = cb->cb_flags; 4151 uint64_t f; 4152 int idx; 4153 const name_and_columns_t *labels; 4154 const char *title; 4155 4156 4157 if (cb->cb_flags & IOS_ANYHISTO_M) { 4158 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 4159 } else if (cb->cb_vdevs.cb_names_count) { 4160 title = "vdev"; 4161 } else { 4162 title = "pool"; 4163 } 4164 4165 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 4166 name ? 
strlen(name) : 0); 4167 4168 4169 if (name) { 4170 printf("%-*s", namewidth, name); 4171 } else { 4172 for (i = 0; i < namewidth; i++) 4173 (void) printf("-"); 4174 } 4175 4176 /* For each bit in flags */ 4177 for (f = flags; f; f &= ~(1ULL << idx)) { 4178 unsigned int column_width; 4179 idx = lowbit64(f) - 1; 4180 if (force_column_width) 4181 column_width = force_column_width; 4182 else 4183 column_width = default_column_width(cb, idx); 4184 4185 labels = iostat_bottom_labels[idx]; 4186 for (i = 0; i < label_array_len(labels); i++) { 4187 if (name) 4188 printf(" %*s-", column_width - 1, " "); 4189 else 4190 printf(" %.*s", column_width, 4191 "--------------------"); 4192 } 4193 } 4194 } 4195 4196 4197 static void 4198 print_iostat_separator_impl(iostat_cbdata_t *cb, 4199 unsigned int force_column_width) 4200 { 4201 print_iostat_dashes(cb, force_column_width, NULL); 4202 } 4203 4204 static void 4205 print_iostat_separator(iostat_cbdata_t *cb) 4206 { 4207 print_iostat_separator_impl(cb, 0); 4208 } 4209 4210 static void 4211 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width, 4212 const char *histo_vdev_name) 4213 { 4214 unsigned int namewidth; 4215 const char *title; 4216 4217 if (cb->cb_flags & IOS_ANYHISTO_M) { 4218 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 4219 } else if (cb->cb_vdevs.cb_names_count) { 4220 title = "vdev"; 4221 } else { 4222 title = "pool"; 4223 } 4224 4225 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 4226 histo_vdev_name ? strlen(histo_vdev_name) : 0); 4227 4228 if (histo_vdev_name) 4229 printf("%-*s", namewidth, histo_vdev_name); 4230 else 4231 printf("%*s", namewidth, ""); 4232 4233 4234 print_iostat_labels(cb, force_column_width, iostat_top_labels); 4235 printf("\n"); 4236 4237 printf("%-*s", namewidth, title); 4238 4239 print_iostat_labels(cb, force_column_width, iostat_bottom_labels); 4240 if (cb->vcdl != NULL) 4241 print_cmd_columns(cb->vcdl, 0); 4242 4243 printf("\n"); 4244 4245 print_iostat_separator_impl(cb, force_column_width); 4246 4247 if (cb->vcdl != NULL) 4248 print_cmd_columns(cb->vcdl, 1); 4249 4250 printf("\n"); 4251 } 4252 4253 static void 4254 print_iostat_header(iostat_cbdata_t *cb) 4255 { 4256 print_iostat_header_impl(cb, 0, NULL); 4257 } 4258 4259 4260 /* 4261 * Display a single statistic. 4262 */ 4263 static void 4264 print_one_stat(uint64_t value, enum zfs_nicenum_format format, 4265 unsigned int column_size, boolean_t scripted) 4266 { 4267 char buf[64]; 4268 4269 zfs_nicenum_format(value, buf, sizeof (buf), format); 4270 4271 if (scripted) 4272 printf("\t%s", buf); 4273 else 4274 printf(" %*s", column_size, buf); 4275 } 4276 4277 /* 4278 * Calculate the default vdev stats 4279 * 4280 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting 4281 * stats into calcvs. 4282 */ 4283 static void 4284 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs, 4285 vdev_stat_t *calcvs) 4286 { 4287 int i; 4288 4289 memcpy(calcvs, newvs, sizeof (*calcvs)); 4290 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++) 4291 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]); 4292 4293 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++) 4294 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]); 4295 } 4296 4297 /* 4298 * Internal representation of the extended iostats data. 4299 * 4300 * The extended iostat stats are exported in nvlists as either uint64_t arrays 4301 * or single uint64_t's. We make both look like arrays to make them easier 4302 * to process. 
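 * (For example, the latency histograms such as
 * ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO arrive as uint64_t arrays, while the
 * queue depths such as ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE arrive as
 * single uint64_t values.)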
In order to make single uint64_t's look like arrays, we set 4303 * __data to the stat data, and then set *data = &__data with count = 1. Then, 4304 * we can just use *data and count. 4305 */ 4306 struct stat_array { 4307 uint64_t *data; 4308 uint_t count; /* Number of entries in data[] */ 4309 uint64_t __data; /* Only used when data is a single uint64_t */ 4310 }; 4311 4312 static uint64_t 4313 stat_histo_max(struct stat_array *nva, unsigned int len) 4314 { 4315 uint64_t max = 0; 4316 int i; 4317 for (i = 0; i < len; i++) 4318 max = MAX(max, array64_max(nva[i].data, nva[i].count)); 4319 4320 return (max); 4321 } 4322 4323 /* 4324 * Helper function to lookup a uint64_t array or uint64_t value and store its 4325 * data as a stat_array. If the nvpair is a single uint64_t value, then we make 4326 * it look like a one element array to make it easier to process. 4327 */ 4328 static int 4329 nvpair64_to_stat_array(nvlist_t *nvl, const char *name, 4330 struct stat_array *nva) 4331 { 4332 nvpair_t *tmp; 4333 int ret; 4334 4335 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0); 4336 switch (nvpair_type(tmp)) { 4337 case DATA_TYPE_UINT64_ARRAY: 4338 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count); 4339 break; 4340 case DATA_TYPE_UINT64: 4341 ret = nvpair_value_uint64(tmp, &nva->__data); 4342 nva->data = &nva->__data; 4343 nva->count = 1; 4344 break; 4345 default: 4346 /* Not a uint64_t */ 4347 ret = EINVAL; 4348 break; 4349 } 4350 4351 return (ret); 4352 } 4353 4354 /* 4355 * Given a list of nvlist names, look up the extended stats in newnv and oldnv, 4356 * subtract them, and return the results in a newly allocated stat_array. 4357 * You must free the returned array after you are done with it with 4358 * free_calc_stats(). 4359 * 4360 * Additionally, you can set "oldnv" to NULL if you simply want the newnv 4361 * values. 
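 *
 * A minimal usage sketch (illustrative only; the real callers pass larger
 * name lists and take their lengths from ARRAY_SIZE() or str_array_len()):
 *
 *	const char *names[] = { ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO };
 *	struct stat_array *nva;
 *
 *	nva = calc_and_alloc_stats_ex(names, 1, oldnv, newnv);
 *	... nva[0].data[0 .. nva[0].count - 1] now holds the new-minus-old
 *	    deltas (or the raw newnv values when oldnv is NULL) ...
 *	free_calc_stats(nva, 1);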
4362 */ 4363 static struct stat_array * 4364 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv, 4365 nvlist_t *newnv) 4366 { 4367 nvlist_t *oldnvx = NULL, *newnvx; 4368 struct stat_array *oldnva, *newnva, *calcnva; 4369 int i, j; 4370 unsigned int alloc_size = (sizeof (struct stat_array)) * len; 4371 4372 /* Extract our extended stats nvlist from the main list */ 4373 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX, 4374 &newnvx) == 0); 4375 if (oldnv) { 4376 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX, 4377 &oldnvx) == 0); 4378 } 4379 4380 newnva = safe_malloc(alloc_size); 4381 oldnva = safe_malloc(alloc_size); 4382 calcnva = safe_malloc(alloc_size); 4383 4384 for (j = 0; j < len; j++) { 4385 verify(nvpair64_to_stat_array(newnvx, names[j], 4386 &newnva[j]) == 0); 4387 calcnva[j].count = newnva[j].count; 4388 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]); 4389 calcnva[j].data = safe_malloc(alloc_size); 4390 memcpy(calcnva[j].data, newnva[j].data, alloc_size); 4391 4392 if (oldnvx) { 4393 verify(nvpair64_to_stat_array(oldnvx, names[j], 4394 &oldnva[j]) == 0); 4395 for (i = 0; i < oldnva[j].count; i++) 4396 calcnva[j].data[i] -= oldnva[j].data[i]; 4397 } 4398 } 4399 free(newnva); 4400 free(oldnva); 4401 return (calcnva); 4402 } 4403 4404 static void 4405 free_calc_stats(struct stat_array *nva, unsigned int len) 4406 { 4407 int i; 4408 for (i = 0; i < len; i++) 4409 free(nva[i].data); 4410 4411 free(nva); 4412 } 4413 4414 static void 4415 print_iostat_histo(struct stat_array *nva, unsigned int len, 4416 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth, 4417 double scale) 4418 { 4419 int i, j; 4420 char buf[6]; 4421 uint64_t val; 4422 enum zfs_nicenum_format format; 4423 unsigned int buckets; 4424 unsigned int start_bucket; 4425 4426 if (cb->cb_literal) 4427 format = ZFS_NICENUM_RAW; 4428 else 4429 format = ZFS_NICENUM_1024; 4430 4431 /* All these histos are the same size, so just use nva[0].count */ 4432 buckets = nva[0].count; 4433 4434 if (cb->cb_flags & IOS_RQ_HISTO_M) { 4435 /* Start at 512 - req size should never be lower than this */ 4436 start_bucket = 9; 4437 } else { 4438 start_bucket = 0; 4439 } 4440 4441 for (j = start_bucket; j < buckets; j++) { 4442 /* Print histogram bucket label */ 4443 if (cb->cb_flags & IOS_L_HISTO_M) { 4444 /* Ending range of this bucket */ 4445 val = (1UL << (j + 1)) - 1; 4446 zfs_nicetime(val, buf, sizeof (buf)); 4447 } else { 4448 /* Request size (starting range of bucket) */ 4449 val = (1UL << j); 4450 zfs_nicenum(val, buf, sizeof (buf)); 4451 } 4452 4453 if (cb->cb_scripted) 4454 printf("%llu", (u_longlong_t)val); 4455 else 4456 printf("%-*s", namewidth, buf); 4457 4458 /* Print the values on the line */ 4459 for (i = 0; i < len; i++) { 4460 print_one_stat(nva[i].data[j] * scale, format, 4461 column_width, cb->cb_scripted); 4462 } 4463 printf("\n"); 4464 } 4465 } 4466 4467 static void 4468 print_solid_separator(unsigned int length) 4469 { 4470 while (length--) 4471 printf("-"); 4472 printf("\n"); 4473 } 4474 4475 static void 4476 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv, 4477 nvlist_t *newnv, double scale, const char *name) 4478 { 4479 unsigned int column_width; 4480 unsigned int namewidth; 4481 unsigned int entire_width; 4482 enum iostat_type type; 4483 struct stat_array *nva; 4484 const char **names; 4485 unsigned int names_len; 4486 4487 /* What type of histo are we? 
*/ 4488 type = IOS_HISTO_IDX(cb->cb_flags); 4489 4490 /* Get NULL-terminated array of nvlist names for our histo */ 4491 names = vsx_type_to_nvlist[type]; 4492 names_len = str_array_len(names); /* num of names */ 4493 4494 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv); 4495 4496 if (cb->cb_literal) { 4497 column_width = MAX(5, 4498 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1); 4499 } else { 4500 column_width = 5; 4501 } 4502 4503 namewidth = MAX(cb->cb_namewidth, 4504 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)])); 4505 4506 /* 4507 * Calculate the entire line width of what we're printing. The 4508 * +2 is for the two spaces between columns: 4509 */ 4510 /* read write */ 4511 /* ----- ----- */ 4512 /* |___| <---------- column_width */ 4513 /* */ 4514 /* |__________| <--- entire_width */ 4515 /* */ 4516 entire_width = namewidth + (column_width + 2) * 4517 label_array_len(iostat_bottom_labels[type]); 4518 4519 if (cb->cb_scripted) 4520 printf("%s\n", name); 4521 else 4522 print_iostat_header_impl(cb, column_width, name); 4523 4524 print_iostat_histo(nva, names_len, cb, column_width, 4525 namewidth, scale); 4526 4527 free_calc_stats(nva, names_len); 4528 if (!cb->cb_scripted) 4529 print_solid_separator(entire_width); 4530 } 4531 4532 /* 4533 * Calculate the average latency of a power-of-two latency histogram 4534 */ 4535 static uint64_t 4536 single_histo_average(uint64_t *histo, unsigned int buckets) 4537 { 4538 int i; 4539 uint64_t count = 0, total = 0; 4540 4541 for (i = 0; i < buckets; i++) { 4542 /* 4543 * Our buckets are power-of-two latency ranges. Use the 4544 * midpoint latency of each bucket to calculate the average. 4545 * For example: 4546 * 4547 * Bucket Midpoint 4548 * 8ns-15ns: 12ns 4549 * 16ns-31ns: 24ns 4550 * ... 4551 */ 4552 if (histo[i] != 0) { 4553 total += histo[i] * (((1UL << i) + ((1UL << i)/2))); 4554 count += histo[i]; 4555 } 4556 } 4557 4558 /* Prevent divide by zero */ 4559 return (count == 0 ? 
0 : total / count); 4560 } 4561 4562 static void 4563 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv) 4564 { 4565 const char *names[] = { 4566 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, 4567 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, 4568 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, 4569 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, 4570 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, 4571 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, 4572 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, 4573 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, 4574 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, 4575 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, 4576 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE, 4577 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE, 4578 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE, 4579 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE, 4580 }; 4581 4582 struct stat_array *nva; 4583 4584 unsigned int column_width = default_column_width(cb, IOS_QUEUES); 4585 enum zfs_nicenum_format format; 4586 4587 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv); 4588 4589 if (cb->cb_literal) 4590 format = ZFS_NICENUM_RAW; 4591 else 4592 format = ZFS_NICENUM_1024; 4593 4594 for (int i = 0; i < ARRAY_SIZE(names); i++) { 4595 uint64_t val = nva[i].data[0]; 4596 print_one_stat(val, format, column_width, cb->cb_scripted); 4597 } 4598 4599 free_calc_stats(nva, ARRAY_SIZE(names)); 4600 } 4601 4602 static void 4603 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv, 4604 nvlist_t *newnv) 4605 { 4606 int i; 4607 uint64_t val; 4608 const char *names[] = { 4609 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, 4610 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, 4611 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, 4612 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, 4613 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO, 4614 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO, 4615 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO, 4616 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO, 4617 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO, 4618 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, 4619 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO, 4620 }; 4621 struct stat_array *nva; 4622 4623 unsigned int column_width = default_column_width(cb, IOS_LATENCY); 4624 enum zfs_nicenum_format format; 4625 4626 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv); 4627 4628 if (cb->cb_literal) 4629 format = ZFS_NICENUM_RAWTIME; 4630 else 4631 format = ZFS_NICENUM_TIME; 4632 4633 /* Print our avg latencies on the line */ 4634 for (i = 0; i < ARRAY_SIZE(names); i++) { 4635 /* Compute average latency for a latency histo */ 4636 val = single_histo_average(nva[i].data, nva[i].count); 4637 print_one_stat(val, format, column_width, cb->cb_scripted); 4638 } 4639 free_calc_stats(nva, ARRAY_SIZE(names)); 4640 } 4641 4642 /* 4643 * Print default statistics (capacity/operations/bandwidth) 4644 */ 4645 static void 4646 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale) 4647 { 4648 unsigned int column_width = default_column_width(cb, IOS_DEFAULT); 4649 enum zfs_nicenum_format format; 4650 char na; /* char to print for "not applicable" values */ 4651 4652 if (cb->cb_literal) { 4653 format = ZFS_NICENUM_RAW; 4654 na = '0'; 4655 } else { 4656 format = ZFS_NICENUM_1024; 4657 na = '-'; 4658 } 4659 4660 /* only toplevel vdevs have capacity stats */ 4661 if (vs->vs_space == 0) { 4662 if (cb->cb_scripted) 4663 printf("\t%c\t%c", na, na); 4664 else 4665 printf(" %*c %*c", column_width, na, column_width, 4666 na); 4667 } else { 4668 print_one_stat(vs->vs_alloc, format, column_width, 4669 cb->cb_scripted); 4670 print_one_stat(vs->vs_space - vs->vs_alloc, format, 4671 column_width, cb->cb_scripted); 4672 } 4673 4674 
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale), 4675 format, column_width, cb->cb_scripted); 4676 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale), 4677 format, column_width, cb->cb_scripted); 4678 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale), 4679 format, column_width, cb->cb_scripted); 4680 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale), 4681 format, column_width, cb->cb_scripted); 4682 } 4683 4684 static const char *const class_name[] = { 4685 VDEV_ALLOC_BIAS_DEDUP, 4686 VDEV_ALLOC_BIAS_SPECIAL, 4687 VDEV_ALLOC_CLASS_LOGS 4688 }; 4689 4690 /* 4691 * Print out all the statistics for the given vdev. This can either be the 4692 * toplevel configuration, or called recursively. If 'name' is NULL, then this 4693 * is a verbose output, and we don't want to display the toplevel pool stats. 4694 * 4695 * Returns the number of stat lines printed. 4696 */ 4697 static unsigned int 4698 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, 4699 nvlist_t *newnv, iostat_cbdata_t *cb, int depth) 4700 { 4701 nvlist_t **oldchild, **newchild; 4702 uint_t c, children, oldchildren; 4703 vdev_stat_t *oldvs, *newvs, *calcvs; 4704 vdev_stat_t zerovs = { 0 }; 4705 char *vname; 4706 int i; 4707 int ret = 0; 4708 uint64_t tdelta; 4709 double scale; 4710 4711 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) 4712 return (ret); 4713 4714 calcvs = safe_malloc(sizeof (*calcvs)); 4715 4716 if (oldnv != NULL) { 4717 verify(nvlist_lookup_uint64_array(oldnv, 4718 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0); 4719 } else { 4720 oldvs = &zerovs; 4721 } 4722 4723 /* Do we only want to see a specific vdev? */ 4724 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) { 4725 /* Yes we do. Is this the vdev? */ 4726 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) { 4727 /* 4728 * This is our vdev. Since it is the only vdev we 4729 * will be displaying, make depth = 0 so that it 4730 * doesn't get indented. 4731 */ 4732 depth = 0; 4733 break; 4734 } 4735 } 4736 4737 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) { 4738 /* Couldn't match the name */ 4739 goto children; 4740 } 4741 4742 4743 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS, 4744 (uint64_t **)&newvs, &c) == 0); 4745 4746 /* 4747 * Print the vdev name unless it's is a histogram. Histograms 4748 * display the vdev name in the header itself. 4749 */ 4750 if (!(cb->cb_flags & IOS_ANYHISTO_M)) { 4751 if (cb->cb_scripted) { 4752 printf("%s", name); 4753 } else { 4754 if (strlen(name) + depth > cb->cb_namewidth) 4755 (void) printf("%*s%s", depth, "", name); 4756 else 4757 (void) printf("%*s%s%*s", depth, "", name, 4758 (int)(cb->cb_namewidth - strlen(name) - 4759 depth), ""); 4760 } 4761 } 4762 4763 /* Calculate our scaling factor */ 4764 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp; 4765 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) { 4766 /* 4767 * If we specify printing histograms with no time interval, then 4768 * print the histogram numbers over the entire lifetime of the 4769 * vdev. 
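		 * Otherwise the stats below are interval deltas, and
		 * scale = NANOSEC / tdelta converts them to per-second
		 * rates; e.g. a 2 second interval gives a tdelta of roughly
		 * 2 * NANOSEC and a scale of roughly 0.5.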
4770 */ 4771 scale = 1; 4772 } else { 4773 if (tdelta == 0) 4774 scale = 1.0; 4775 else 4776 scale = (double)NANOSEC / tdelta; 4777 } 4778 4779 if (cb->cb_flags & IOS_DEFAULT_M) { 4780 calc_default_iostats(oldvs, newvs, calcvs); 4781 print_iostat_default(calcvs, cb, scale); 4782 } 4783 if (cb->cb_flags & IOS_LATENCY_M) 4784 print_iostat_latency(cb, oldnv, newnv); 4785 if (cb->cb_flags & IOS_QUEUES_M) 4786 print_iostat_queues(cb, newnv); 4787 if (cb->cb_flags & IOS_ANYHISTO_M) { 4788 printf("\n"); 4789 print_iostat_histos(cb, oldnv, newnv, scale, name); 4790 } 4791 4792 if (cb->vcdl != NULL) { 4793 char *path; 4794 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH, 4795 &path) == 0) { 4796 printf(" "); 4797 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 4798 } 4799 } 4800 4801 if (!(cb->cb_flags & IOS_ANYHISTO_M)) 4802 printf("\n"); 4803 4804 ret++; 4805 4806 children: 4807 4808 free(calcvs); 4809 4810 if (!cb->cb_verbose) 4811 return (ret); 4812 4813 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN, 4814 &newchild, &children) != 0) 4815 return (ret); 4816 4817 if (oldnv) { 4818 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN, 4819 &oldchild, &oldchildren) != 0) 4820 return (ret); 4821 4822 children = MIN(oldchildren, children); 4823 } 4824 4825 /* 4826 * print normal top-level devices 4827 */ 4828 for (c = 0; c < children; c++) { 4829 uint64_t ishole = B_FALSE, islog = B_FALSE; 4830 4831 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE, 4832 &ishole); 4833 4834 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG, 4835 &islog); 4836 4837 if (ishole || islog) 4838 continue; 4839 4840 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 4841 continue; 4842 4843 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 4844 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID); 4845 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, 4846 newchild[c], cb, depth + 2); 4847 free(vname); 4848 } 4849 4850 /* 4851 * print all other top-level devices 4852 */ 4853 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { 4854 boolean_t printed = B_FALSE; 4855 4856 for (c = 0; c < children; c++) { 4857 uint64_t islog = B_FALSE; 4858 char *bias = NULL; 4859 char *type = NULL; 4860 4861 (void) nvlist_lookup_uint64(newchild[c], 4862 ZPOOL_CONFIG_IS_LOG, &islog); 4863 if (islog) { 4864 bias = (char *)VDEV_ALLOC_CLASS_LOGS; 4865 } else { 4866 (void) nvlist_lookup_string(newchild[c], 4867 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 4868 (void) nvlist_lookup_string(newchild[c], 4869 ZPOOL_CONFIG_TYPE, &type); 4870 } 4871 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 4872 continue; 4873 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 4874 continue; 4875 4876 if (!printed) { 4877 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && 4878 !cb->cb_scripted && 4879 !cb->cb_vdevs.cb_names) { 4880 print_iostat_dashes(cb, 0, 4881 class_name[n]); 4882 } 4883 printf("\n"); 4884 printed = B_TRUE; 4885 } 4886 4887 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 4888 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID); 4889 ret += print_vdev_stats(zhp, vname, oldnv ? 
4890 oldchild[c] : NULL, newchild[c], cb, depth + 2); 4891 free(vname); 4892 } 4893 } 4894 4895 /* 4896 * Include level 2 ARC devices in iostat output 4897 */ 4898 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE, 4899 &newchild, &children) != 0) 4900 return (ret); 4901 4902 if (oldnv) { 4903 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE, 4904 &oldchild, &oldchildren) != 0) 4905 return (ret); 4906 4907 children = MIN(oldchildren, children); 4908 } 4909 4910 if (children > 0) { 4911 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted && 4912 !cb->cb_vdevs.cb_names) { 4913 print_iostat_dashes(cb, 0, "cache"); 4914 } 4915 printf("\n"); 4916 4917 for (c = 0; c < children; c++) { 4918 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 4919 cb->cb_vdevs.cb_name_flags); 4920 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] 4921 : NULL, newchild[c], cb, depth + 2); 4922 free(vname); 4923 } 4924 } 4925 4926 return (ret); 4927 } 4928 4929 static int 4930 refresh_iostat(zpool_handle_t *zhp, void *data) 4931 { 4932 iostat_cbdata_t *cb = data; 4933 boolean_t missing; 4934 4935 /* 4936 * If the pool has disappeared, remove it from the list and continue. 4937 */ 4938 if (zpool_refresh_stats(zhp, &missing) != 0) 4939 return (-1); 4940 4941 if (missing) 4942 pool_list_remove(cb->cb_list, zhp); 4943 4944 return (0); 4945 } 4946 4947 /* 4948 * Callback to print out the iostats for the given pool. 4949 */ 4950 static int 4951 print_iostat(zpool_handle_t *zhp, void *data) 4952 { 4953 iostat_cbdata_t *cb = data; 4954 nvlist_t *oldconfig, *newconfig; 4955 nvlist_t *oldnvroot, *newnvroot; 4956 int ret; 4957 4958 newconfig = zpool_get_config(zhp, &oldconfig); 4959 4960 if (cb->cb_iteration == 1) 4961 oldconfig = NULL; 4962 4963 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE, 4964 &newnvroot) == 0); 4965 4966 if (oldconfig == NULL) 4967 oldnvroot = NULL; 4968 else 4969 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE, 4970 &oldnvroot) == 0); 4971 4972 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, 4973 cb, 0); 4974 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) && 4975 !cb->cb_scripted && cb->cb_verbose && 4976 !cb->cb_vdevs.cb_names_count) { 4977 print_iostat_separator(cb); 4978 if (cb->vcdl != NULL) { 4979 print_cmd_columns(cb->vcdl, 1); 4980 } 4981 printf("\n"); 4982 } 4983 4984 return (ret); 4985 } 4986 4987 static int 4988 get_columns(void) 4989 { 4990 struct winsize ws; 4991 int columns = 80; 4992 int error; 4993 4994 if (isatty(STDOUT_FILENO)) { 4995 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws); 4996 if (error == 0) 4997 columns = ws.ws_col; 4998 } else { 4999 columns = 999; 5000 } 5001 5002 return (columns); 5003 } 5004 5005 /* 5006 * Return the required length of the pool/vdev name column. The minimum 5007 * allowed width and output formatting flags must be provided. 
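 *
 * Typical use (simplified from get_namewidth_iostat() below, which also
 * clamps the result to what fits in the terminal):
 *
 *	cb->cb_namewidth = get_namewidth(zhp, cb->cb_namewidth,
 *	    cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);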
5008 */ 5009 static int 5010 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose) 5011 { 5012 nvlist_t *config, *nvroot; 5013 int width = min_width; 5014 5015 if ((config = zpool_get_config(zhp, NULL)) != NULL) { 5016 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5017 &nvroot) == 0); 5018 size_t poolname_len = strlen(zpool_get_name(zhp)); 5019 if (verbose == B_FALSE) { 5020 width = MAX(poolname_len, min_width); 5021 } else { 5022 width = MAX(poolname_len, 5023 max_width(zhp, nvroot, 0, min_width, flags)); 5024 } 5025 } 5026 5027 return (width); 5028 } 5029 5030 /* 5031 * Parse the input string, get the 'interval' and 'count' value if there is one. 5032 */ 5033 static void 5034 get_interval_count(int *argcp, char **argv, float *iv, 5035 unsigned long *cnt) 5036 { 5037 float interval = 0; 5038 unsigned long count = 0; 5039 int argc = *argcp; 5040 5041 /* 5042 * Determine if the last argument is an integer or a pool name 5043 */ 5044 if (argc > 0 && zfs_isnumber(argv[argc - 1])) { 5045 char *end; 5046 5047 errno = 0; 5048 interval = strtof(argv[argc - 1], &end); 5049 5050 if (*end == '\0' && errno == 0) { 5051 if (interval == 0) { 5052 (void) fprintf(stderr, gettext( 5053 "interval cannot be zero\n")); 5054 usage(B_FALSE); 5055 } 5056 /* 5057 * Ignore the last parameter 5058 */ 5059 argc--; 5060 } else { 5061 /* 5062 * If this is not a valid number, just plow on. The 5063 * user will get a more informative error message later 5064 * on. 5065 */ 5066 interval = 0; 5067 } 5068 } 5069 5070 /* 5071 * If the last argument is also an integer, then we have both a count 5072 * and an interval. 5073 */ 5074 if (argc > 0 && zfs_isnumber(argv[argc - 1])) { 5075 char *end; 5076 5077 errno = 0; 5078 count = interval; 5079 interval = strtof(argv[argc - 1], &end); 5080 5081 if (*end == '\0' && errno == 0) { 5082 if (interval == 0) { 5083 (void) fprintf(stderr, gettext( 5084 "interval cannot be zero\n")); 5085 usage(B_FALSE); 5086 } 5087 5088 /* 5089 * Ignore the last parameter 5090 */ 5091 argc--; 5092 } else { 5093 interval = 0; 5094 } 5095 } 5096 5097 *iv = interval; 5098 *cnt = count; 5099 *argcp = argc; 5100 } 5101 5102 static void 5103 get_timestamp_arg(char c) 5104 { 5105 if (c == 'u') 5106 timestamp_fmt = UDATE; 5107 else if (c == 'd') 5108 timestamp_fmt = DDATE; 5109 else 5110 usage(B_FALSE); 5111 } 5112 5113 /* 5114 * Return stat flags that are supported by all pools by both the module and 5115 * zpool iostat. "*data" should be initialized to all 0xFFs before running. 5116 * It will get ANDed down until only the flags that are supported on all pools 5117 * remain. 5118 */ 5119 static int 5120 get_stat_flags_cb(zpool_handle_t *zhp, void *data) 5121 { 5122 uint64_t *mask = data; 5123 nvlist_t *config, *nvroot, *nvx; 5124 uint64_t flags = 0; 5125 int i, j; 5126 5127 config = zpool_get_config(zhp, NULL); 5128 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5129 &nvroot) == 0); 5130 5131 /* Default stats are always supported, but for completeness.. */ 5132 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS)) 5133 flags |= IOS_DEFAULT_M; 5134 5135 /* Get our extended stats nvlist from the main list */ 5136 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX, 5137 &nvx) != 0) { 5138 /* 5139 * No extended stats; they're probably running an older 5140 * module. No big deal, we support that too. 
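		 * In that case only IOS_DEFAULT_M survives in the caller's
		 * mask, so any extended stats the user asked for (e.g. -l or
		 * -q) are reported as unsupported by zpool_do_iostat().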
5141 */ 5142 goto end; 5143 } 5144 5145 /* For each extended stat, make sure all its nvpairs are supported */ 5146 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) { 5147 if (!vsx_type_to_nvlist[j][0]) 5148 continue; 5149 5150 /* Start off by assuming the flag is supported, then check */ 5151 flags |= (1ULL << j); 5152 for (i = 0; vsx_type_to_nvlist[j][i]; i++) { 5153 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) { 5154 /* flag isn't supported */ 5155 flags = flags & ~(1ULL << j); 5156 break; 5157 } 5158 } 5159 } 5160 end: 5161 *mask = *mask & flags; 5162 return (0); 5163 } 5164 5165 /* 5166 * Return a bitmask of stats that are supported on all pools by both the module 5167 * and zpool iostat. 5168 */ 5169 static uint64_t 5170 get_stat_flags(zpool_list_t *list) 5171 { 5172 uint64_t mask = -1; 5173 5174 /* 5175 * get_stat_flags_cb() will lop off bits from "mask" until only the 5176 * flags that are supported on all pools remain. 5177 */ 5178 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask); 5179 return (mask); 5180 } 5181 5182 /* 5183 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise. 5184 */ 5185 static int 5186 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data) 5187 { 5188 uint64_t guid; 5189 vdev_cbdata_t *cb = cb_data; 5190 zpool_handle_t *zhp = zhp_data; 5191 5192 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 5193 return (0); 5194 5195 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0])); 5196 } 5197 5198 /* 5199 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise. 5200 */ 5201 static int 5202 is_vdev(zpool_handle_t *zhp, void *cb_data) 5203 { 5204 return (for_each_vdev(zhp, is_vdev_cb, cb_data)); 5205 } 5206 5207 /* 5208 * Check if vdevs are in a pool 5209 * 5210 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise 5211 * return 0. If pool_name is NULL, then search all pools. 5212 */ 5213 static int 5214 are_vdevs_in_pool(int argc, char **argv, char *pool_name, 5215 vdev_cbdata_t *cb) 5216 { 5217 char **tmp_name; 5218 int ret = 0; 5219 int i; 5220 int pool_count = 0; 5221 5222 if ((argc == 0) || !*argv) 5223 return (0); 5224 5225 if (pool_name) 5226 pool_count = 1; 5227 5228 /* Temporarily hijack cb_names for a second... */ 5229 tmp_name = cb->cb_names; 5230 5231 /* Go though our list of prospective vdev names */ 5232 for (i = 0; i < argc; i++) { 5233 cb->cb_names = argv + i; 5234 5235 /* Is this name a vdev in our pools? */ 5236 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL, 5237 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb); 5238 if (!ret) { 5239 /* No match */ 5240 break; 5241 } 5242 } 5243 5244 cb->cb_names = tmp_name; 5245 5246 return (ret); 5247 } 5248 5249 static int 5250 is_pool_cb(zpool_handle_t *zhp, void *data) 5251 { 5252 char *name = data; 5253 if (strcmp(name, zpool_get_name(zhp)) == 0) 5254 return (1); 5255 5256 return (0); 5257 } 5258 5259 /* 5260 * Do we have a pool named *name? If so, return 1, otherwise 0. 5261 */ 5262 static int 5263 is_pool(char *name) 5264 { 5265 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE, 5266 is_pool_cb, name)); 5267 } 5268 5269 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */ 5270 static int 5271 are_all_pools(int argc, char **argv) 5272 { 5273 if ((argc == 0) || !*argv) 5274 return (0); 5275 5276 while (--argc >= 0) 5277 if (!is_pool(argv[argc])) 5278 return (0); 5279 5280 return (1); 5281 } 5282 5283 /* 5284 * Helper function to print out vdev/pool names we can't resolve. 
Used for an 5285 * error message. 5286 */ 5287 static void 5288 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name, 5289 vdev_cbdata_t *cb) 5290 { 5291 int i; 5292 char *name; 5293 char *str; 5294 for (i = 0; i < argc; i++) { 5295 name = argv[i]; 5296 5297 if (is_pool(name)) 5298 str = gettext("pool"); 5299 else if (are_vdevs_in_pool(1, &name, pool_name, cb)) 5300 str = gettext("vdev in this pool"); 5301 else if (are_vdevs_in_pool(1, &name, NULL, cb)) 5302 str = gettext("vdev in another pool"); 5303 else 5304 str = gettext("unknown"); 5305 5306 fprintf(stderr, "\t%s (%s)\n", name, str); 5307 } 5308 } 5309 5310 /* 5311 * Same as get_interval_count(), but with additional checks to not misinterpret 5312 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in 5313 * cb.cb_vdevs.cb_name_flags. 5314 */ 5315 static void 5316 get_interval_count_filter_guids(int *argc, char **argv, float *interval, 5317 unsigned long *count, iostat_cbdata_t *cb) 5318 { 5319 char **tmpargv = argv; 5320 int argc_for_interval = 0; 5321 5322 /* Is the last arg an interval value? Or a guid? */ 5323 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL, 5324 &cb->cb_vdevs)) { 5325 /* 5326 * The last arg is not a guid, so it's probably an 5327 * interval value. 5328 */ 5329 argc_for_interval++; 5330 5331 if (*argc >= 2 && 5332 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL, 5333 &cb->cb_vdevs)) { 5334 /* 5335 * The 2nd to last arg is not a guid, so it's probably 5336 * an interval value. 5337 */ 5338 argc_for_interval++; 5339 } 5340 } 5341 5342 /* Point to our list of possible intervals */ 5343 tmpargv = &argv[*argc - argc_for_interval]; 5344 5345 *argc = *argc - argc_for_interval; 5346 get_interval_count(&argc_for_interval, tmpargv, 5347 interval, count); 5348 } 5349 5350 /* 5351 * Floating point sleep(). Allows you to pass in a floating point value for 5352 * seconds. 5353 */ 5354 static void 5355 fsleep(float sec) 5356 { 5357 struct timespec req; 5358 req.tv_sec = floor(sec); 5359 req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC; 5360 nanosleep(&req, NULL); 5361 } 5362 5363 /* 5364 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or 5365 * if we were unable to determine its size. 5366 */ 5367 static int 5368 terminal_height(void) 5369 { 5370 struct winsize win; 5371 5372 if (isatty(STDOUT_FILENO) == 0) 5373 return (-1); 5374 5375 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0) 5376 return (win.ws_row); 5377 5378 return (-1); 5379 } 5380 5381 /* 5382 * Run one of the zpool status/iostat -c scripts with the help (-h) option and 5383 * print the result. 5384 * 5385 * name: Short name of the script ('iostat'). 5386 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat'); 5387 */ 5388 static void 5389 print_zpool_script_help(char *name, char *path) 5390 { 5391 char *argv[] = {path, (char *)"-h", NULL}; 5392 char **lines = NULL; 5393 int lines_cnt = 0; 5394 int rc; 5395 5396 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines, 5397 &lines_cnt); 5398 if (rc != 0 || lines == NULL || lines_cnt <= 0) { 5399 if (lines != NULL) 5400 libzfs_free_str_array(lines, lines_cnt); 5401 return; 5402 } 5403 5404 for (int i = 0; i < lines_cnt; i++) 5405 if (!is_blank_str(lines[i])) 5406 printf(" %-14s %s\n", name, lines[i]); 5407 5408 libzfs_free_str_array(lines, lines_cnt); 5409 } 5410 5411 /* 5412 * Go though the zpool status/iostat -c scripts in the user's path, run their 5413 * help option (-h), and print out the results. 
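 *
 * The directories walked here come from zpool_get_cmd_search_path() (see
 * print_zpool_script_list() below), typically the ZPOOL_SCRIPTS_PATH
 * environment variable when it is set or the default zpool.d script
 * directories otherwise.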
5414 */ 5415 static void 5416 print_zpool_dir_scripts(char *dirpath) 5417 { 5418 DIR *dir; 5419 struct dirent *ent; 5420 char fullpath[MAXPATHLEN]; 5421 struct stat dir_stat; 5422 5423 if ((dir = opendir(dirpath)) != NULL) { 5424 /* print all the files and directories within directory */ 5425 while ((ent = readdir(dir)) != NULL) { 5426 if (snprintf(fullpath, sizeof (fullpath), "%s/%s", 5427 dirpath, ent->d_name) >= sizeof (fullpath)) { 5428 (void) fprintf(stderr, 5429 gettext("internal error: " 5430 "ZPOOL_SCRIPTS_PATH too large.\n")); 5431 exit(1); 5432 } 5433 5434 /* Print the scripts */ 5435 if (stat(fullpath, &dir_stat) == 0) 5436 if (dir_stat.st_mode & S_IXUSR && 5437 S_ISREG(dir_stat.st_mode)) 5438 print_zpool_script_help(ent->d_name, 5439 fullpath); 5440 } 5441 closedir(dir); 5442 } 5443 } 5444 5445 /* 5446 * Print out help text for all zpool status/iostat -c scripts. 5447 */ 5448 static void 5449 print_zpool_script_list(const char *subcommand) 5450 { 5451 char *dir, *sp, *tmp; 5452 5453 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand); 5454 5455 sp = zpool_get_cmd_search_path(); 5456 if (sp == NULL) 5457 return; 5458 5459 for (dir = strtok_r(sp, ":", &tmp); 5460 dir != NULL; 5461 dir = strtok_r(NULL, ":", &tmp)) 5462 print_zpool_dir_scripts(dir); 5463 5464 free(sp); 5465 } 5466 5467 /* 5468 * Set the minimum pool/vdev name column width. The width must be at least 10, 5469 * but may be as large as the column width - 42 so it still fits on one line. 5470 * NOTE: 42 is the width of the default capacity/operations/bandwidth output 5471 */ 5472 static int 5473 get_namewidth_iostat(zpool_handle_t *zhp, void *data) 5474 { 5475 iostat_cbdata_t *cb = data; 5476 int width, available_width; 5477 5478 /* 5479 * get_namewidth() returns the maximum width of any name in that column 5480 * for any pool/vdev/device line that will be output. 5481 */ 5482 width = get_namewidth(zhp, cb->cb_namewidth, 5483 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose); 5484 5485 /* 5486 * The width we are calculating is the width of the header and also the 5487 * padding width for names that are less than maximum width. The stats 5488 * take up 42 characters, so the width available for names is: 5489 */ 5490 available_width = get_columns() - 42; 5491 5492 /* 5493 * If the maximum width fits on a screen, then great! Make everything 5494 * line up by justifying all lines to the same width. If that max 5495 * width is larger than what's available, the name plus stats won't fit 5496 * on one line, and justifying to that width would cause every line to 5497 * wrap on the screen. We only want lines with long names to wrap. 5498 * Limit the padding to what won't wrap. 5499 */ 5500 if (width > available_width) 5501 width = available_width; 5502 5503 /* 5504 * And regardless of whatever the screen width is (get_columns can 5505 * return 0 if the width is not known or less than 42 for a narrow 5506 * terminal) have the width be a minimum of 10. 5507 */ 5508 if (width < 10) 5509 width = 10; 5510 5511 /* Save the calculated width */ 5512 cb->cb_namewidth = width; 5513 5514 return (0); 5515 } 5516 5517 /* 5518 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name] 5519 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]] 5520 * [interval [count]] 5521 * 5522 * -c CMD For each vdev, run command CMD 5523 * -g Display guid for individual vdev name. 5524 * -L Follow links when resolving vdev path name. 5525 * -P Display full path for vdev name. 
5526 * -v Display statistics for individual vdevs 5527 * -h Display help 5528 * -p Display values in parsable (exact) format. 5529 * -H Scripted mode. Don't display headers, and separate properties 5530 * by a single tab. 5531 * -l Display average latency 5532 * -q Display queue depths 5533 * -w Display latency histograms 5534 * -r Display request size histogram 5535 * -T Display a timestamp in date(1) or Unix format 5536 * -n Only print headers once 5537 * 5538 * This command can be tricky because we want to be able to deal with pool 5539 * creation/destruction as well as vdev configuration changes. The bulk of this 5540 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely 5541 * on pool_list_update() to detect the addition of new pools. Configuration 5542 * changes are all handled within libzfs. 5543 */ 5544 int 5545 zpool_do_iostat(int argc, char **argv) 5546 { 5547 int c; 5548 int ret; 5549 int npools; 5550 float interval = 0; 5551 unsigned long count = 0; 5552 int winheight = 24; 5553 zpool_list_t *list; 5554 boolean_t verbose = B_FALSE; 5555 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE; 5556 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE; 5557 boolean_t omit_since_boot = B_FALSE; 5558 boolean_t guid = B_FALSE; 5559 boolean_t follow_links = B_FALSE; 5560 boolean_t full_name = B_FALSE; 5561 boolean_t headers_once = B_FALSE; 5562 iostat_cbdata_t cb = { 0 }; 5563 char *cmd = NULL; 5564 5565 /* Used for printing error message */ 5566 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q', 5567 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'}; 5568 5569 uint64_t unsupported_flags; 5570 5571 /* check options */ 5572 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) { 5573 switch (c) { 5574 case 'c': 5575 if (cmd != NULL) { 5576 fprintf(stderr, 5577 gettext("Can't set -c flag twice\n")); 5578 exit(1); 5579 } 5580 5581 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 5582 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 5583 fprintf(stderr, gettext( 5584 "Can't run -c, disabled by " 5585 "ZPOOL_SCRIPTS_ENABLED.\n")); 5586 exit(1); 5587 } 5588 5589 if ((getuid() <= 0 || geteuid() <= 0) && 5590 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 5591 fprintf(stderr, gettext( 5592 "Can't run -c with root privileges " 5593 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 5594 exit(1); 5595 } 5596 cmd = optarg; 5597 verbose = B_TRUE; 5598 break; 5599 case 'g': 5600 guid = B_TRUE; 5601 break; 5602 case 'L': 5603 follow_links = B_TRUE; 5604 break; 5605 case 'P': 5606 full_name = B_TRUE; 5607 break; 5608 case 'T': 5609 get_timestamp_arg(*optarg); 5610 break; 5611 case 'v': 5612 verbose = B_TRUE; 5613 break; 5614 case 'p': 5615 parsable = B_TRUE; 5616 break; 5617 case 'l': 5618 latency = B_TRUE; 5619 break; 5620 case 'q': 5621 queues = B_TRUE; 5622 break; 5623 case 'H': 5624 scripted = B_TRUE; 5625 break; 5626 case 'w': 5627 l_histo = B_TRUE; 5628 break; 5629 case 'r': 5630 rq_histo = B_TRUE; 5631 break; 5632 case 'y': 5633 omit_since_boot = B_TRUE; 5634 break; 5635 case 'n': 5636 headers_once = B_TRUE; 5637 break; 5638 case 'h': 5639 usage(B_FALSE); 5640 break; 5641 case '?': 5642 if (optopt == 'c') { 5643 print_zpool_script_list("iostat"); 5644 exit(0); 5645 } else { 5646 fprintf(stderr, 5647 gettext("invalid option '%c'\n"), optopt); 5648 } 5649 usage(B_FALSE); 5650 } 5651 } 5652 5653 argc -= optind; 5654 argv += optind; 5655 5656 cb.cb_literal = parsable; 5657 cb.cb_scripted = scripted; 5658 5659 if (guid) 5660 
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID; 5661 if (follow_links) 5662 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 5663 if (full_name) 5664 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH; 5665 cb.cb_iteration = 0; 5666 cb.cb_namewidth = 0; 5667 cb.cb_verbose = verbose; 5668 5669 /* Get our interval and count values (if any) */ 5670 if (guid) { 5671 get_interval_count_filter_guids(&argc, argv, &interval, 5672 &count, &cb); 5673 } else { 5674 get_interval_count(&argc, argv, &interval, &count); 5675 } 5676 5677 if (argc == 0) { 5678 /* No args, so just print the defaults. */ 5679 } else if (are_all_pools(argc, argv)) { 5680 /* All the args are pool names */ 5681 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) { 5682 /* All the args are vdevs */ 5683 cb.cb_vdevs.cb_names = argv; 5684 cb.cb_vdevs.cb_names_count = argc; 5685 argc = 0; /* No pools to process */ 5686 } else if (are_all_pools(1, argv)) { 5687 /* The first arg is a pool name */ 5688 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], 5689 &cb.cb_vdevs)) { 5690 /* ...and the rest are vdev names */ 5691 cb.cb_vdevs.cb_names = argv + 1; 5692 cb.cb_vdevs.cb_names_count = argc - 1; 5693 argc = 1; /* One pool to process */ 5694 } else { 5695 fprintf(stderr, gettext("Expected either a list of ")); 5696 fprintf(stderr, gettext("pools, or list of vdevs in")); 5697 fprintf(stderr, " \"%s\", ", argv[0]); 5698 fprintf(stderr, gettext("but got:\n")); 5699 error_list_unresolved_vdevs(argc - 1, argv + 1, 5700 argv[0], &cb.cb_vdevs); 5701 fprintf(stderr, "\n"); 5702 usage(B_FALSE); 5703 return (1); 5704 } 5705 } else { 5706 /* 5707 * The args don't make sense. The first arg isn't a pool name, 5708 * nor are all the args vdevs. 5709 */ 5710 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n")); 5711 fprintf(stderr, "\n"); 5712 return (1); 5713 } 5714 5715 if (cb.cb_vdevs.cb_names_count != 0) { 5716 /* 5717 * If user specified vdevs, it implies verbose. 5718 */ 5719 cb.cb_verbose = B_TRUE; 5720 } 5721 5722 /* 5723 * Construct the list of all interesting pools. 5724 */ 5725 ret = 0; 5726 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable, 5727 &ret)) == NULL) 5728 return (1); 5729 5730 if (pool_list_count(list) == 0 && argc != 0) { 5731 pool_list_free(list); 5732 return (1); 5733 } 5734 5735 if (pool_list_count(list) == 0 && interval == 0) { 5736 pool_list_free(list); 5737 (void) fprintf(stderr, gettext("no pools available\n")); 5738 return (1); 5739 } 5740 5741 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) { 5742 pool_list_free(list); 5743 (void) fprintf(stderr, 5744 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n")); 5745 usage(B_FALSE); 5746 return (1); 5747 } 5748 5749 if (l_histo && rq_histo) { 5750 pool_list_free(list); 5751 (void) fprintf(stderr, 5752 gettext("Only one of [-r|-w] can be passed at a time\n")); 5753 usage(B_FALSE); 5754 return (1); 5755 } 5756 5757 /* 5758 * Enter the main iostat loop. 5759 */ 5760 cb.cb_list = list; 5761 5762 if (l_histo) { 5763 /* 5764 * Histograms tables look out of place when you try to display 5765 * them with the other stats, so make a rule that you can only 5766 * print histograms by themselves. 5767 */ 5768 cb.cb_flags = IOS_L_HISTO_M; 5769 } else if (rq_histo) { 5770 cb.cb_flags = IOS_RQ_HISTO_M; 5771 } else { 5772 cb.cb_flags = IOS_DEFAULT_M; 5773 if (latency) 5774 cb.cb_flags |= IOS_LATENCY_M; 5775 if (queues) 5776 cb.cb_flags |= IOS_QUEUES_M; 5777 } 5778 5779 /* 5780 * See if the module supports all the stats we want to display. 
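	 * get_stat_flags() ANDs together the IOS_*_M bits supported by every
	 * pool in the list, so any bit set in cb.cb_flags but clear in that
	 * mask is unsupported. For example, requesting -q (IOS_QUEUES_M)
	 * against a module that doesn't export the queue nvpairs leaves
	 * IOS_QUEUES_M set in unsupported_flags, and we print an error below.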
5781 */ 5782 unsupported_flags = cb.cb_flags & ~get_stat_flags(list); 5783 if (unsupported_flags) { 5784 uint64_t f; 5785 int idx; 5786 fprintf(stderr, 5787 gettext("The loaded zfs module doesn't support:")); 5788 5789 /* for each bit set in unsupported_flags */ 5790 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) { 5791 idx = lowbit64(f) - 1; 5792 fprintf(stderr, " -%c", flag_to_arg[idx]); 5793 } 5794 5795 fprintf(stderr, ". Try running a newer module.\n"); 5796 pool_list_free(list); 5797 5798 return (1); 5799 } 5800 5801 for (;;) { 5802 if ((npools = pool_list_count(list)) == 0) 5803 (void) fprintf(stderr, gettext("no pools available\n")); 5804 else { 5805 /* 5806 * If this is the first iteration and -y was supplied 5807 * we skip any printing. 5808 */ 5809 boolean_t skip = (omit_since_boot && 5810 cb.cb_iteration == 0); 5811 5812 /* 5813 * Refresh all statistics. This is done as an 5814 * explicit step before calculating the maximum name 5815 * width, so that any * configuration changes are 5816 * properly accounted for. 5817 */ 5818 (void) pool_list_iter(list, B_FALSE, refresh_iostat, 5819 &cb); 5820 5821 /* 5822 * Iterate over all pools to determine the maximum width 5823 * for the pool / device name column across all pools. 5824 */ 5825 cb.cb_namewidth = 0; 5826 (void) pool_list_iter(list, B_FALSE, 5827 get_namewidth_iostat, &cb); 5828 5829 if (timestamp_fmt != NODATE) 5830 print_timestamp(timestamp_fmt); 5831 5832 if (cmd != NULL && cb.cb_verbose && 5833 !(cb.cb_flags & IOS_ANYHISTO_M)) { 5834 cb.vcdl = all_pools_for_each_vdev_run(argc, 5835 argv, cmd, g_zfs, cb.cb_vdevs.cb_names, 5836 cb.cb_vdevs.cb_names_count, 5837 cb.cb_vdevs.cb_name_flags); 5838 } else { 5839 cb.vcdl = NULL; 5840 } 5841 5842 5843 /* 5844 * Check terminal size so we can print headers 5845 * even when terminal window has its height 5846 * changed. 5847 */ 5848 winheight = terminal_height(); 5849 /* 5850 * Are we connected to TTY? If not, headers_once 5851 * should be true, to avoid breaking scripts. 5852 */ 5853 if (winheight < 0) 5854 headers_once = B_TRUE; 5855 5856 /* 5857 * If it's the first time and we're not skipping it, 5858 * or either skip or verbose mode, print the header. 5859 * 5860 * The histogram code explicitly prints its header on 5861 * every vdev, so skip this for histograms. 5862 */ 5863 if (((++cb.cb_iteration == 1 && !skip) || 5864 (skip != verbose) || 5865 (!headers_once && 5866 (cb.cb_iteration % winheight) == 0)) && 5867 (!(cb.cb_flags & IOS_ANYHISTO_M)) && 5868 !cb.cb_scripted) 5869 print_iostat_header(&cb); 5870 5871 if (skip) { 5872 (void) fsleep(interval); 5873 continue; 5874 } 5875 5876 pool_list_iter(list, B_FALSE, print_iostat, &cb); 5877 5878 /* 5879 * If there's more than one pool, and we're not in 5880 * verbose mode (which prints a separator for us), 5881 * then print a separator. 5882 * 5883 * In addition, if we're printing specific vdevs then 5884 * we also want an ending separator. 5885 */ 5886 if (((npools > 1 && !verbose && 5887 !(cb.cb_flags & IOS_ANYHISTO_M)) || 5888 (!(cb.cb_flags & IOS_ANYHISTO_M) && 5889 cb.cb_vdevs.cb_names_count)) && 5890 !cb.cb_scripted) { 5891 print_iostat_separator(&cb); 5892 if (cb.vcdl != NULL) 5893 print_cmd_columns(cb.vcdl, 1); 5894 printf("\n"); 5895 } 5896 5897 if (cb.vcdl != NULL) 5898 free_vdev_cmd_data_list(cb.vcdl); 5899 5900 } 5901 5902 /* 5903 * Flush the output so that redirection to a file isn't buffered 5904 * indefinitely. 
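		 * (stdout is typically line buffered only when attached to a
		 * tty; when redirected to a file or pipe it is fully
		 * buffered, so without this flush a reader might not see a
		 * completed interval until the stdio buffer fills.)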
5905 */ 5906 (void) fflush(stdout); 5907 5908 if (interval == 0) 5909 break; 5910 5911 if (count != 0 && --count == 0) 5912 break; 5913 5914 (void) fsleep(interval); 5915 } 5916 5917 pool_list_free(list); 5918 5919 return (ret); 5920 } 5921 5922 typedef struct list_cbdata { 5923 boolean_t cb_verbose; 5924 int cb_name_flags; 5925 int cb_namewidth; 5926 boolean_t cb_scripted; 5927 zprop_list_t *cb_proplist; 5928 boolean_t cb_literal; 5929 } list_cbdata_t; 5930 5931 5932 /* 5933 * Given a list of columns to display, output appropriate headers for each one. 5934 */ 5935 static void 5936 print_header(list_cbdata_t *cb) 5937 { 5938 zprop_list_t *pl = cb->cb_proplist; 5939 char headerbuf[ZPOOL_MAXPROPLEN]; 5940 const char *header; 5941 boolean_t first = B_TRUE; 5942 boolean_t right_justify; 5943 size_t width = 0; 5944 5945 for (; pl != NULL; pl = pl->pl_next) { 5946 width = pl->pl_width; 5947 if (first && cb->cb_verbose) { 5948 /* 5949 * Reset the width to accommodate the verbose listing 5950 * of devices. 5951 */ 5952 width = cb->cb_namewidth; 5953 } 5954 5955 if (!first) 5956 (void) fputs(" ", stdout); 5957 else 5958 first = B_FALSE; 5959 5960 right_justify = B_FALSE; 5961 if (pl->pl_prop != ZPROP_USERPROP) { 5962 header = zpool_prop_column_name(pl->pl_prop); 5963 right_justify = zpool_prop_align_right(pl->pl_prop); 5964 } else { 5965 int i; 5966 5967 for (i = 0; pl->pl_user_prop[i] != '\0'; i++) 5968 headerbuf[i] = toupper(pl->pl_user_prop[i]); 5969 headerbuf[i] = '\0'; 5970 header = headerbuf; 5971 } 5972 5973 if (pl->pl_next == NULL && !right_justify) 5974 (void) fputs(header, stdout); 5975 else if (right_justify) 5976 (void) printf("%*s", (int)width, header); 5977 else 5978 (void) printf("%-*s", (int)width, header); 5979 } 5980 5981 (void) fputc('\n', stdout); 5982 } 5983 5984 /* 5985 * Given a pool and a list of properties, print out all the properties according 5986 * to the described layout. Used by zpool_do_list(). 5987 */ 5988 static void 5989 print_pool(zpool_handle_t *zhp, list_cbdata_t *cb) 5990 { 5991 zprop_list_t *pl = cb->cb_proplist; 5992 boolean_t first = B_TRUE; 5993 char property[ZPOOL_MAXPROPLEN]; 5994 const char *propstr; 5995 boolean_t right_justify; 5996 size_t width; 5997 5998 for (; pl != NULL; pl = pl->pl_next) { 5999 6000 width = pl->pl_width; 6001 if (first && cb->cb_verbose) { 6002 /* 6003 * Reset the width to accommodate the verbose listing 6004 * of devices. 6005 */ 6006 width = cb->cb_namewidth; 6007 } 6008 6009 if (!first) { 6010 if (cb->cb_scripted) 6011 (void) fputc('\t', stdout); 6012 else 6013 (void) fputs(" ", stdout); 6014 } else { 6015 first = B_FALSE; 6016 } 6017 6018 right_justify = B_FALSE; 6019 if (pl->pl_prop != ZPROP_USERPROP) { 6020 if (zpool_get_prop(zhp, pl->pl_prop, property, 6021 sizeof (property), NULL, cb->cb_literal) != 0) 6022 propstr = "-"; 6023 else 6024 propstr = property; 6025 6026 right_justify = zpool_prop_align_right(pl->pl_prop); 6027 } else if ((zpool_prop_feature(pl->pl_user_prop) || 6028 zpool_prop_unsupported(pl->pl_user_prop)) && 6029 zpool_prop_get_feature(zhp, pl->pl_user_prop, property, 6030 sizeof (property)) == 0) { 6031 propstr = property; 6032 } else { 6033 propstr = "-"; 6034 } 6035 6036 6037 /* 6038 * If this is being called in scripted mode, or if this is the 6039 * last column and it is left-justified, don't include a width 6040 * format specifier. 
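		 * Scripted columns are tab separated (see above), and the
		 * last left-justified column would otherwise pick up trailing
		 * spaces; everything else is padded to 'width', e.g. an
		 * effective "%-10s" for a left-justified 10-character column.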
6041 */ 6042 if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify)) 6043 (void) fputs(propstr, stdout); 6044 else if (right_justify) 6045 (void) printf("%*s", (int)width, propstr); 6046 else 6047 (void) printf("%-*s", (int)width, propstr); 6048 } 6049 6050 (void) fputc('\n', stdout); 6051 } 6052 6053 static void 6054 print_one_column(zpool_prop_t prop, uint64_t value, const char *str, 6055 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format) 6056 { 6057 char propval[64]; 6058 boolean_t fixed; 6059 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL); 6060 6061 switch (prop) { 6062 case ZPOOL_PROP_SIZE: 6063 case ZPOOL_PROP_EXPANDSZ: 6064 case ZPOOL_PROP_CHECKPOINT: 6065 case ZPOOL_PROP_DEDUPRATIO: 6066 if (value == 0) 6067 (void) strlcpy(propval, "-", sizeof (propval)); 6068 else 6069 zfs_nicenum_format(value, propval, sizeof (propval), 6070 format); 6071 break; 6072 case ZPOOL_PROP_FRAGMENTATION: 6073 if (value == ZFS_FRAG_INVALID) { 6074 (void) strlcpy(propval, "-", sizeof (propval)); 6075 } else if (format == ZFS_NICENUM_RAW) { 6076 (void) snprintf(propval, sizeof (propval), "%llu", 6077 (unsigned long long)value); 6078 } else { 6079 (void) snprintf(propval, sizeof (propval), "%llu%%", 6080 (unsigned long long)value); 6081 } 6082 break; 6083 case ZPOOL_PROP_CAPACITY: 6084 /* capacity value is in parts-per-10,000 (aka permyriad) */ 6085 if (format == ZFS_NICENUM_RAW) 6086 (void) snprintf(propval, sizeof (propval), "%llu", 6087 (unsigned long long)value / 100); 6088 else 6089 (void) snprintf(propval, sizeof (propval), 6090 value < 1000 ? "%1.2f%%" : value < 10000 ? 6091 "%2.1f%%" : "%3.0f%%", value / 100.0); 6092 break; 6093 case ZPOOL_PROP_HEALTH: 6094 width = 8; 6095 (void) strlcpy(propval, str, sizeof (propval)); 6096 break; 6097 default: 6098 zfs_nicenum_format(value, propval, sizeof (propval), format); 6099 } 6100 6101 if (!valid) 6102 (void) strlcpy(propval, "-", sizeof (propval)); 6103 6104 if (scripted) 6105 (void) printf("\t%s", propval); 6106 else 6107 (void) printf(" %*s", (int)width, propval); 6108 } 6109 6110 /* 6111 * print static default line per vdev 6112 * not compatible with '-o' <proplist> option 6113 */ 6114 static void 6115 print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, 6116 list_cbdata_t *cb, int depth, boolean_t isspare) 6117 { 6118 nvlist_t **child; 6119 vdev_stat_t *vs; 6120 uint_t c, children; 6121 char *vname; 6122 boolean_t scripted = cb->cb_scripted; 6123 uint64_t islog = B_FALSE; 6124 const char *dashes = "%-*s - - - - " 6125 "- - - - -\n"; 6126 6127 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 6128 (uint64_t **)&vs, &c) == 0); 6129 6130 if (name != NULL) { 6131 boolean_t toplevel = (vs->vs_space != 0); 6132 uint64_t cap; 6133 enum zfs_nicenum_format format; 6134 const char *state; 6135 6136 if (cb->cb_literal) 6137 format = ZFS_NICENUM_RAW; 6138 else 6139 format = ZFS_NICENUM_1024; 6140 6141 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) 6142 return; 6143 6144 if (scripted) 6145 (void) printf("\t%s", name); 6146 else if (strlen(name) + depth > cb->cb_namewidth) 6147 (void) printf("%*s%s", depth, "", name); 6148 else 6149 (void) printf("%*s%s%*s", depth, "", name, 6150 (int)(cb->cb_namewidth - strlen(name) - depth), ""); 6151 6152 /* 6153 * Print the properties for the individual vdevs. Some 6154 * properties are only applicable to toplevel vdevs. The 6155 * 'toplevel' boolean value is passed to the print_one_column() 6156 * to indicate that the value is valid. 
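 * (Since 'toplevel' is derived from vs_space being non-zero, leaf vdevs
 * beneath a mirror or raidz top-level vdev print "-" for most of the space
 * columns, e.g. ALLOC, FREE and CAP.)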
6157 */ 6158 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) 6159 print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL, 6160 scripted, B_TRUE, format); 6161 else 6162 print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL, 6163 scripted, toplevel, format); 6164 print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL, 6165 scripted, toplevel, format); 6166 print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc, 6167 NULL, scripted, toplevel, format); 6168 print_one_column(ZPOOL_PROP_CHECKPOINT, 6169 vs->vs_checkpoint_space, NULL, scripted, toplevel, format); 6170 print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL, 6171 scripted, B_TRUE, format); 6172 print_one_column(ZPOOL_PROP_FRAGMENTATION, 6173 vs->vs_fragmentation, NULL, scripted, 6174 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel), 6175 format); 6176 cap = (vs->vs_space == 0) ? 0 : 6177 (vs->vs_alloc * 10000 / vs->vs_space); 6178 print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL, 6179 scripted, toplevel, format); 6180 print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL, 6181 scripted, toplevel, format); 6182 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 6183 if (isspare) { 6184 if (vs->vs_aux == VDEV_AUX_SPARED) 6185 state = "INUSE"; 6186 else if (vs->vs_state == VDEV_STATE_HEALTHY) 6187 state = "AVAIL"; 6188 } 6189 print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted, 6190 B_TRUE, format); 6191 (void) fputc('\n', stdout); 6192 } 6193 6194 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 6195 &child, &children) != 0) 6196 return; 6197 6198 /* list the normal vdevs first */ 6199 for (c = 0; c < children; c++) { 6200 uint64_t ishole = B_FALSE; 6201 6202 if (nvlist_lookup_uint64(child[c], 6203 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole) 6204 continue; 6205 6206 if (nvlist_lookup_uint64(child[c], 6207 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog) 6208 continue; 6209 6210 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 6211 continue; 6212 6213 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6214 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 6215 print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE); 6216 free(vname); 6217 } 6218 6219 /* list the classes: 'logs', 'dedup', and 'special' */ 6220 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { 6221 boolean_t printed = B_FALSE; 6222 6223 for (c = 0; c < children; c++) { 6224 char *bias = NULL; 6225 char *type = NULL; 6226 6227 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 6228 &islog) == 0 && islog) { 6229 bias = (char *)VDEV_ALLOC_CLASS_LOGS; 6230 } else { 6231 (void) nvlist_lookup_string(child[c], 6232 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 6233 (void) nvlist_lookup_string(child[c], 6234 ZPOOL_CONFIG_TYPE, &type); 6235 } 6236 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 6237 continue; 6238 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 6239 continue; 6240 6241 if (!printed) { 6242 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6243 (void) printf(dashes, cb->cb_namewidth, 6244 class_name[n]); 6245 printed = B_TRUE; 6246 } 6247 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6248 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 6249 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6250 B_FALSE); 6251 free(vname); 6252 } 6253 } 6254 6255 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 6256 &child, &children) == 0 && children > 0) { 6257 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6258 (void) printf(dashes, cb->cb_namewidth, "cache"); 6259 for (c = 0; c < children; c++) { 6260 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6261 
cb->cb_name_flags); 6262 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6263 B_FALSE); 6264 free(vname); 6265 } 6266 } 6267 6268 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child, 6269 &children) == 0 && children > 0) { 6270 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6271 (void) printf(dashes, cb->cb_namewidth, "spare"); 6272 for (c = 0; c < children; c++) { 6273 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6274 cb->cb_name_flags); 6275 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6276 B_TRUE); 6277 free(vname); 6278 } 6279 } 6280 } 6281 6282 /* 6283 * Generic callback function to list a pool. 6284 */ 6285 static int 6286 list_callback(zpool_handle_t *zhp, void *data) 6287 { 6288 list_cbdata_t *cbp = data; 6289 6290 print_pool(zhp, cbp); 6291 6292 if (cbp->cb_verbose) { 6293 nvlist_t *config, *nvroot; 6294 6295 config = zpool_get_config(zhp, NULL); 6296 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 6297 &nvroot) == 0); 6298 print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE); 6299 } 6300 6301 return (0); 6302 } 6303 6304 /* 6305 * Set the minimum pool/vdev name column width. The width must be at least 9, 6306 * but may be as large as needed. 6307 */ 6308 static int 6309 get_namewidth_list(zpool_handle_t *zhp, void *data) 6310 { 6311 list_cbdata_t *cb = data; 6312 int width; 6313 6314 width = get_namewidth(zhp, cb->cb_namewidth, 6315 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose); 6316 6317 if (width < 9) 6318 width = 9; 6319 6320 cb->cb_namewidth = width; 6321 6322 return (0); 6323 } 6324 6325 /* 6326 * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]] 6327 * 6328 * -g Display guid for individual vdev name. 6329 * -H Scripted mode. Don't display headers, and separate properties 6330 * by a single tab. 6331 * -L Follow links when resolving vdev path name. 6332 * -o List of properties to display. Defaults to 6333 * "name,size,allocated,free,expandsize,fragmentation,capacity," 6334 * "dedupratio,health,altroot" 6335 * -p Display values in parsable (exact) format. 6336 * -P Display full path for vdev name. 6337 * -T Display a timestamp in date(1) or Unix format 6338 * 6339 * List all pools in the system, whether or not they're healthy. Output space 6340 * statistics for each one, as well as health status summary. 
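 * For example, `zpool list -H -o name,size,capacity` (property selection
 * illustrative) prints one tab-separated line per pool with just those
 * columns and no header, which is convenient for scripts.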
6341 */ 6342 int 6343 zpool_do_list(int argc, char **argv) 6344 { 6345 int c; 6346 int ret = 0; 6347 list_cbdata_t cb = { 0 }; 6348 static char default_props[] = 6349 "name,size,allocated,free,checkpoint,expandsize,fragmentation," 6350 "capacity,dedupratio,health,altroot"; 6351 char *props = default_props; 6352 float interval = 0; 6353 unsigned long count = 0; 6354 zpool_list_t *list; 6355 boolean_t first = B_TRUE; 6356 current_prop_type = ZFS_TYPE_POOL; 6357 6358 /* check options */ 6359 while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) { 6360 switch (c) { 6361 case 'g': 6362 cb.cb_name_flags |= VDEV_NAME_GUID; 6363 break; 6364 case 'H': 6365 cb.cb_scripted = B_TRUE; 6366 break; 6367 case 'L': 6368 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 6369 break; 6370 case 'o': 6371 props = optarg; 6372 break; 6373 case 'P': 6374 cb.cb_name_flags |= VDEV_NAME_PATH; 6375 break; 6376 case 'p': 6377 cb.cb_literal = B_TRUE; 6378 break; 6379 case 'T': 6380 get_timestamp_arg(*optarg); 6381 break; 6382 case 'v': 6383 cb.cb_verbose = B_TRUE; 6384 cb.cb_namewidth = 8; /* 8 until precalc is avail */ 6385 break; 6386 case ':': 6387 (void) fprintf(stderr, gettext("missing argument for " 6388 "'%c' option\n"), optopt); 6389 usage(B_FALSE); 6390 break; 6391 case '?': 6392 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6393 optopt); 6394 usage(B_FALSE); 6395 } 6396 } 6397 6398 argc -= optind; 6399 argv += optind; 6400 6401 get_interval_count(&argc, argv, &interval, &count); 6402 6403 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0) 6404 usage(B_FALSE); 6405 6406 for (;;) { 6407 if ((list = pool_list_get(argc, argv, &cb.cb_proplist, 6408 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL) 6409 return (1); 6410 6411 if (pool_list_count(list) == 0) 6412 break; 6413 6414 cb.cb_namewidth = 0; 6415 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb); 6416 6417 if (timestamp_fmt != NODATE) 6418 print_timestamp(timestamp_fmt); 6419 6420 if (!cb.cb_scripted && (first || cb.cb_verbose)) { 6421 print_header(&cb); 6422 first = B_FALSE; 6423 } 6424 ret = pool_list_iter(list, B_TRUE, list_callback, &cb); 6425 6426 if (interval == 0) 6427 break; 6428 6429 if (count != 0 && --count == 0) 6430 break; 6431 6432 pool_list_free(list); 6433 (void) fsleep(interval); 6434 } 6435 6436 if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) { 6437 (void) printf(gettext("no pools available\n")); 6438 ret = 0; 6439 } 6440 6441 pool_list_free(list); 6442 zprop_free_list(cb.cb_proplist); 6443 return (ret); 6444 } 6445 6446 static int 6447 zpool_do_attach_or_replace(int argc, char **argv, int replacing) 6448 { 6449 boolean_t force = B_FALSE; 6450 boolean_t rebuild = B_FALSE; 6451 boolean_t wait = B_FALSE; 6452 int c; 6453 nvlist_t *nvroot; 6454 char *poolname, *old_disk, *new_disk; 6455 zpool_handle_t *zhp; 6456 nvlist_t *props = NULL; 6457 char *propval; 6458 int ret; 6459 6460 /* check options */ 6461 while ((c = getopt(argc, argv, "fo:sw")) != -1) { 6462 switch (c) { 6463 case 'f': 6464 force = B_TRUE; 6465 break; 6466 case 'o': 6467 if ((propval = strchr(optarg, '=')) == NULL) { 6468 (void) fprintf(stderr, gettext("missing " 6469 "'=' for -o option\n")); 6470 usage(B_FALSE); 6471 } 6472 *propval = '\0'; 6473 propval++; 6474 6475 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) || 6476 (add_prop_list(optarg, propval, &props, B_TRUE))) 6477 usage(B_FALSE); 6478 break; 6479 case 's': 6480 rebuild = B_TRUE; 6481 break; 6482 case 'w': 6483 wait = B_TRUE; 6484 break; 6485 case '?': 6486 (void) 
fprintf(stderr, gettext("invalid option '%c'\n"), 6487 optopt); 6488 usage(B_FALSE); 6489 } 6490 } 6491 6492 argc -= optind; 6493 argv += optind; 6494 6495 /* get pool name and check number of arguments */ 6496 if (argc < 1) { 6497 (void) fprintf(stderr, gettext("missing pool name argument\n")); 6498 usage(B_FALSE); 6499 } 6500 6501 poolname = argv[0]; 6502 6503 if (argc < 2) { 6504 (void) fprintf(stderr, 6505 gettext("missing <device> specification\n")); 6506 usage(B_FALSE); 6507 } 6508 6509 old_disk = argv[1]; 6510 6511 if (argc < 3) { 6512 if (!replacing) { 6513 (void) fprintf(stderr, 6514 gettext("missing <new_device> specification\n")); 6515 usage(B_FALSE); 6516 } 6517 new_disk = old_disk; 6518 argc -= 1; 6519 argv += 1; 6520 } else { 6521 new_disk = argv[2]; 6522 argc -= 2; 6523 argv += 2; 6524 } 6525 6526 if (argc > 1) { 6527 (void) fprintf(stderr, gettext("too many arguments\n")); 6528 usage(B_FALSE); 6529 } 6530 6531 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) { 6532 nvlist_free(props); 6533 return (1); 6534 } 6535 6536 if (zpool_get_config(zhp, NULL) == NULL) { 6537 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"), 6538 poolname); 6539 zpool_close(zhp); 6540 nvlist_free(props); 6541 return (1); 6542 } 6543 6544 /* unless manually specified use "ashift" pool property (if set) */ 6545 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) { 6546 int intval; 6547 zprop_source_t src; 6548 char strval[ZPOOL_MAXPROPLEN]; 6549 6550 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src); 6551 if (src != ZPROP_SRC_DEFAULT) { 6552 (void) sprintf(strval, "%" PRId32, intval); 6553 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval, 6554 &props, B_TRUE) == 0); 6555 } 6556 } 6557 6558 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE, 6559 argc, argv); 6560 if (nvroot == NULL) { 6561 zpool_close(zhp); 6562 nvlist_free(props); 6563 return (1); 6564 } 6565 6566 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing, 6567 rebuild); 6568 6569 if (ret == 0 && wait) 6570 ret = zpool_wait(zhp, 6571 replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER); 6572 6573 nvlist_free(props); 6574 nvlist_free(nvroot); 6575 zpool_close(zhp); 6576 6577 return (ret); 6578 } 6579 6580 /* 6581 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device> 6582 * 6583 * -f Force attach, even if <new_device> appears to be in use. 6584 * -s Use sequential instead of healing reconstruction for resilver. 6585 * -o Set property=value. 6586 * -w Wait for replacing to complete before returning 6587 * 6588 * Replace <device> with <new_device>. 6589 */ 6590 int 6591 zpool_do_replace(int argc, char **argv) 6592 { 6593 return (zpool_do_attach_or_replace(argc, argv, B_TRUE)); 6594 } 6595 6596 /* 6597 * zpool attach [-fsw] [-o property=value] <pool> <device> <new_device> 6598 * 6599 * -f Force attach, even if <new_device> appears to be in use. 6600 * -s Use sequential instead of healing reconstruction for resilver. 6601 * -o Set property=value. 6602 * -w Wait for resilvering to complete before returning 6603 * 6604 * Attach <new_device> to the mirror containing <device>. If <device> is not 6605 * part of a mirror, then <device> will be transformed into a mirror of 6606 * <device> and <new_device>. In either case, <new_device> will begin life 6607 * with a DTL of [0, now], and will immediately begin to resilver itself. 
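 * For example (pool and device names illustrative), `zpool attach -w tank
 * sda sdb` attaches sdb to the vdev containing sda, turning a single-disk
 * vdev into a two-way mirror, and blocks until the resilver completes.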
6608 */ 6609 int 6610 zpool_do_attach(int argc, char **argv) 6611 { 6612 return (zpool_do_attach_or_replace(argc, argv, B_FALSE)); 6613 } 6614 6615 /* 6616 * zpool detach [-f] <pool> <device> 6617 * 6618 * -f Force detach of <device>, even if DTLs argue against it 6619 * (not supported yet) 6620 * 6621 * Detach a device from a mirror. The operation will be refused if <device> 6622 * is the last device in the mirror, or if the DTLs indicate that this device 6623 * has the only valid copy of some data. 6624 */ 6625 int 6626 zpool_do_detach(int argc, char **argv) 6627 { 6628 int c; 6629 char *poolname, *path; 6630 zpool_handle_t *zhp; 6631 int ret; 6632 6633 /* check options */ 6634 while ((c = getopt(argc, argv, "")) != -1) { 6635 switch (c) { 6636 case '?': 6637 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6638 optopt); 6639 usage(B_FALSE); 6640 } 6641 } 6642 6643 argc -= optind; 6644 argv += optind; 6645 6646 /* get pool name and check number of arguments */ 6647 if (argc < 1) { 6648 (void) fprintf(stderr, gettext("missing pool name argument\n")); 6649 usage(B_FALSE); 6650 } 6651 6652 if (argc < 2) { 6653 (void) fprintf(stderr, 6654 gettext("missing <device> specification\n")); 6655 usage(B_FALSE); 6656 } 6657 6658 poolname = argv[0]; 6659 path = argv[1]; 6660 6661 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 6662 return (1); 6663 6664 ret = zpool_vdev_detach(zhp, path); 6665 6666 zpool_close(zhp); 6667 6668 return (ret); 6669 } 6670 6671 /* 6672 * zpool split [-gLlnP] [-o prop=val] ... 6673 * [-o mntopt] ... 6674 * [-R altroot] <pool> <newpool> [<device> ...] 6675 * 6676 * -g Display guid for individual vdev name. 6677 * -L Follow links when resolving vdev path name. 6678 * -n Do not split the pool, but display the resulting layout if 6679 * it were to be split. 6680 * -o Set property=value, or set mount options. 6681 * -P Display full path for vdev name. 6682 * -R Mount the split-off pool under an alternate root. 6683 * -l Load encryption keys while importing. 6684 * 6685 * Splits the named pool and gives it the new pool name. Devices to be split 6686 * off may be listed, provided that no more than one device is specified 6687 * per top-level vdev mirror. The newly split pool is left in an exported 6688 * state unless -R is specified. 6689 * 6690 * Restrictions: the top level of the pool must only be made up of 6691 * mirrors; all devices in the pool must be healthy; no device may be 6692 * undergoing a resilvering operation.
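 * For example (names illustrative), `zpool split -R /mnt tank newtank`
 * detaches one side of each mirror in 'tank', creates 'newtank' from those
 * devices, and imports it under the /mnt altroot; without -R the new pool
 * is simply left exported.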
6693 */ 6694 int 6695 zpool_do_split(int argc, char **argv) 6696 { 6697 char *srcpool, *newpool, *propval; 6698 char *mntopts = NULL; 6699 splitflags_t flags; 6700 int c, ret = 0; 6701 boolean_t loadkeys = B_FALSE; 6702 zpool_handle_t *zhp; 6703 nvlist_t *config, *props = NULL; 6704 6705 flags.dryrun = B_FALSE; 6706 flags.import = B_FALSE; 6707 flags.name_flags = 0; 6708 6709 /* check options */ 6710 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) { 6711 switch (c) { 6712 case 'g': 6713 flags.name_flags |= VDEV_NAME_GUID; 6714 break; 6715 case 'L': 6716 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS; 6717 break; 6718 case 'R': 6719 flags.import = B_TRUE; 6720 if (add_prop_list( 6721 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg, 6722 &props, B_TRUE) != 0) { 6723 nvlist_free(props); 6724 usage(B_FALSE); 6725 } 6726 break; 6727 case 'l': 6728 loadkeys = B_TRUE; 6729 break; 6730 case 'n': 6731 flags.dryrun = B_TRUE; 6732 break; 6733 case 'o': 6734 if ((propval = strchr(optarg, '=')) != NULL) { 6735 *propval = '\0'; 6736 propval++; 6737 if (add_prop_list(optarg, propval, 6738 &props, B_TRUE) != 0) { 6739 nvlist_free(props); 6740 usage(B_FALSE); 6741 } 6742 } else { 6743 mntopts = optarg; 6744 } 6745 break; 6746 case 'P': 6747 flags.name_flags |= VDEV_NAME_PATH; 6748 break; 6749 case ':': 6750 (void) fprintf(stderr, gettext("missing argument for " 6751 "'%c' option\n"), optopt); 6752 usage(B_FALSE); 6753 break; 6754 case '?': 6755 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6756 optopt); 6757 usage(B_FALSE); 6758 break; 6759 } 6760 } 6761 6762 if (!flags.import && mntopts != NULL) { 6763 (void) fprintf(stderr, gettext("setting mntopts is only " 6764 "valid when importing the pool\n")); 6765 usage(B_FALSE); 6766 } 6767 6768 if (!flags.import && loadkeys) { 6769 (void) fprintf(stderr, gettext("loading keys is only " 6770 "valid when importing the pool\n")); 6771 usage(B_FALSE); 6772 } 6773 6774 argc -= optind; 6775 argv += optind; 6776 6777 if (argc < 1) { 6778 (void) fprintf(stderr, gettext("Missing pool name\n")); 6779 usage(B_FALSE); 6780 } 6781 if (argc < 2) { 6782 (void) fprintf(stderr, gettext("Missing new pool name\n")); 6783 usage(B_FALSE); 6784 } 6785 6786 srcpool = argv[0]; 6787 newpool = argv[1]; 6788 6789 argc -= 2; 6790 argv += 2; 6791 6792 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) { 6793 nvlist_free(props); 6794 return (1); 6795 } 6796 6797 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv); 6798 if (config == NULL) { 6799 ret = 1; 6800 } else { 6801 if (flags.dryrun) { 6802 (void) printf(gettext("would create '%s' with the " 6803 "following layout:\n\n"), newpool); 6804 print_vdev_tree(NULL, newpool, config, 0, "", 6805 flags.name_flags); 6806 print_vdev_tree(NULL, "dedup", config, 0, 6807 VDEV_ALLOC_BIAS_DEDUP, 0); 6808 print_vdev_tree(NULL, "special", config, 0, 6809 VDEV_ALLOC_BIAS_SPECIAL, 0); 6810 } 6811 } 6812 6813 zpool_close(zhp); 6814 6815 if (ret != 0 || flags.dryrun || !flags.import) { 6816 nvlist_free(config); 6817 nvlist_free(props); 6818 return (ret); 6819 } 6820 6821 /* 6822 * The split was successful. Now we need to open the new 6823 * pool and import it. 
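 * (zpool_open_canfail() is presumably used here, rather than zpool_open(),
 * so that we still get a handle even if the new pool imported in a faulted
 * state; the POOL_STATE_UNAVAIL check below then skips mounting its
 * datasets.)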
6824 */ 6825 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) { 6826 nvlist_free(config); 6827 nvlist_free(props); 6828 return (1); 6829 } 6830 6831 if (loadkeys) { 6832 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool); 6833 if (ret != 0) 6834 ret = 1; 6835 } 6836 6837 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL && 6838 zpool_enable_datasets(zhp, mntopts, 0) != 0) { 6839 ret = 1; 6840 (void) fprintf(stderr, gettext("Split was successful, but " 6841 "the datasets could not all be mounted\n")); 6842 (void) fprintf(stderr, gettext("Try doing '%s' with a " 6843 "different altroot\n"), "zpool import"); 6844 } 6845 zpool_close(zhp); 6846 nvlist_free(config); 6847 nvlist_free(props); 6848 6849 return (ret); 6850 } 6851 6852 6853 6854 /* 6855 * zpool online <pool> <device> ... 6856 */ 6857 int 6858 zpool_do_online(int argc, char **argv) 6859 { 6860 int c, i; 6861 char *poolname; 6862 zpool_handle_t *zhp; 6863 int ret = 0; 6864 vdev_state_t newstate; 6865 int flags = 0; 6866 6867 /* check options */ 6868 while ((c = getopt(argc, argv, "e")) != -1) { 6869 switch (c) { 6870 case 'e': 6871 flags |= ZFS_ONLINE_EXPAND; 6872 break; 6873 case '?': 6874 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6875 optopt); 6876 usage(B_FALSE); 6877 } 6878 } 6879 6880 argc -= optind; 6881 argv += optind; 6882 6883 /* get pool name and check number of arguments */ 6884 if (argc < 1) { 6885 (void) fprintf(stderr, gettext("missing pool name\n")); 6886 usage(B_FALSE); 6887 } 6888 if (argc < 2) { 6889 (void) fprintf(stderr, gettext("missing device name\n")); 6890 usage(B_FALSE); 6891 } 6892 6893 poolname = argv[0]; 6894 6895 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 6896 return (1); 6897 6898 for (i = 1; i < argc; i++) { 6899 if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) { 6900 if (newstate != VDEV_STATE_HEALTHY) { 6901 (void) printf(gettext("warning: device '%s' " 6902 "onlined, but remains in faulted state\n"), 6903 argv[i]); 6904 if (newstate == VDEV_STATE_FAULTED) 6905 (void) printf(gettext("use 'zpool " 6906 "clear' to restore a faulted " 6907 "device\n")); 6908 else 6909 (void) printf(gettext("use 'zpool " 6910 "replace' to replace devices " 6911 "that are no longer present\n")); 6912 } 6913 } else { 6914 ret = 1; 6915 } 6916 } 6917 6918 zpool_close(zhp); 6919 6920 return (ret); 6921 } 6922 6923 /* 6924 * zpool offline [-ft] <pool> <device> ... 6925 * 6926 * -f Force the device into a faulted state. 6927 * 6928 * -t Only take the device off-line temporarily. The offline/faulted 6929 * state will not be persistent across reboots. 
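 * For example (names illustrative), `zpool offline -t tank sda` takes sda
 * offline only until the next reboot or an explicit `zpool online`, while
 * omitting -t makes the offline state persist across reboots.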
6930 */ 6931 int 6932 zpool_do_offline(int argc, char **argv) 6933 { 6934 int c, i; 6935 char *poolname; 6936 zpool_handle_t *zhp; 6937 int ret = 0; 6938 boolean_t istmp = B_FALSE; 6939 boolean_t fault = B_FALSE; 6940 6941 /* check options */ 6942 while ((c = getopt(argc, argv, "ft")) != -1) { 6943 switch (c) { 6944 case 'f': 6945 fault = B_TRUE; 6946 break; 6947 case 't': 6948 istmp = B_TRUE; 6949 break; 6950 case '?': 6951 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6952 optopt); 6953 usage(B_FALSE); 6954 } 6955 } 6956 6957 argc -= optind; 6958 argv += optind; 6959 6960 /* get pool name and check number of arguments */ 6961 if (argc < 1) { 6962 (void) fprintf(stderr, gettext("missing pool name\n")); 6963 usage(B_FALSE); 6964 } 6965 if (argc < 2) { 6966 (void) fprintf(stderr, gettext("missing device name\n")); 6967 usage(B_FALSE); 6968 } 6969 6970 poolname = argv[0]; 6971 6972 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 6973 return (1); 6974 6975 for (i = 1; i < argc; i++) { 6976 if (fault) { 6977 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]); 6978 vdev_aux_t aux; 6979 if (istmp == B_FALSE) { 6980 /* Force the fault to persist across imports */ 6981 aux = VDEV_AUX_EXTERNAL_PERSIST; 6982 } else { 6983 aux = VDEV_AUX_EXTERNAL; 6984 } 6985 6986 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0) 6987 ret = 1; 6988 } else { 6989 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0) 6990 ret = 1; 6991 } 6992 } 6993 6994 zpool_close(zhp); 6995 6996 return (ret); 6997 } 6998 6999 /* 7000 * zpool clear <pool> [device] 7001 * 7002 * Clear all errors associated with a pool or a particular device. 7003 */ 7004 int 7005 zpool_do_clear(int argc, char **argv) 7006 { 7007 int c; 7008 int ret = 0; 7009 boolean_t dryrun = B_FALSE; 7010 boolean_t do_rewind = B_FALSE; 7011 boolean_t xtreme_rewind = B_FALSE; 7012 uint32_t rewind_policy = ZPOOL_NO_REWIND; 7013 nvlist_t *policy = NULL; 7014 zpool_handle_t *zhp; 7015 char *pool, *device; 7016 7017 /* check options */ 7018 while ((c = getopt(argc, argv, "FnX")) != -1) { 7019 switch (c) { 7020 case 'F': 7021 do_rewind = B_TRUE; 7022 break; 7023 case 'n': 7024 dryrun = B_TRUE; 7025 break; 7026 case 'X': 7027 xtreme_rewind = B_TRUE; 7028 break; 7029 case '?': 7030 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7031 optopt); 7032 usage(B_FALSE); 7033 } 7034 } 7035 7036 argc -= optind; 7037 argv += optind; 7038 7039 if (argc < 1) { 7040 (void) fprintf(stderr, gettext("missing pool name\n")); 7041 usage(B_FALSE); 7042 } 7043 7044 if (argc > 2) { 7045 (void) fprintf(stderr, gettext("too many arguments\n")); 7046 usage(B_FALSE); 7047 } 7048 7049 if ((dryrun || xtreme_rewind) && !do_rewind) { 7050 (void) fprintf(stderr, 7051 gettext("-n or -X only meaningful with -F\n")); 7052 usage(B_FALSE); 7053 } 7054 if (dryrun) 7055 rewind_policy = ZPOOL_TRY_REWIND; 7056 else if (do_rewind) 7057 rewind_policy = ZPOOL_DO_REWIND; 7058 if (xtreme_rewind) 7059 rewind_policy |= ZPOOL_EXTREME_REWIND; 7060 7061 /* In future, further rewind policy choices can be passed along here */ 7062 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 7063 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, 7064 rewind_policy) != 0) { 7065 return (1); 7066 } 7067 7068 pool = argv[0]; 7069 device = argc == 2 ? 
argv[1] : NULL; 7070 7071 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) { 7072 nvlist_free(policy); 7073 return (1); 7074 } 7075 7076 if (zpool_clear(zhp, device, policy) != 0) 7077 ret = 1; 7078 7079 zpool_close(zhp); 7080 7081 nvlist_free(policy); 7082 7083 return (ret); 7084 } 7085 7086 /* 7087 * zpool reguid <pool> 7088 */ 7089 int 7090 zpool_do_reguid(int argc, char **argv) 7091 { 7092 int c; 7093 char *poolname; 7094 zpool_handle_t *zhp; 7095 int ret = 0; 7096 7097 /* check options */ 7098 while ((c = getopt(argc, argv, "")) != -1) { 7099 switch (c) { 7100 case '?': 7101 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7102 optopt); 7103 usage(B_FALSE); 7104 } 7105 } 7106 7107 argc -= optind; 7108 argv += optind; 7109 7110 /* get pool name and check number of arguments */ 7111 if (argc < 1) { 7112 (void) fprintf(stderr, gettext("missing pool name\n")); 7113 usage(B_FALSE); 7114 } 7115 7116 if (argc > 1) { 7117 (void) fprintf(stderr, gettext("too many arguments\n")); 7118 usage(B_FALSE); 7119 } 7120 7121 poolname = argv[0]; 7122 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 7123 return (1); 7124 7125 ret = zpool_reguid(zhp); 7126 7127 zpool_close(zhp); 7128 return (ret); 7129 } 7130 7131 7132 /* 7133 * zpool reopen <pool> 7134 * 7135 * Reopen the pool so that the kernel can update the sizes of all vdevs. 7136 */ 7137 int 7138 zpool_do_reopen(int argc, char **argv) 7139 { 7140 int c; 7141 int ret = 0; 7142 boolean_t scrub_restart = B_TRUE; 7143 7144 /* check options */ 7145 while ((c = getopt(argc, argv, "n")) != -1) { 7146 switch (c) { 7147 case 'n': 7148 scrub_restart = B_FALSE; 7149 break; 7150 case '?': 7151 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7152 optopt); 7153 usage(B_FALSE); 7154 } 7155 } 7156 7157 argc -= optind; 7158 argv += optind; 7159 7160 /* if argc == 0 we will execute zpool_reopen_one on all pools */ 7161 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7162 B_FALSE, zpool_reopen_one, &scrub_restart); 7163 7164 return (ret); 7165 } 7166 7167 typedef struct scrub_cbdata { 7168 int cb_type; 7169 pool_scrub_cmd_t cb_scrub_cmd; 7170 } scrub_cbdata_t; 7171 7172 static boolean_t 7173 zpool_has_checkpoint(zpool_handle_t *zhp) 7174 { 7175 nvlist_t *config, *nvroot; 7176 7177 config = zpool_get_config(zhp, NULL); 7178 7179 if (config != NULL) { 7180 pool_checkpoint_stat_t *pcs = NULL; 7181 uint_t c; 7182 7183 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 7184 (void) nvlist_lookup_uint64_array(nvroot, 7185 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 7186 7187 if (pcs == NULL || pcs->pcs_state == CS_NONE) 7188 return (B_FALSE); 7189 7190 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS || 7191 pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 7192 return (B_TRUE); 7193 } 7194 7195 return (B_FALSE); 7196 } 7197 7198 static int 7199 scrub_callback(zpool_handle_t *zhp, void *data) 7200 { 7201 scrub_cbdata_t *cb = data; 7202 int err; 7203 7204 /* 7205 * Ignore faulted pools. 
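 * (This callback runs once per pool via for_each_pool(); the non-zero
 * return value reports the failure for this pool back to the caller.)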
7206 */ 7207 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 7208 (void) fprintf(stderr, gettext("cannot scan '%s': pool is " 7209 "currently unavailable\n"), zpool_get_name(zhp)); 7210 return (1); 7211 } 7212 7213 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd); 7214 7215 if (err == 0 && zpool_has_checkpoint(zhp) && 7216 cb->cb_type == POOL_SCAN_SCRUB) { 7217 (void) printf(gettext("warning: will not scrub state that " 7218 "belongs to the checkpoint of pool '%s'\n"), 7219 zpool_get_name(zhp)); 7220 } 7221 7222 return (err != 0); 7223 } 7224 7225 static int 7226 wait_callback(zpool_handle_t *zhp, void *data) 7227 { 7228 zpool_wait_activity_t *act = data; 7229 return (zpool_wait(zhp, *act)); 7230 } 7231 7232 /* 7233 * zpool scrub [-s | -p] [-w] <pool> ... 7234 * 7235 * -s Stop. Stops any in-progress scrub. 7236 * -p Pause. Pause in-progress scrub. 7237 * -w Wait. Blocks until scrub has completed. 7238 */ 7239 int 7240 zpool_do_scrub(int argc, char **argv) 7241 { 7242 int c; 7243 scrub_cbdata_t cb; 7244 boolean_t wait = B_FALSE; 7245 int error; 7246 7247 cb.cb_type = POOL_SCAN_SCRUB; 7248 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 7249 7250 /* check options */ 7251 while ((c = getopt(argc, argv, "spw")) != -1) { 7252 switch (c) { 7253 case 's': 7254 cb.cb_type = POOL_SCAN_NONE; 7255 break; 7256 case 'p': 7257 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE; 7258 break; 7259 case 'w': 7260 wait = B_TRUE; 7261 break; 7262 case '?': 7263 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7264 optopt); 7265 usage(B_FALSE); 7266 } 7267 } 7268 7269 if (cb.cb_type == POOL_SCAN_NONE && 7270 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE) { 7271 (void) fprintf(stderr, gettext("invalid option combination: " 7272 "-s and -p are mutually exclusive\n")); 7273 usage(B_FALSE); 7274 } 7275 7276 if (wait && (cb.cb_type == POOL_SCAN_NONE || 7277 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) { 7278 (void) fprintf(stderr, gettext("invalid option combination: " 7279 "-w cannot be used with -p or -s\n")); 7280 usage(B_FALSE); 7281 } 7282 7283 argc -= optind; 7284 argv += optind; 7285 7286 if (argc < 1) { 7287 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7288 usage(B_FALSE); 7289 } 7290 7291 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7292 B_FALSE, scrub_callback, &cb); 7293 7294 if (wait && !error) { 7295 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB; 7296 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7297 B_FALSE, wait_callback, &act); 7298 } 7299 7300 return (error); 7301 } 7302 7303 /* 7304 * zpool resilver <pool> ... 7305 * 7306 * Restarts any in-progress resilver 7307 */ 7308 int 7309 zpool_do_resilver(int argc, char **argv) 7310 { 7311 int c; 7312 scrub_cbdata_t cb; 7313 7314 cb.cb_type = POOL_SCAN_RESILVER; 7315 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 7316 7317 /* check options */ 7318 while ((c = getopt(argc, argv, "")) != -1) { 7319 switch (c) { 7320 case '?': 7321 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7322 optopt); 7323 usage(B_FALSE); 7324 } 7325 } 7326 7327 argc -= optind; 7328 argv += optind; 7329 7330 if (argc < 1) { 7331 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7332 usage(B_FALSE); 7333 } 7334 7335 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7336 B_FALSE, scrub_callback, &cb)); 7337 } 7338 7339 /* 7340 * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...] 7341 * 7342 * -c Cancel. Ends any in-progress trim. 7343 * -d Secure trim. Requires kernel and device support. 
7344 * -r <rate> Sets the TRIM rate in bytes (per second). Supports 7345 * adding a multiplier suffix such as 'k' or 'm'. 7346 * -s Suspend. TRIM can then be restarted with no flags. 7347 * -w Wait. Blocks until trimming has completed. 7348 */ 7349 int 7350 zpool_do_trim(int argc, char **argv) 7351 { 7352 struct option long_options[] = { 7353 {"cancel", no_argument, NULL, 'c'}, 7354 {"secure", no_argument, NULL, 'd'}, 7355 {"rate", required_argument, NULL, 'r'}, 7356 {"suspend", no_argument, NULL, 's'}, 7357 {"wait", no_argument, NULL, 'w'}, 7358 {0, 0, 0, 0} 7359 }; 7360 7361 pool_trim_func_t cmd_type = POOL_TRIM_START; 7362 uint64_t rate = 0; 7363 boolean_t secure = B_FALSE; 7364 boolean_t wait = B_FALSE; 7365 7366 int c; 7367 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL)) 7368 != -1) { 7369 switch (c) { 7370 case 'c': 7371 if (cmd_type != POOL_TRIM_START && 7372 cmd_type != POOL_TRIM_CANCEL) { 7373 (void) fprintf(stderr, gettext("-c cannot be " 7374 "combined with other options\n")); 7375 usage(B_FALSE); 7376 } 7377 cmd_type = POOL_TRIM_CANCEL; 7378 break; 7379 case 'd': 7380 if (cmd_type != POOL_TRIM_START) { 7381 (void) fprintf(stderr, gettext("-d cannot be " 7382 "combined with the -c or -s options\n")); 7383 usage(B_FALSE); 7384 } 7385 secure = B_TRUE; 7386 break; 7387 case 'r': 7388 if (cmd_type != POOL_TRIM_START) { 7389 (void) fprintf(stderr, gettext("-r cannot be " 7390 "combined with the -c or -s options\n")); 7391 usage(B_FALSE); 7392 } 7393 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) { 7394 (void) fprintf(stderr, "%s: %s\n", 7395 gettext("invalid value for rate"), 7396 libzfs_error_description(g_zfs)); 7397 usage(B_FALSE); 7398 } 7399 break; 7400 case 's': 7401 if (cmd_type != POOL_TRIM_START && 7402 cmd_type != POOL_TRIM_SUSPEND) { 7403 (void) fprintf(stderr, gettext("-s cannot be " 7404 "combined with other options\n")); 7405 usage(B_FALSE); 7406 } 7407 cmd_type = POOL_TRIM_SUSPEND; 7408 break; 7409 case 'w': 7410 wait = B_TRUE; 7411 break; 7412 case '?': 7413 if (optopt != 0) { 7414 (void) fprintf(stderr, 7415 gettext("invalid option '%c'\n"), optopt); 7416 } else { 7417 (void) fprintf(stderr, 7418 gettext("invalid option '%s'\n"), 7419 argv[optind - 1]); 7420 } 7421 usage(B_FALSE); 7422 } 7423 } 7424 7425 argc -= optind; 7426 argv += optind; 7427 7428 if (argc < 1) { 7429 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7430 usage(B_FALSE); 7431 return (-1); 7432 } 7433 7434 if (wait && (cmd_type != POOL_TRIM_START)) { 7435 (void) fprintf(stderr, gettext("-w cannot be used with -c or " 7436 "-s\n")); 7437 usage(B_FALSE); 7438 } 7439 7440 char *poolname = argv[0]; 7441 zpool_handle_t *zhp = zpool_open(g_zfs, poolname); 7442 if (zhp == NULL) 7443 return (-1); 7444 7445 trimflags_t trim_flags = { 7446 .secure = secure, 7447 .rate = rate, 7448 .wait = wait, 7449 }; 7450 7451 nvlist_t *vdevs = fnvlist_alloc(); 7452 if (argc == 1) { 7453 /* no individual leaf vdevs specified, so add them all */ 7454 nvlist_t *config = zpool_get_config(zhp, NULL); 7455 nvlist_t *nvroot = fnvlist_lookup_nvlist(config, 7456 ZPOOL_CONFIG_VDEV_TREE); 7457 zpool_collect_leaves(zhp, nvroot, vdevs); 7458 trim_flags.fullpool = B_TRUE; 7459 } else { 7460 trim_flags.fullpool = B_FALSE; 7461 for (int i = 1; i < argc; i++) { 7462 fnvlist_add_boolean(vdevs, argv[i]); 7463 } 7464 } 7465 7466 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags); 7467 7468 fnvlist_free(vdevs); 7469 zpool_close(zhp); 7470 7471 return (error); 7472 } 7473 7474 /* 7475 * Converts a 
total number of seconds to a human readable string broken 7476 * down in to days/hours/minutes/seconds. 7477 */ 7478 static void 7479 secs_to_dhms(uint64_t total, char *buf) 7480 { 7481 uint64_t days = total / 60 / 60 / 24; 7482 uint64_t hours = (total / 60 / 60) % 24; 7483 uint64_t mins = (total / 60) % 60; 7484 uint64_t secs = (total % 60); 7485 7486 if (days > 0) { 7487 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu", 7488 (u_longlong_t)days, (u_longlong_t)hours, 7489 (u_longlong_t)mins, (u_longlong_t)secs); 7490 } else { 7491 (void) sprintf(buf, "%02llu:%02llu:%02llu", 7492 (u_longlong_t)hours, (u_longlong_t)mins, 7493 (u_longlong_t)secs); 7494 } 7495 } 7496 7497 /* 7498 * Print out detailed scrub status. 7499 */ 7500 static void 7501 print_scan_scrub_resilver_status(pool_scan_stat_t *ps) 7502 { 7503 time_t start, end, pause; 7504 uint64_t pass_scanned, scanned, pass_issued, issued, total; 7505 uint64_t elapsed, scan_rate, issue_rate; 7506 double fraction_done; 7507 char processed_buf[7], scanned_buf[7], issued_buf[7], total_buf[7]; 7508 char srate_buf[7], irate_buf[7], time_buf[32]; 7509 7510 printf(" "); 7511 printf_color(ANSI_BOLD, gettext("scan:")); 7512 printf(" "); 7513 7514 /* If there's never been a scan, there's not much to say. */ 7515 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE || 7516 ps->pss_func >= POOL_SCAN_FUNCS) { 7517 (void) printf(gettext("none requested\n")); 7518 return; 7519 } 7520 7521 start = ps->pss_start_time; 7522 end = ps->pss_end_time; 7523 pause = ps->pss_pass_scrub_pause; 7524 7525 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf)); 7526 7527 int is_resilver = ps->pss_func == POOL_SCAN_RESILVER; 7528 int is_scrub = ps->pss_func == POOL_SCAN_SCRUB; 7529 assert(is_resilver || is_scrub); 7530 7531 /* Scan is finished or canceled. */ 7532 if (ps->pss_state == DSS_FINISHED) { 7533 secs_to_dhms(end - start, time_buf); 7534 7535 if (is_scrub) { 7536 (void) printf(gettext("scrub repaired %s " 7537 "in %s with %llu errors on %s"), processed_buf, 7538 time_buf, (u_longlong_t)ps->pss_errors, 7539 ctime(&end)); 7540 } else if (is_resilver) { 7541 (void) printf(gettext("resilvered %s " 7542 "in %s with %llu errors on %s"), processed_buf, 7543 time_buf, (u_longlong_t)ps->pss_errors, 7544 ctime(&end)); 7545 } 7546 return; 7547 } else if (ps->pss_state == DSS_CANCELED) { 7548 if (is_scrub) { 7549 (void) printf(gettext("scrub canceled on %s"), 7550 ctime(&end)); 7551 } else if (is_resilver) { 7552 (void) printf(gettext("resilver canceled on %s"), 7553 ctime(&end)); 7554 } 7555 return; 7556 } 7557 7558 assert(ps->pss_state == DSS_SCANNING); 7559 7560 /* Scan is in progress. Resilvers can't be paused. 
*/ 7561 if (is_scrub) { 7562 if (pause == 0) { 7563 (void) printf(gettext("scrub in progress since %s"), 7564 ctime(&start)); 7565 } else { 7566 (void) printf(gettext("scrub paused since %s"), 7567 ctime(&pause)); 7568 (void) printf(gettext("\tscrub started on %s"), 7569 ctime(&start)); 7570 } 7571 } else if (is_resilver) { 7572 (void) printf(gettext("resilver in progress since %s"), 7573 ctime(&start)); 7574 } 7575 7576 scanned = ps->pss_examined; 7577 pass_scanned = ps->pss_pass_exam; 7578 issued = ps->pss_issued; 7579 pass_issued = ps->pss_pass_issued; 7580 total = ps->pss_to_examine; 7581 7582 /* we are only done with a block once we have issued the IO for it */ 7583 fraction_done = (double)issued / total; 7584 7585 /* elapsed time for this pass, rounding up to 1 if it's 0 */ 7586 elapsed = time(NULL) - ps->pss_pass_start; 7587 elapsed -= ps->pss_pass_scrub_spent_paused; 7588 elapsed = (elapsed != 0) ? elapsed : 1; 7589 7590 scan_rate = pass_scanned / elapsed; 7591 issue_rate = pass_issued / elapsed; 7592 uint64_t total_secs_left = (issue_rate != 0 && total >= issued) ? 7593 ((total - issued) / issue_rate) : UINT64_MAX; 7594 secs_to_dhms(total_secs_left, time_buf); 7595 7596 /* format all of the numbers we will be reporting */ 7597 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf)); 7598 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf)); 7599 zfs_nicebytes(total, total_buf, sizeof (total_buf)); 7600 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf)); 7601 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf)); 7602 7603 /* do not print estimated time if we have a paused scrub */ 7604 if (pause == 0) { 7605 (void) printf(gettext("\t%s scanned at %s/s, " 7606 "%s issued at %s/s, %s total\n"), 7607 scanned_buf, srate_buf, issued_buf, irate_buf, total_buf); 7608 } else { 7609 (void) printf(gettext("\t%s scanned, %s issued, %s total\n"), 7610 scanned_buf, issued_buf, total_buf); 7611 } 7612 7613 if (is_resilver) { 7614 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 7615 processed_buf, 100 * fraction_done); 7616 } else if (is_scrub) { 7617 (void) printf(gettext("\t%s repaired, %.2f%% done"), 7618 processed_buf, 100 * fraction_done); 7619 } 7620 7621 if (pause == 0) { 7622 /* 7623 * Only provide an estimate iff: 7624 * 1) the time remaining is valid, and 7625 * 2) the issue rate exceeds 10 MB/s, and 7626 * 3) it's either: 7627 * a) a resilver which has started repairs, or 7628 * b) a scrub which has entered the issue phase. 
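 * (Below that 10 MB/s issue-rate floor, or before any repairs/issued I/O
 * have started, the estimate would likely be too noisy to be useful, so
 * "no estimated completion time" is printed instead.)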
7629 */ 7630 if (total_secs_left != UINT64_MAX && 7631 issue_rate >= 10 * 1024 * 1024 && 7632 ((is_resilver && ps->pss_processed > 0) || 7633 (is_scrub && issued > 0))) { 7634 (void) printf(gettext(", %s to go\n"), time_buf); 7635 } else { 7636 (void) printf(gettext(", no estimated " 7637 "completion time\n")); 7638 } 7639 } else { 7640 (void) printf(gettext("\n")); 7641 } 7642 } 7643 7644 static void 7645 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, char *vdev_name) 7646 { 7647 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE) 7648 return; 7649 7650 printf(" "); 7651 printf_color(ANSI_BOLD, gettext("scan:")); 7652 printf(" "); 7653 7654 uint64_t bytes_scanned = vrs->vrs_bytes_scanned; 7655 uint64_t bytes_issued = vrs->vrs_bytes_issued; 7656 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt; 7657 uint64_t bytes_est = vrs->vrs_bytes_est; 7658 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned / 7659 (vrs->vrs_pass_time_ms + 1)) * 1000; 7660 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued / 7661 (vrs->vrs_pass_time_ms + 1)) * 1000; 7662 double scan_pct = MIN((double)bytes_scanned * 100 / 7663 (bytes_est + 1), 100); 7664 7665 /* Format all of the numbers we will be reporting */ 7666 char bytes_scanned_buf[7], bytes_issued_buf[7]; 7667 char bytes_rebuilt_buf[7], bytes_est_buf[7]; 7668 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32]; 7669 zfs_nicebytes(bytes_scanned, bytes_scanned_buf, 7670 sizeof (bytes_scanned_buf)); 7671 zfs_nicebytes(bytes_issued, bytes_issued_buf, 7672 sizeof (bytes_issued_buf)); 7673 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf, 7674 sizeof (bytes_rebuilt_buf)); 7675 zfs_nicebytes(bytes_est, bytes_est_buf, sizeof (bytes_est_buf)); 7676 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf)); 7677 zfs_nicebytes(issue_rate, issue_rate_buf, sizeof (issue_rate_buf)); 7678 7679 time_t start = vrs->vrs_start_time; 7680 time_t end = vrs->vrs_end_time; 7681 7682 /* Rebuild is finished or canceled. */ 7683 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) { 7684 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf); 7685 (void) printf(gettext("resilvered (%s) %s in %s " 7686 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf, 7687 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end)); 7688 return; 7689 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) { 7690 (void) printf(gettext("resilver (%s) canceled on %s"), 7691 vdev_name, ctime(&end)); 7692 return; 7693 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 7694 (void) printf(gettext("resilver (%s) in progress since %s"), 7695 vdev_name, ctime(&start)); 7696 } 7697 7698 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE); 7699 7700 secs_to_dhms(MAX((int64_t)bytes_est - (int64_t)bytes_scanned, 0) / 7701 MAX(scan_rate, 1), time_buf); 7702 7703 (void) printf(gettext("\t%s scanned at %s/s, %s issued %s/s, " 7704 "%s total\n"), bytes_scanned_buf, scan_rate_buf, 7705 bytes_issued_buf, issue_rate_buf, bytes_est_buf); 7706 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 7707 bytes_rebuilt_buf, scan_pct); 7708 7709 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 7710 if (scan_rate >= 10 * 1024 * 1024) { 7711 (void) printf(gettext(", %s to go\n"), time_buf); 7712 } else { 7713 (void) printf(gettext(", no estimated " 7714 "completion time\n")); 7715 } 7716 } else { 7717 (void) printf(gettext("\n")); 7718 } 7719 } 7720 7721 /* 7722 * Print rebuild status for top-level vdevs. 
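 * (Sequential rebuild progress is reported per top-level vdev via
 * ZPOOL_CONFIG_REBUILD_STATS, unlike scrub/resilver scans, which are
 * reported pool-wide via ZPOOL_CONFIG_SCAN_STATS; hence the per-child loop
 * below.)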
7723 */ 7724 static void 7725 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot) 7726 { 7727 nvlist_t **child; 7728 uint_t children; 7729 7730 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 7731 &child, &children) != 0) 7732 children = 0; 7733 7734 for (uint_t c = 0; c < children; c++) { 7735 vdev_rebuild_stat_t *vrs; 7736 uint_t i; 7737 7738 if (nvlist_lookup_uint64_array(child[c], 7739 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 7740 char *name = zpool_vdev_name(g_zfs, zhp, 7741 child[c], VDEV_NAME_TYPE_ID); 7742 print_rebuild_status_impl(vrs, name); 7743 free(name); 7744 } 7745 } 7746 } 7747 7748 /* 7749 * As we don't scrub checkpointed blocks, we want to warn the user that we 7750 * skipped scanning some blocks if a checkpoint exists or existed at any 7751 * time during the scan. If a sequential instead of healing reconstruction 7752 * was performed then the blocks were reconstructed. However, their checksums 7753 * have not been verified so we still print the warning. 7754 */ 7755 static void 7756 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs) 7757 { 7758 if (ps == NULL || pcs == NULL) 7759 return; 7760 7761 if (pcs->pcs_state == CS_NONE || 7762 pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 7763 return; 7764 7765 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS); 7766 7767 if (ps->pss_state == DSS_NONE) 7768 return; 7769 7770 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) && 7771 ps->pss_end_time < pcs->pcs_start_time) 7772 return; 7773 7774 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) { 7775 (void) printf(gettext(" scan warning: skipped blocks " 7776 "that are only referenced by the checkpoint.\n")); 7777 } else { 7778 assert(ps->pss_state == DSS_SCANNING); 7779 (void) printf(gettext(" scan warning: skipping blocks " 7780 "that are only referenced by the checkpoint.\n")); 7781 } 7782 } 7783 7784 /* 7785 * Returns B_TRUE if there is an active rebuild in progress. Otherwise, 7786 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for 7787 * the last completed (or cancelled) rebuild. 7788 */ 7789 static boolean_t 7790 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time) 7791 { 7792 nvlist_t **child; 7793 uint_t children; 7794 boolean_t rebuilding = B_FALSE; 7795 uint64_t end_time = 0; 7796 7797 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 7798 &child, &children) != 0) 7799 children = 0; 7800 7801 for (uint_t c = 0; c < children; c++) { 7802 vdev_rebuild_stat_t *vrs; 7803 uint_t i; 7804 7805 if (nvlist_lookup_uint64_array(child[c], 7806 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 7807 7808 if (vrs->vrs_end_time > end_time) 7809 end_time = vrs->vrs_end_time; 7810 7811 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 7812 rebuilding = B_TRUE; 7813 end_time = 0; 7814 break; 7815 } 7816 } 7817 } 7818 7819 if (rebuild_end_time != NULL) 7820 *rebuild_end_time = end_time; 7821 7822 return (rebuilding); 7823 } 7824 7825 /* 7826 * Print the scan status. 
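 * A pool may have both scrub and resilver/rebuild history. The logic below
 * always prints the scrub status when one exists, then prints whichever of
 * the resilver or rebuild is currently active or, failing that, the one
 * that completed most recently.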
7827 */ 7828 static void 7829 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot) 7830 { 7831 uint64_t rebuild_end_time = 0, resilver_end_time = 0; 7832 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE; 7833 boolean_t active_resilver = B_FALSE; 7834 pool_checkpoint_stat_t *pcs = NULL; 7835 pool_scan_stat_t *ps = NULL; 7836 uint_t c; 7837 7838 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS, 7839 (uint64_t **)&ps, &c) == 0) { 7840 if (ps->pss_func == POOL_SCAN_RESILVER) { 7841 resilver_end_time = ps->pss_end_time; 7842 active_resilver = (ps->pss_state == DSS_SCANNING); 7843 } 7844 7845 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER); 7846 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB); 7847 } 7848 7849 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time); 7850 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0)); 7851 7852 /* Always print the scrub status when available. */ 7853 if (have_scrub) 7854 print_scan_scrub_resilver_status(ps); 7855 7856 /* 7857 * When there is an active resilver or rebuild print its status. 7858 * Otherwise print the status of the last resilver or rebuild. 7859 */ 7860 if (active_resilver || (!active_rebuild && have_resilver && 7861 resilver_end_time && resilver_end_time > rebuild_end_time)) { 7862 print_scan_scrub_resilver_status(ps); 7863 } else if (active_rebuild || (!active_resilver && have_rebuild && 7864 rebuild_end_time && rebuild_end_time > resilver_end_time)) { 7865 print_rebuild_status(zhp, nvroot); 7866 } 7867 7868 (void) nvlist_lookup_uint64_array(nvroot, 7869 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 7870 print_checkpoint_scan_warning(ps, pcs); 7871 } 7872 7873 /* 7874 * Print out detailed removal status. 7875 */ 7876 static void 7877 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs) 7878 { 7879 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7]; 7880 time_t start, end; 7881 nvlist_t *config, *nvroot; 7882 nvlist_t **child; 7883 uint_t children; 7884 char *vdev_name; 7885 7886 if (prs == NULL || prs->prs_state == DSS_NONE) 7887 return; 7888 7889 /* 7890 * Determine name of vdev. 7891 */ 7892 config = zpool_get_config(zhp, NULL); 7893 nvroot = fnvlist_lookup_nvlist(config, 7894 ZPOOL_CONFIG_VDEV_TREE); 7895 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 7896 &child, &children) == 0); 7897 assert(prs->prs_removing_vdev < children); 7898 vdev_name = zpool_vdev_name(g_zfs, zhp, 7899 child[prs->prs_removing_vdev], B_TRUE); 7900 7901 printf_color(ANSI_BOLD, gettext("remove: ")); 7902 7903 start = prs->prs_start_time; 7904 end = prs->prs_end_time; 7905 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf)); 7906 7907 /* 7908 * Removal is finished or canceled. 7909 */ 7910 if (prs->prs_state == DSS_FINISHED) { 7911 uint64_t minutes_taken = (end - start) / 60; 7912 7913 (void) printf(gettext("Removal of vdev %llu copied %s " 7914 "in %lluh%um, completed on %s"), 7915 (longlong_t)prs->prs_removing_vdev, 7916 copied_buf, 7917 (u_longlong_t)(minutes_taken / 60), 7918 (uint_t)(minutes_taken % 60), 7919 ctime((time_t *)&end)); 7920 } else if (prs->prs_state == DSS_CANCELED) { 7921 (void) printf(gettext("Removal of %s canceled on %s"), 7922 vdev_name, ctime(&end)); 7923 } else { 7924 uint64_t copied, total, elapsed, mins_left, hours_left; 7925 double fraction_done; 7926 uint_t rate; 7927 7928 assert(prs->prs_state == DSS_SCANNING); 7929 7930 /* 7931 * Removal is in progress. 
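 * (The rate and ETA printed below are averaged over the entire removal,
 * i.e. prs_copied divided by the wall-clock time since prs_start_time, so
 * early estimates can swing considerably.)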
7932 */ 7933 (void) printf(gettext( 7934 "Evacuation of %s in progress since %s"), 7935 vdev_name, ctime(&start)); 7936 7937 copied = prs->prs_copied > 0 ? prs->prs_copied : 1; 7938 total = prs->prs_to_copy; 7939 fraction_done = (double)copied / total; 7940 7941 /* elapsed time for this pass */ 7942 elapsed = time(NULL) - prs->prs_start_time; 7943 elapsed = elapsed > 0 ? elapsed : 1; 7944 rate = copied / elapsed; 7945 rate = rate > 0 ? rate : 1; 7946 mins_left = ((total - copied) / rate) / 60; 7947 hours_left = mins_left / 60; 7948 7949 zfs_nicenum(copied, examined_buf, sizeof (examined_buf)); 7950 zfs_nicenum(total, total_buf, sizeof (total_buf)); 7951 zfs_nicenum(rate, rate_buf, sizeof (rate_buf)); 7952 7953 /* 7954 * do not print estimated time if hours_left is more than 7955 * 30 days 7956 */ 7957 (void) printf(gettext( 7958 "\t%s copied out of %s at %s/s, %.2f%% done"), 7959 examined_buf, total_buf, rate_buf, 100 * fraction_done); 7960 if (hours_left < (30 * 24)) { 7961 (void) printf(gettext(", %lluh%um to go\n"), 7962 (u_longlong_t)hours_left, (uint_t)(mins_left % 60)); 7963 } else { 7964 (void) printf(gettext( 7965 ", (copy is slow, no estimated time)\n")); 7966 } 7967 } 7968 free(vdev_name); 7969 7970 if (prs->prs_mapping_memory > 0) { 7971 char mem_buf[7]; 7972 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf)); 7973 (void) printf(gettext( 7974 "\t%s memory used for removed device mappings\n"), 7975 mem_buf); 7976 } 7977 } 7978 7979 static void 7980 print_checkpoint_status(pool_checkpoint_stat_t *pcs) 7981 { 7982 time_t start; 7983 char space_buf[7]; 7984 7985 if (pcs == NULL || pcs->pcs_state == CS_NONE) 7986 return; 7987 7988 (void) printf(gettext("checkpoint: ")); 7989 7990 start = pcs->pcs_start_time; 7991 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf)); 7992 7993 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) { 7994 char *date = ctime(&start); 7995 7996 /* 7997 * ctime() adds a newline at the end of the generated 7998 * string, thus the weird format specifier and the 7999 * strlen() call used to chop it off from the output. 
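 * In other words, "%.*s" with a precision of strlen(date) - 1 prints every
 * character of the ctime() string except its trailing '\n'.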
8000 */ 8001 (void) printf(gettext("created %.*s, consumes %s\n"), 8002 (int)(strlen(date) - 1), date, space_buf); 8003 return; 8004 } 8005 8006 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 8007 8008 (void) printf(gettext("discarding, %s remaining.\n"), 8009 space_buf); 8010 } 8011 8012 static void 8013 print_error_log(zpool_handle_t *zhp) 8014 { 8015 nvlist_t *nverrlist = NULL; 8016 nvpair_t *elem; 8017 char *pathname; 8018 size_t len = MAXPATHLEN * 2; 8019 8020 if (zpool_get_errlog(zhp, &nverrlist) != 0) 8021 return; 8022 8023 (void) printf("errors: Permanent errors have been " 8024 "detected in the following files:\n\n"); 8025 8026 pathname = safe_malloc(len); 8027 elem = NULL; 8028 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) { 8029 nvlist_t *nv; 8030 uint64_t dsobj, obj; 8031 8032 verify(nvpair_value_nvlist(elem, &nv) == 0); 8033 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET, 8034 &dsobj) == 0); 8035 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, 8036 &obj) == 0); 8037 zpool_obj_to_path(zhp, dsobj, obj, pathname, len); 8038 (void) printf("%7s %s\n", "", pathname); 8039 } 8040 free(pathname); 8041 nvlist_free(nverrlist); 8042 } 8043 8044 static void 8045 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares, 8046 uint_t nspares) 8047 { 8048 uint_t i; 8049 char *name; 8050 8051 if (nspares == 0) 8052 return; 8053 8054 (void) printf(gettext("\tspares\n")); 8055 8056 for (i = 0; i < nspares; i++) { 8057 name = zpool_vdev_name(g_zfs, zhp, spares[i], 8058 cb->cb_name_flags); 8059 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL); 8060 free(name); 8061 } 8062 } 8063 8064 static void 8065 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache, 8066 uint_t nl2cache) 8067 { 8068 uint_t i; 8069 char *name; 8070 8071 if (nl2cache == 0) 8072 return; 8073 8074 (void) printf(gettext("\tcache\n")); 8075 8076 for (i = 0; i < nl2cache; i++) { 8077 name = zpool_vdev_name(g_zfs, zhp, l2cache[i], 8078 cb->cb_name_flags); 8079 print_status_config(zhp, cb, name, l2cache[i], 2, 8080 B_FALSE, NULL); 8081 free(name); 8082 } 8083 } 8084 8085 static void 8086 print_dedup_stats(nvlist_t *config) 8087 { 8088 ddt_histogram_t *ddh; 8089 ddt_stat_t *dds; 8090 ddt_object_t *ddo; 8091 uint_t c; 8092 char dspace[6], mspace[6]; 8093 8094 /* 8095 * If the pool was faulted then we may not have been able to 8096 * obtain the config. Otherwise, if we have anything in the dedup 8097 * table continue processing the stats. 8098 */ 8099 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS, 8100 (uint64_t **)&ddo, &c) != 0) 8101 return; 8102 8103 (void) printf("\n"); 8104 (void) printf(gettext(" dedup: ")); 8105 if (ddo->ddo_count == 0) { 8106 (void) printf(gettext("no DDT entries\n")); 8107 return; 8108 } 8109 8110 zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace)); 8111 zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace)); 8112 (void) printf("DDT entries %llu, size %s on disk, %s in core\n", 8113 (u_longlong_t)ddo->ddo_count, 8114 dspace, 8115 mspace); 8116 8117 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, 8118 (uint64_t **)&dds, &c) == 0); 8119 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM, 8120 (uint64_t **)&ddh, &c) == 0); 8121 zpool_dump_ddt(dds, ddh); 8122 } 8123 8124 /* 8125 * Display a summary of pool status. Displays a summary such as: 8126 * 8127 * pool: tank 8128 * status: DEGRADED 8129 * reason: One or more devices ... 
8130 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01 8131 * config: 8132 * mirror DEGRADED 8133 * c1t0d0 OK 8134 * c2t0d0 UNAVAIL 8135 * 8136 * When given the '-v' option, we print out the complete config. If the '-e' 8137 * option is specified, then we print out error rate information as well. 8138 */ 8139 static int 8140 status_callback(zpool_handle_t *zhp, void *data) 8141 { 8142 status_cbdata_t *cbp = data; 8143 nvlist_t *config, *nvroot; 8144 const char *msgid; 8145 zpool_status_t reason; 8146 zpool_errata_t errata; 8147 const char *health; 8148 uint_t c; 8149 vdev_stat_t *vs; 8150 8151 config = zpool_get_config(zhp, NULL); 8152 reason = zpool_get_status(zhp, &msgid, &errata); 8153 8154 cbp->cb_count++; 8155 8156 /* 8157 * If we were given 'zpool status -x', only report those pools with 8158 * problems. 8159 */ 8160 if (cbp->cb_explain && 8161 (reason == ZPOOL_STATUS_OK || 8162 reason == ZPOOL_STATUS_VERSION_OLDER || 8163 reason == ZPOOL_STATUS_FEAT_DISABLED || 8164 reason == ZPOOL_STATUS_COMPATIBILITY_ERR || 8165 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) { 8166 if (!cbp->cb_allpools) { 8167 (void) printf(gettext("pool '%s' is healthy\n"), 8168 zpool_get_name(zhp)); 8169 if (cbp->cb_first) 8170 cbp->cb_first = B_FALSE; 8171 } 8172 return (0); 8173 } 8174 8175 if (cbp->cb_first) 8176 cbp->cb_first = B_FALSE; 8177 else 8178 (void) printf("\n"); 8179 8180 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 8181 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 8182 (uint64_t **)&vs, &c) == 0); 8183 8184 health = zpool_get_state_str(zhp); 8185 8186 printf(" "); 8187 printf_color(ANSI_BOLD, gettext("pool:")); 8188 printf(" %s\n", zpool_get_name(zhp)); 8189 fputc(' ', stdout); 8190 printf_color(ANSI_BOLD, gettext("state: ")); 8191 8192 printf_color(health_str_to_color(health), "%s", health); 8193 8194 fputc('\n', stdout); 8195 8196 switch (reason) { 8197 case ZPOOL_STATUS_MISSING_DEV_R: 8198 printf_color(ANSI_BOLD, gettext("status: ")); 8199 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8200 "not be opened. Sufficient replicas exist for\n\tthe pool " 8201 "to continue functioning in a degraded state.\n")); 8202 printf_color(ANSI_BOLD, gettext("action: ")); 8203 printf_color(ANSI_YELLOW, gettext("Attach the missing device " 8204 "and online it using 'zpool online'.\n")); 8205 break; 8206 8207 case ZPOOL_STATUS_MISSING_DEV_NR: 8208 printf_color(ANSI_BOLD, gettext("status: ")); 8209 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8210 "not be opened. There are insufficient\n\treplicas for the" 8211 " pool to continue functioning.\n")); 8212 printf_color(ANSI_BOLD, gettext("action: ")); 8213 printf_color(ANSI_YELLOW, gettext("Attach the missing device " 8214 "and online it using 'zpool online'.\n")); 8215 break; 8216 8217 case ZPOOL_STATUS_CORRUPT_LABEL_R: 8218 printf_color(ANSI_BOLD, gettext("status: ")); 8219 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8220 "not be used because the label is missing or\n\tinvalid. " 8221 "Sufficient replicas exist for the pool to continue\n\t" 8222 "functioning in a degraded state.\n")); 8223 printf_color(ANSI_BOLD, gettext("action: ")); 8224 printf_color(ANSI_YELLOW, gettext("Replace the device using " 8225 "'zpool replace'.\n")); 8226 break; 8227 8228 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 8229 printf_color(ANSI_BOLD, gettext("status: ")); 8230 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8231 "not be used because the label is missing \n\tor invalid. 
" 8232 "There are insufficient replicas for the pool to " 8233 "continue\n\tfunctioning.\n")); 8234 zpool_explain_recover(zpool_get_handle(zhp), 8235 zpool_get_name(zhp), reason, config); 8236 break; 8237 8238 case ZPOOL_STATUS_FAILING_DEV: 8239 printf_color(ANSI_BOLD, gettext("status: ")); 8240 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8241 "experienced an unrecoverable error. An\n\tattempt was " 8242 "made to correct the error. Applications are " 8243 "unaffected.\n")); 8244 printf_color(ANSI_BOLD, gettext("action: ")); 8245 printf_color(ANSI_YELLOW, gettext("Determine if the " 8246 "device needs to be replaced, and clear the errors\n\tusing" 8247 " 'zpool clear' or replace the device with 'zpool " 8248 "replace'.\n")); 8249 break; 8250 8251 case ZPOOL_STATUS_OFFLINE_DEV: 8252 printf_color(ANSI_BOLD, gettext("status: ")); 8253 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8254 "been taken offline by the administrator.\n\tSufficient " 8255 "replicas exist for the pool to continue functioning in " 8256 "a\n\tdegraded state.\n")); 8257 printf_color(ANSI_BOLD, gettext("action: ")); 8258 printf_color(ANSI_YELLOW, gettext("Online the device " 8259 "using 'zpool online' or replace the device with\n\t'zpool " 8260 "replace'.\n")); 8261 break; 8262 8263 case ZPOOL_STATUS_REMOVED_DEV: 8264 printf_color(ANSI_BOLD, gettext("status: ")); 8265 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8266 "been removed by the administrator.\n\tSufficient " 8267 "replicas exist for the pool to continue functioning in " 8268 "a\n\tdegraded state.\n")); 8269 printf_color(ANSI_BOLD, gettext("action: ")); 8270 printf_color(ANSI_YELLOW, gettext("Online the device " 8271 "using zpool online' or replace the device with\n\t'zpool " 8272 "replace'.\n")); 8273 break; 8274 8275 case ZPOOL_STATUS_RESILVERING: 8276 case ZPOOL_STATUS_REBUILDING: 8277 printf_color(ANSI_BOLD, gettext("status: ")); 8278 printf_color(ANSI_YELLOW, gettext("One or more devices is " 8279 "currently being resilvered. The pool will\n\tcontinue " 8280 "to function, possibly in a degraded state.\n")); 8281 printf_color(ANSI_BOLD, gettext("action: ")); 8282 printf_color(ANSI_YELLOW, gettext("Wait for the resilver to " 8283 "complete.\n")); 8284 break; 8285 8286 case ZPOOL_STATUS_REBUILD_SCRUB: 8287 printf_color(ANSI_BOLD, gettext("status: ")); 8288 printf_color(ANSI_YELLOW, gettext("One or more devices have " 8289 "been sequentially resilvered, scrubbing\n\tthe pool " 8290 "is recommended.\n")); 8291 printf_color(ANSI_BOLD, gettext("action: ")); 8292 printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to " 8293 "verify all data checksums.\n")); 8294 break; 8295 8296 case ZPOOL_STATUS_CORRUPT_DATA: 8297 printf_color(ANSI_BOLD, gettext("status: ")); 8298 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8299 "experienced an error resulting in data\n\tcorruption. " 8300 "Applications may be affected.\n")); 8301 printf_color(ANSI_BOLD, gettext("action: ")); 8302 printf_color(ANSI_YELLOW, gettext("Restore the file in question" 8303 " if possible. 
Otherwise restore the\n\tentire pool from " 8304 "backup.\n")); 8305 break; 8306 8307 case ZPOOL_STATUS_CORRUPT_POOL: 8308 printf_color(ANSI_BOLD, gettext("status: ")); 8309 printf_color(ANSI_YELLOW, gettext("The pool metadata is " 8310 "corrupted and the pool cannot be opened.\n")); 8311 zpool_explain_recover(zpool_get_handle(zhp), 8312 zpool_get_name(zhp), reason, config); 8313 break; 8314 8315 case ZPOOL_STATUS_VERSION_OLDER: 8316 printf_color(ANSI_BOLD, gettext("status: ")); 8317 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 8318 "a legacy on-disk format. The pool can\n\tstill be used, " 8319 "but some features are unavailable.\n")); 8320 printf_color(ANSI_BOLD, gettext("action: ")); 8321 printf_color(ANSI_YELLOW, gettext("Upgrade the pool using " 8322 "'zpool upgrade'. Once this is done, the\n\tpool will no " 8323 "longer be accessible on software that does not support\n\t" 8324 "feature flags.\n")); 8325 break; 8326 8327 case ZPOOL_STATUS_VERSION_NEWER: 8328 printf_color(ANSI_BOLD, gettext("status: ")); 8329 printf_color(ANSI_YELLOW, gettext("The pool has been upgraded " 8330 "to a newer, incompatible on-disk version.\n\tThe pool " 8331 "cannot be accessed on this system.\n")); 8332 printf_color(ANSI_BOLD, gettext("action: ")); 8333 printf_color(ANSI_YELLOW, gettext("Access the pool from a " 8334 "system running more recent software, or\n\trestore the " 8335 "pool from backup.\n")); 8336 break; 8337 8338 case ZPOOL_STATUS_FEAT_DISABLED: 8339 printf_color(ANSI_BOLD, gettext("status: ")); 8340 printf_color(ANSI_YELLOW, gettext("Some supported and " 8341 "requested features are not enabled on the pool.\n\t" 8342 "The pool can still be used, but some features are " 8343 "unavailable.\n")); 8344 printf_color(ANSI_BOLD, gettext("action: ")); 8345 printf_color(ANSI_YELLOW, gettext("Enable all features using " 8346 "'zpool upgrade'. Once this is done,\n\tthe pool may no " 8347 "longer be accessible by software that does not support\n\t" 8348 "the features. See zpool-features(7) for details.\n")); 8349 break; 8350 8351 case ZPOOL_STATUS_COMPATIBILITY_ERR: 8352 printf_color(ANSI_BOLD, gettext("status: ")); 8353 printf_color(ANSI_YELLOW, gettext("This pool has a " 8354 "compatibility list specified, but it could not be\n\t" 8355 "read/parsed at this time. 
The pool can still be used, " 8356 "but this\n\tshould be investigated.\n")); 8357 printf_color(ANSI_BOLD, gettext("action: ")); 8358 printf_color(ANSI_YELLOW, gettext("Check the value of the " 8359 "'compatibility' property against the\n\t" 8360 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or " 8361 ZPOOL_DATA_COMPAT_D ".\n")); 8362 break; 8363 8364 case ZPOOL_STATUS_INCOMPATIBLE_FEAT: 8365 printf_color(ANSI_BOLD, gettext("status: ")); 8366 printf_color(ANSI_YELLOW, gettext("One or more features " 8367 "are enabled on the pool despite not being\n\t" 8368 "requested by the 'compatibility' property.\n")); 8369 printf_color(ANSI_BOLD, gettext("action: ")); 8370 printf_color(ANSI_YELLOW, gettext("Consider setting " 8371 "'compatibility' to an appropriate value, or\n\t" 8372 "adding needed features to the relevant file in\n\t" 8373 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n")); 8374 break; 8375 8376 case ZPOOL_STATUS_UNSUP_FEAT_READ: 8377 printf_color(ANSI_BOLD, gettext("status: ")); 8378 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed " 8379 "on this system because it uses the\n\tfollowing feature(s)" 8380 " not supported on this system:\n")); 8381 zpool_print_unsup_feat(config); 8382 (void) printf("\n"); 8383 printf_color(ANSI_BOLD, gettext("action: ")); 8384 printf_color(ANSI_YELLOW, gettext("Access the pool from a " 8385 "system that supports the required feature(s),\n\tor " 8386 "restore the pool from backup.\n")); 8387 break; 8388 8389 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 8390 printf_color(ANSI_BOLD, gettext("status: ")); 8391 printf_color(ANSI_YELLOW, gettext("The pool can only be " 8392 "accessed in read-only mode on this system. It\n\tcannot be" 8393 " accessed in read-write mode because it uses the " 8394 "following\n\tfeature(s) not supported on this system:\n")); 8395 zpool_print_unsup_feat(config); 8396 (void) printf("\n"); 8397 printf_color(ANSI_BOLD, gettext("action: ")); 8398 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed " 8399 "in read-write mode. Import the pool with\n" 8400 "\t\"-o readonly=on\", access the pool from a system that " 8401 "supports the\n\trequired feature(s), or restore the " 8402 "pool from backup.\n")); 8403 break; 8404 8405 case ZPOOL_STATUS_FAULTED_DEV_R: 8406 printf_color(ANSI_BOLD, gettext("status: ")); 8407 printf_color(ANSI_YELLOW, gettext("One or more devices are " 8408 "faulted in response to persistent errors.\n\tSufficient " 8409 "replicas exist for the pool to continue functioning " 8410 "in a\n\tdegraded state.\n")); 8411 printf_color(ANSI_BOLD, gettext("action: ")); 8412 printf_color(ANSI_YELLOW, gettext("Replace the faulted device, " 8413 "or use 'zpool clear' to mark the device\n\trepaired.\n")); 8414 break; 8415 8416 case ZPOOL_STATUS_FAULTED_DEV_NR: 8417 printf_color(ANSI_BOLD, gettext("status: ")); 8418 printf_color(ANSI_YELLOW, gettext("One or more devices are " 8419 "faulted in response to persistent errors. There are " 8420 "insufficient replicas for the pool to\n\tcontinue " 8421 "functioning.\n")); 8422 printf_color(ANSI_BOLD, gettext("action: ")); 8423 printf_color(ANSI_YELLOW, gettext("Destroy and re-create the " 8424 "pool from a backup source. 
Manually marking the device\n" 8425 "\trepaired using 'zpool clear' may allow some data " 8426 "to be recovered.\n")); 8427 break; 8428 8429 case ZPOOL_STATUS_IO_FAILURE_MMP: 8430 printf_color(ANSI_BOLD, gettext("status: ")); 8431 printf_color(ANSI_YELLOW, gettext("The pool is suspended " 8432 "because multihost writes failed or were delayed;\n\t" 8433 "another system could import the pool undetected.\n")); 8434 printf_color(ANSI_BOLD, gettext("action: ")); 8435 printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices" 8436 " are connected, then reboot your system and\n\timport the " 8437 "pool.\n")); 8438 break; 8439 8440 case ZPOOL_STATUS_IO_FAILURE_WAIT: 8441 case ZPOOL_STATUS_IO_FAILURE_CONTINUE: 8442 printf_color(ANSI_BOLD, gettext("status: ")); 8443 printf_color(ANSI_YELLOW, gettext("One or more devices are " 8444 "faulted in response to IO failures.\n")); 8445 printf_color(ANSI_BOLD, gettext("action: ")); 8446 printf_color(ANSI_YELLOW, gettext("Make sure the affected " 8447 "devices are connected, then run 'zpool clear'.\n")); 8448 break; 8449 8450 case ZPOOL_STATUS_BAD_LOG: 8451 printf_color(ANSI_BOLD, gettext("status: ")); 8452 printf_color(ANSI_YELLOW, gettext("An intent log record " 8453 "could not be read.\n" 8454 "\tWaiting for administrator intervention to fix the " 8455 "faulted pool.\n")); 8456 printf_color(ANSI_BOLD, gettext("action: ")); 8457 printf_color(ANSI_YELLOW, gettext("Either restore the affected " 8458 "device(s) and run 'zpool online',\n" 8459 "\tor ignore the intent log records by running " 8460 "'zpool clear'.\n")); 8461 break; 8462 8463 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 8464 (void) printf(gettext("status: One or more devices are " 8465 "configured to use a non-native block size.\n" 8466 "\tExpect reduced performance.\n")); 8467 (void) printf(gettext("action: Replace affected devices with " 8468 "devices that support the\n\tconfigured block size, or " 8469 "migrate data to a properly configured\n\tpool.\n")); 8470 break; 8471 8472 case ZPOOL_STATUS_HOSTID_MISMATCH: 8473 printf_color(ANSI_BOLD, gettext("status: ")); 8474 printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid" 8475 " and system hostid on imported pool.\n\tThis pool was " 8476 "previously imported into a system with a different " 8477 "hostid,\n\tand then was verbatim imported into this " 8478 "system.\n")); 8479 printf_color(ANSI_BOLD, gettext("action: ")); 8480 printf_color(ANSI_YELLOW, gettext("Export this pool on all " 8481 "systems on which it is imported.\n" 8482 "\tThen import it to correct the mismatch.\n")); 8483 break; 8484 8485 case ZPOOL_STATUS_ERRATA: 8486 printf_color(ANSI_BOLD, gettext("status: ")); 8487 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"), 8488 errata); 8489 8490 switch (errata) { 8491 case ZPOOL_ERRATA_NONE: 8492 break; 8493 8494 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 8495 printf_color(ANSI_BOLD, gettext("action: ")); 8496 printf_color(ANSI_YELLOW, gettext("To correct the issue" 8497 " run 'zpool scrub'.\n")); 8498 break; 8499 8500 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 8501 (void) printf(gettext("\tExisting encrypted datasets " 8502 "contain an on-disk incompatibility\n\twhich " 8503 "needs to be corrected.\n")); 8504 printf_color(ANSI_BOLD, gettext("action: ")); 8505 printf_color(ANSI_YELLOW, gettext("To correct the issue" 8506 " backup existing encrypted datasets to new\n\t" 8507 "encrypted datasets and destroy the old ones. 
" 8508 "'zfs mount -o ro' can\n\tbe used to temporarily " 8509 "mount existing encrypted datasets readonly.\n")); 8510 break; 8511 8512 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 8513 (void) printf(gettext("\tExisting encrypted snapshots " 8514 "and bookmarks contain an on-disk\n\tincompat" 8515 "ibility. This may cause on-disk corruption if " 8516 "they are used\n\twith 'zfs recv'.\n")); 8517 printf_color(ANSI_BOLD, gettext("action: ")); 8518 printf_color(ANSI_YELLOW, gettext("To correct the" 8519 "issue, enable the bookmark_v2 feature. No " 8520 "additional\n\taction is needed if there are no " 8521 "encrypted snapshots or bookmarks.\n\tIf preserving" 8522 "the encrypted snapshots and bookmarks is required," 8523 " use\n\ta non-raw send to backup and restore them." 8524 " Alternately, they may be\n\tremoved to resolve " 8525 "the incompatibility.\n")); 8526 break; 8527 8528 default: 8529 /* 8530 * All errata which allow the pool to be imported 8531 * must contain an action message. 8532 */ 8533 assert(0); 8534 } 8535 break; 8536 8537 default: 8538 /* 8539 * The remaining errors can't actually be generated, yet. 8540 */ 8541 assert(reason == ZPOOL_STATUS_OK); 8542 } 8543 8544 if (msgid != NULL) { 8545 printf(" "); 8546 printf_color(ANSI_BOLD, gettext("see:")); 8547 printf(gettext( 8548 " https://openzfs.github.io/openzfs-docs/msg/%s\n"), 8549 msgid); 8550 } 8551 8552 if (config != NULL) { 8553 uint64_t nerr; 8554 nvlist_t **spares, **l2cache; 8555 uint_t nspares, nl2cache; 8556 pool_checkpoint_stat_t *pcs = NULL; 8557 pool_removal_stat_t *prs = NULL; 8558 8559 print_scan_status(zhp, nvroot); 8560 8561 (void) nvlist_lookup_uint64_array(nvroot, 8562 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 8563 print_removal_status(zhp, prs); 8564 8565 (void) nvlist_lookup_uint64_array(nvroot, 8566 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 8567 print_checkpoint_status(pcs); 8568 8569 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0, 8570 cbp->cb_name_flags | VDEV_NAME_TYPE_ID); 8571 if (cbp->cb_namewidth < 10) 8572 cbp->cb_namewidth = 10; 8573 8574 color_start(ANSI_BOLD); 8575 (void) printf(gettext("config:\n\n")); 8576 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"), 8577 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE", 8578 "CKSUM"); 8579 color_end(); 8580 8581 if (cbp->cb_print_slow_ios) { 8582 printf_color(ANSI_BOLD, " %5s", gettext("SLOW")); 8583 } 8584 8585 if (cbp->vcdl != NULL) 8586 print_cmd_columns(cbp->vcdl, 0); 8587 8588 printf("\n"); 8589 8590 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0, 8591 B_FALSE, NULL); 8592 8593 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP); 8594 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 8595 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS); 8596 8597 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 8598 &l2cache, &nl2cache) == 0) 8599 print_l2cache(zhp, cbp, l2cache, nl2cache); 8600 8601 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 8602 &spares, &nspares) == 0) 8603 print_spares(zhp, cbp, spares, nspares); 8604 8605 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, 8606 &nerr) == 0) { 8607 (void) printf("\n"); 8608 if (nerr == 0) { 8609 (void) printf(gettext( 8610 "errors: No known data errors\n")); 8611 } else if (!cbp->cb_verbose) { 8612 (void) printf(gettext("errors: %llu data " 8613 "errors, use '-v' for a list\n"), 8614 (u_longlong_t)nerr); 8615 } else { 8616 print_error_log(zhp); 8617 } 8618 } 8619 8620 if (cbp->cb_dedup_stats) 8621 
print_dedup_stats(config); 8622 } else { 8623 (void) printf(gettext("config: The configuration cannot be " 8624 "determined.\n")); 8625 } 8626 8627 return (0); 8628 } 8629 8630 /* 8631 * zpool status [-c [script1,script2,...]] [-igLpPstvx] [-T d|u] [pool] ... 8632 * [interval [count]] 8633 * 8634 * -c CMD For each vdev, run command CMD 8635 * -i Display vdev initialization status. 8636 * -g Display guid for individual vdev name. 8637 * -L Follow links when resolving vdev path name. 8638 * -p Display values in parsable (exact) format. 8639 * -P Display full path for vdev name. 8640 * -s Display slow IOs column. 8641 * -v Display complete error logs 8642 * -x Display only pools with potential problems 8643 * -D Display dedup status (undocumented) 8644 * -t Display vdev TRIM status. 8645 * -T Display a timestamp in date(1) or Unix format 8646 * 8647 * Describes the health status of all pools or some subset. 8648 */ 8649 int 8650 zpool_do_status(int argc, char **argv) 8651 { 8652 int c; 8653 int ret; 8654 float interval = 0; 8655 unsigned long count = 0; 8656 status_cbdata_t cb = { 0 }; 8657 char *cmd = NULL; 8658 8659 /* check options */ 8660 while ((c = getopt(argc, argv, "c:igLpPsvxDtT:")) != -1) { 8661 switch (c) { 8662 case 'c': 8663 if (cmd != NULL) { 8664 fprintf(stderr, 8665 gettext("Can't set -c flag twice\n")); 8666 exit(1); 8667 } 8668 8669 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 8670 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 8671 fprintf(stderr, gettext( 8672 "Can't run -c, disabled by " 8673 "ZPOOL_SCRIPTS_ENABLED.\n")); 8674 exit(1); 8675 } 8676 8677 if ((getuid() <= 0 || geteuid() <= 0) && 8678 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 8679 fprintf(stderr, gettext( 8680 "Can't run -c with root privileges " 8681 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 8682 exit(1); 8683 } 8684 cmd = optarg; 8685 break; 8686 case 'i': 8687 cb.cb_print_vdev_init = B_TRUE; 8688 break; 8689 case 'g': 8690 cb.cb_name_flags |= VDEV_NAME_GUID; 8691 break; 8692 case 'L': 8693 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 8694 break; 8695 case 'p': 8696 cb.cb_literal = B_TRUE; 8697 break; 8698 case 'P': 8699 cb.cb_name_flags |= VDEV_NAME_PATH; 8700 break; 8701 case 's': 8702 cb.cb_print_slow_ios = B_TRUE; 8703 break; 8704 case 'v': 8705 cb.cb_verbose = B_TRUE; 8706 break; 8707 case 'x': 8708 cb.cb_explain = B_TRUE; 8709 break; 8710 case 'D': 8711 cb.cb_dedup_stats = B_TRUE; 8712 break; 8713 case 't': 8714 cb.cb_print_vdev_trim = B_TRUE; 8715 break; 8716 case 'T': 8717 get_timestamp_arg(*optarg); 8718 break; 8719 case '?': 8720 if (optopt == 'c') { 8721 print_zpool_script_list("status"); 8722 exit(0); 8723 } else { 8724 fprintf(stderr, 8725 gettext("invalid option '%c'\n"), optopt); 8726 } 8727 usage(B_FALSE); 8728 } 8729 } 8730 8731 argc -= optind; 8732 argv += optind; 8733 8734 get_interval_count(&argc, argv, &interval, &count); 8735 8736 if (argc == 0) 8737 cb.cb_allpools = B_TRUE; 8738 8739 cb.cb_first = B_TRUE; 8740 cb.cb_print_status = B_TRUE; 8741 8742 for (;;) { 8743 if (timestamp_fmt != NODATE) 8744 print_timestamp(timestamp_fmt); 8745 8746 if (cmd != NULL) 8747 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd, 8748 NULL, NULL, 0, 0); 8749 8750 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 8751 cb.cb_literal, status_callback, &cb); 8752 8753 if (cb.vcdl != NULL) 8754 free_vdev_cmd_data_list(cb.vcdl); 8755 8756 if (argc == 0 && cb.cb_count == 0) 8757 (void) fprintf(stderr, gettext("no pools available\n")); 8758 else if (cb.cb_explain && cb.cb_first && 
cb.cb_allpools) 8759 (void) printf(gettext("all pools are healthy\n")); 8760 8761 if (ret != 0) 8762 return (ret); 8763 8764 if (interval == 0) 8765 break; 8766 8767 if (count != 0 && --count == 0) 8768 break; 8769 8770 (void) fsleep(interval); 8771 } 8772 8773 return (0); 8774 } 8775 8776 typedef struct upgrade_cbdata { 8777 int cb_first; 8778 int cb_argc; 8779 uint64_t cb_version; 8780 char **cb_argv; 8781 } upgrade_cbdata_t; 8782 8783 static int 8784 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs) 8785 { 8786 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION); 8787 int *count = (int *)unsupp_fs; 8788 8789 if (zfs_version > ZPL_VERSION) { 8790 (void) printf(gettext("%s (v%d) is not supported by this " 8791 "implementation of ZFS.\n"), 8792 zfs_get_name(zhp), zfs_version); 8793 (*count)++; 8794 } 8795 8796 zfs_iter_filesystems(zhp, 0, check_unsupp_fs, unsupp_fs); 8797 8798 zfs_close(zhp); 8799 8800 return (0); 8801 } 8802 8803 static int 8804 upgrade_version(zpool_handle_t *zhp, uint64_t version) 8805 { 8806 int ret; 8807 nvlist_t *config; 8808 uint64_t oldversion; 8809 int unsupp_fs = 0; 8810 8811 config = zpool_get_config(zhp, NULL); 8812 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 8813 &oldversion) == 0); 8814 8815 char compat[ZFS_MAXPROPLEN]; 8816 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat, 8817 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 8818 compat[0] = '\0'; 8819 8820 assert(SPA_VERSION_IS_SUPPORTED(oldversion)); 8821 assert(oldversion < version); 8822 8823 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs); 8824 if (ret != 0) 8825 return (ret); 8826 8827 if (unsupp_fs) { 8828 (void) fprintf(stderr, gettext("Upgrade not performed due " 8829 "to %d unsupported filesystems (max v%d).\n"), 8830 unsupp_fs, (int)ZPL_VERSION); 8831 return (1); 8832 } 8833 8834 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) { 8835 (void) fprintf(stderr, gettext("Upgrade not performed because " 8836 "'compatibility' property set to '" 8837 ZPOOL_COMPAT_LEGACY "'.\n")); 8838 return (1); 8839 } 8840 8841 ret = zpool_upgrade(zhp, version); 8842 if (ret != 0) 8843 return (ret); 8844 8845 if (version >= SPA_VERSION_FEATURES) { 8846 (void) printf(gettext("Successfully upgraded " 8847 "'%s' from version %llu to feature flags.\n"), 8848 zpool_get_name(zhp), (u_longlong_t)oldversion); 8849 } else { 8850 (void) printf(gettext("Successfully upgraded " 8851 "'%s' from version %llu to version %llu.\n"), 8852 zpool_get_name(zhp), (u_longlong_t)oldversion, 8853 (u_longlong_t)version); 8854 } 8855 8856 return (0); 8857 } 8858 8859 static int 8860 upgrade_enable_all(zpool_handle_t *zhp, int *countp) 8861 { 8862 int i, ret, count; 8863 boolean_t firstff = B_TRUE; 8864 nvlist_t *enabled = zpool_get_features(zhp); 8865 8866 char compat[ZFS_MAXPROPLEN]; 8867 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat, 8868 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 8869 compat[0] = '\0'; 8870 8871 boolean_t requested_features[SPA_FEATURES]; 8872 if (zpool_do_load_compat(compat, requested_features) != 8873 ZPOOL_COMPATIBILITY_OK) 8874 return (-1); 8875 8876 count = 0; 8877 for (i = 0; i < SPA_FEATURES; i++) { 8878 const char *fname = spa_feature_table[i].fi_uname; 8879 const char *fguid = spa_feature_table[i].fi_guid; 8880 8881 if (!spa_feature_table[i].fi_zfs_mod_supported) 8882 continue; 8883 8884 if (!nvlist_exists(enabled, fguid) && requested_features[i]) { 8885 char *propname; 8886 verify(-1 != asprintf(&propname, "feature@%s", fname)); 8887 ret = zpool_set_prop(zhp, 
propname, 8888 ZFS_FEATURE_ENABLED); 8889 if (ret != 0) { 8890 free(propname); 8891 return (ret); 8892 } 8893 count++; 8894 8895 if (firstff) { 8896 (void) printf(gettext("Enabled the " 8897 "following features on '%s':\n"), 8898 zpool_get_name(zhp)); 8899 firstff = B_FALSE; 8900 } 8901 (void) printf(gettext(" %s\n"), fname); 8902 free(propname); 8903 } 8904 } 8905 8906 if (countp != NULL) 8907 *countp = count; 8908 return (0); 8909 } 8910 8911 static int 8912 upgrade_cb(zpool_handle_t *zhp, void *arg) 8913 { 8914 upgrade_cbdata_t *cbp = arg; 8915 nvlist_t *config; 8916 uint64_t version; 8917 boolean_t modified_pool = B_FALSE; 8918 int ret; 8919 8920 config = zpool_get_config(zhp, NULL); 8921 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 8922 &version) == 0); 8923 8924 assert(SPA_VERSION_IS_SUPPORTED(version)); 8925 8926 if (version < cbp->cb_version) { 8927 cbp->cb_first = B_FALSE; 8928 ret = upgrade_version(zhp, cbp->cb_version); 8929 if (ret != 0) 8930 return (ret); 8931 modified_pool = B_TRUE; 8932 8933 /* 8934 * If they did "zpool upgrade -a", then we could 8935 * be doing ioctls to different pools. We need 8936 * to log this history once to each pool, and bypass 8937 * the normal history logging that happens in main(). 8938 */ 8939 (void) zpool_log_history(g_zfs, history_str); 8940 log_history = B_FALSE; 8941 } 8942 8943 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 8944 int count; 8945 ret = upgrade_enable_all(zhp, &count); 8946 if (ret != 0) 8947 return (ret); 8948 8949 if (count > 0) { 8950 cbp->cb_first = B_FALSE; 8951 modified_pool = B_TRUE; 8952 } 8953 } 8954 8955 if (modified_pool) { 8956 (void) printf("\n"); 8957 (void) after_zpool_upgrade(zhp); 8958 } 8959 8960 return (0); 8961 } 8962 8963 static int 8964 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg) 8965 { 8966 upgrade_cbdata_t *cbp = arg; 8967 nvlist_t *config; 8968 uint64_t version; 8969 8970 config = zpool_get_config(zhp, NULL); 8971 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 8972 &version) == 0); 8973 8974 assert(SPA_VERSION_IS_SUPPORTED(version)); 8975 8976 if (version < SPA_VERSION_FEATURES) { 8977 if (cbp->cb_first) { 8978 (void) printf(gettext("The following pools are " 8979 "formatted with legacy version numbers and can\n" 8980 "be upgraded to use feature flags. 
After " 8981 "being upgraded, these pools\nwill no " 8982 "longer be accessible by software that does not " 8983 "support feature\nflags.\n\n" 8984 "Note that setting a pool's 'compatibility' " 8985 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n" 8986 "inhibit upgrades.\n\n")); 8987 (void) printf(gettext("VER POOL\n")); 8988 (void) printf(gettext("--- ------------\n")); 8989 cbp->cb_first = B_FALSE; 8990 } 8991 8992 (void) printf("%2llu %s\n", (u_longlong_t)version, 8993 zpool_get_name(zhp)); 8994 } 8995 8996 return (0); 8997 } 8998 8999 static int 9000 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg) 9001 { 9002 upgrade_cbdata_t *cbp = arg; 9003 nvlist_t *config; 9004 uint64_t version; 9005 9006 config = zpool_get_config(zhp, NULL); 9007 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 9008 &version) == 0); 9009 9010 if (version >= SPA_VERSION_FEATURES) { 9011 int i; 9012 boolean_t poolfirst = B_TRUE; 9013 nvlist_t *enabled = zpool_get_features(zhp); 9014 9015 for (i = 0; i < SPA_FEATURES; i++) { 9016 const char *fguid = spa_feature_table[i].fi_guid; 9017 const char *fname = spa_feature_table[i].fi_uname; 9018 9019 if (!spa_feature_table[i].fi_zfs_mod_supported) 9020 continue; 9021 9022 if (!nvlist_exists(enabled, fguid)) { 9023 if (cbp->cb_first) { 9024 (void) printf(gettext("\nSome " 9025 "supported features are not " 9026 "enabled on the following pools. " 9027 "Once a\nfeature is enabled the " 9028 "pool may become incompatible with " 9029 "software\nthat does not support " 9030 "the feature. See " 9031 "zpool-features(7) for " 9032 "details.\n\n" 9033 "Note that the pool " 9034 "'compatibility' feature can be " 9035 "used to inhibit\nfeature " 9036 "upgrades.\n\n")); 9037 (void) printf(gettext("POOL " 9038 "FEATURE\n")); 9039 (void) printf(gettext("------" 9040 "---------\n")); 9041 cbp->cb_first = B_FALSE; 9042 } 9043 9044 if (poolfirst) { 9045 (void) printf(gettext("%s\n"), 9046 zpool_get_name(zhp)); 9047 poolfirst = B_FALSE; 9048 } 9049 9050 (void) printf(gettext(" %s\n"), fname); 9051 } 9052 /* 9053 * If they did "zpool upgrade -a", then we could 9054 * be doing ioctls to different pools. We need 9055 * to log this history once to each pool, and bypass 9056 * the normal history logging that happens in main(). 
9057 */ 9058 (void) zpool_log_history(g_zfs, history_str); 9059 log_history = B_FALSE; 9060 } 9061 } 9062 9063 return (0); 9064 } 9065 9066 static int 9067 upgrade_one(zpool_handle_t *zhp, void *data) 9068 { 9069 boolean_t modified_pool = B_FALSE; 9070 upgrade_cbdata_t *cbp = data; 9071 uint64_t cur_version; 9072 int ret; 9073 9074 if (strcmp("log", zpool_get_name(zhp)) == 0) { 9075 (void) fprintf(stderr, gettext("'log' is now a reserved word\n" 9076 "Pool 'log' must be renamed using export and import" 9077 " to upgrade.\n")); 9078 return (1); 9079 } 9080 9081 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 9082 if (cur_version > cbp->cb_version) { 9083 (void) printf(gettext("Pool '%s' is already formatted " 9084 "using more current version '%llu'.\n\n"), 9085 zpool_get_name(zhp), (u_longlong_t)cur_version); 9086 return (0); 9087 } 9088 9089 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) { 9090 (void) printf(gettext("Pool '%s' is already formatted " 9091 "using version %llu.\n\n"), zpool_get_name(zhp), 9092 (u_longlong_t)cbp->cb_version); 9093 return (0); 9094 } 9095 9096 if (cur_version != cbp->cb_version) { 9097 modified_pool = B_TRUE; 9098 ret = upgrade_version(zhp, cbp->cb_version); 9099 if (ret != 0) 9100 return (ret); 9101 } 9102 9103 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 9104 int count = 0; 9105 ret = upgrade_enable_all(zhp, &count); 9106 if (ret != 0) 9107 return (ret); 9108 9109 if (count != 0) { 9110 modified_pool = B_TRUE; 9111 } else if (cur_version == SPA_VERSION) { 9112 (void) printf(gettext("Pool '%s' already has all " 9113 "supported and requested features enabled.\n"), 9114 zpool_get_name(zhp)); 9115 } 9116 } 9117 9118 if (modified_pool) { 9119 (void) printf("\n"); 9120 (void) after_zpool_upgrade(zhp); 9121 } 9122 9123 return (0); 9124 } 9125 9126 /* 9127 * zpool upgrade 9128 * zpool upgrade -v 9129 * zpool upgrade [-V version] <-a | pool ...> 9130 * 9131 * With no arguments, display downrev'd ZFS pool available for upgrade. 9132 * Individual pools can be upgraded by specifying the pool, and '-a' will 9133 * upgrade all pools. 
9134 */ 9135 int 9136 zpool_do_upgrade(int argc, char **argv) 9137 { 9138 int c; 9139 upgrade_cbdata_t cb = { 0 }; 9140 int ret = 0; 9141 boolean_t showversions = B_FALSE; 9142 boolean_t upgradeall = B_FALSE; 9143 char *end; 9144 9145 9146 /* check options */ 9147 while ((c = getopt(argc, argv, ":avV:")) != -1) { 9148 switch (c) { 9149 case 'a': 9150 upgradeall = B_TRUE; 9151 break; 9152 case 'v': 9153 showversions = B_TRUE; 9154 break; 9155 case 'V': 9156 cb.cb_version = strtoll(optarg, &end, 10); 9157 if (*end != '\0' || 9158 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) { 9159 (void) fprintf(stderr, 9160 gettext("invalid version '%s'\n"), optarg); 9161 usage(B_FALSE); 9162 } 9163 break; 9164 case ':': 9165 (void) fprintf(stderr, gettext("missing argument for " 9166 "'%c' option\n"), optopt); 9167 usage(B_FALSE); 9168 break; 9169 case '?': 9170 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 9171 optopt); 9172 usage(B_FALSE); 9173 } 9174 } 9175 9176 cb.cb_argc = argc; 9177 cb.cb_argv = argv; 9178 argc -= optind; 9179 argv += optind; 9180 9181 if (cb.cb_version == 0) { 9182 cb.cb_version = SPA_VERSION; 9183 } else if (!upgradeall && argc == 0) { 9184 (void) fprintf(stderr, gettext("-V option is " 9185 "incompatible with other arguments\n")); 9186 usage(B_FALSE); 9187 } 9188 9189 if (showversions) { 9190 if (upgradeall || argc != 0) { 9191 (void) fprintf(stderr, gettext("-v option is " 9192 "incompatible with other arguments\n")); 9193 usage(B_FALSE); 9194 } 9195 } else if (upgradeall) { 9196 if (argc != 0) { 9197 (void) fprintf(stderr, gettext("-a option should not " 9198 "be used along with a pool name\n")); 9199 usage(B_FALSE); 9200 } 9201 } 9202 9203 (void) printf("%s", gettext("This system supports ZFS pool feature " 9204 "flags.\n\n")); 9205 if (showversions) { 9206 int i; 9207 9208 (void) printf(gettext("The following features are " 9209 "supported:\n\n")); 9210 (void) printf(gettext("FEAT DESCRIPTION\n")); 9211 (void) printf("----------------------------------------------" 9212 "---------------\n"); 9213 for (i = 0; i < SPA_FEATURES; i++) { 9214 zfeature_info_t *fi = &spa_feature_table[i]; 9215 if (!fi->fi_zfs_mod_supported) 9216 continue; 9217 const char *ro = 9218 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ? 
9219 " (read-only compatible)" : ""; 9220 9221 (void) printf("%-37s%s\n", fi->fi_uname, ro); 9222 (void) printf(" %s\n", fi->fi_desc); 9223 } 9224 (void) printf("\n"); 9225 9226 (void) printf(gettext("The following legacy versions are also " 9227 "supported:\n\n")); 9228 (void) printf(gettext("VER DESCRIPTION\n")); 9229 (void) printf("--- -----------------------------------------" 9230 "---------------\n"); 9231 (void) printf(gettext(" 1 Initial ZFS version\n")); 9232 (void) printf(gettext(" 2 Ditto blocks " 9233 "(replicated metadata)\n")); 9234 (void) printf(gettext(" 3 Hot spares and double parity " 9235 "RAID-Z\n")); 9236 (void) printf(gettext(" 4 zpool history\n")); 9237 (void) printf(gettext(" 5 Compression using the gzip " 9238 "algorithm\n")); 9239 (void) printf(gettext(" 6 bootfs pool property\n")); 9240 (void) printf(gettext(" 7 Separate intent log devices\n")); 9241 (void) printf(gettext(" 8 Delegated administration\n")); 9242 (void) printf(gettext(" 9 refquota and refreservation " 9243 "properties\n")); 9244 (void) printf(gettext(" 10 Cache devices\n")); 9245 (void) printf(gettext(" 11 Improved scrub performance\n")); 9246 (void) printf(gettext(" 12 Snapshot properties\n")); 9247 (void) printf(gettext(" 13 snapused property\n")); 9248 (void) printf(gettext(" 14 passthrough-x aclinherit\n")); 9249 (void) printf(gettext(" 15 user/group space accounting\n")); 9250 (void) printf(gettext(" 16 stmf property support\n")); 9251 (void) printf(gettext(" 17 Triple-parity RAID-Z\n")); 9252 (void) printf(gettext(" 18 Snapshot user holds\n")); 9253 (void) printf(gettext(" 19 Log device removal\n")); 9254 (void) printf(gettext(" 20 Compression using zle " 9255 "(zero-length encoding)\n")); 9256 (void) printf(gettext(" 21 Deduplication\n")); 9257 (void) printf(gettext(" 22 Received properties\n")); 9258 (void) printf(gettext(" 23 Slim ZIL\n")); 9259 (void) printf(gettext(" 24 System attributes\n")); 9260 (void) printf(gettext(" 25 Improved scrub stats\n")); 9261 (void) printf(gettext(" 26 Improved snapshot deletion " 9262 "performance\n")); 9263 (void) printf(gettext(" 27 Improved snapshot creation " 9264 "performance\n")); 9265 (void) printf(gettext(" 28 Multiple vdev replacements\n")); 9266 (void) printf(gettext("\nFor more information on a particular " 9267 "version, including supported releases,\n")); 9268 (void) printf(gettext("see the ZFS Administration Guide.\n\n")); 9269 } else if (argc == 0 && upgradeall) { 9270 cb.cb_first = B_TRUE; 9271 ret = zpool_iter(g_zfs, upgrade_cb, &cb); 9272 if (ret == 0 && cb.cb_first) { 9273 if (cb.cb_version == SPA_VERSION) { 9274 (void) printf(gettext("All pools are already " 9275 "formatted using feature flags.\n\n")); 9276 (void) printf(gettext("Every feature flags " 9277 "pool already has all supported and " 9278 "requested features enabled.\n")); 9279 } else { 9280 (void) printf(gettext("All pools are already " 9281 "formatted with version %llu or higher.\n"), 9282 (u_longlong_t)cb.cb_version); 9283 } 9284 } 9285 } else if (argc == 0) { 9286 cb.cb_first = B_TRUE; 9287 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb); 9288 assert(ret == 0); 9289 9290 if (cb.cb_first) { 9291 (void) printf(gettext("All pools are formatted " 9292 "using feature flags.\n\n")); 9293 } else { 9294 (void) printf(gettext("\nUse 'zpool upgrade -v' " 9295 "for a list of available legacy versions.\n")); 9296 } 9297 9298 cb.cb_first = B_TRUE; 9299 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb); 9300 assert(ret == 0); 9301 9302 if (cb.cb_first) { 9303 (void) 
printf(gettext("Every feature flags pool has " 9304 "all supported and requested features enabled.\n")); 9305 } else { 9306 (void) printf(gettext("\n")); 9307 } 9308 } else { 9309 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 9310 B_FALSE, upgrade_one, &cb); 9311 } 9312 9313 return (ret); 9314 } 9315 9316 typedef struct hist_cbdata { 9317 boolean_t first; 9318 boolean_t longfmt; 9319 boolean_t internal; 9320 } hist_cbdata_t; 9321 9322 static void 9323 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb) 9324 { 9325 nvlist_t **records; 9326 uint_t numrecords; 9327 int i; 9328 9329 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD, 9330 &records, &numrecords) == 0); 9331 for (i = 0; i < numrecords; i++) { 9332 nvlist_t *rec = records[i]; 9333 char tbuf[64] = ""; 9334 9335 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) { 9336 time_t tsec; 9337 struct tm t; 9338 9339 tsec = fnvlist_lookup_uint64(records[i], 9340 ZPOOL_HIST_TIME); 9341 (void) localtime_r(&tsec, &t); 9342 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 9343 } 9344 9345 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) { 9346 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i], 9347 ZPOOL_HIST_ELAPSED_NS); 9348 (void) snprintf(tbuf + strlen(tbuf), 9349 sizeof (tbuf) - strlen(tbuf), 9350 " (%lldms)", (long long)elapsed_ns / 1000 / 1000); 9351 } 9352 9353 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) { 9354 (void) printf("%s %s", tbuf, 9355 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD)); 9356 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) { 9357 int ievent = 9358 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT); 9359 if (!cb->internal) 9360 continue; 9361 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) { 9362 (void) printf("%s unrecognized record:\n", 9363 tbuf); 9364 dump_nvlist(rec, 4); 9365 continue; 9366 } 9367 (void) printf("%s [internal %s txg:%lld] %s", tbuf, 9368 zfs_history_event_names[ievent], 9369 (longlong_t)fnvlist_lookup_uint64( 9370 rec, ZPOOL_HIST_TXG), 9371 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR)); 9372 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) { 9373 if (!cb->internal) 9374 continue; 9375 (void) printf("%s [txg:%lld] %s", tbuf, 9376 (longlong_t)fnvlist_lookup_uint64( 9377 rec, ZPOOL_HIST_TXG), 9378 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME)); 9379 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) { 9380 (void) printf(" %s (%llu)", 9381 fnvlist_lookup_string(rec, 9382 ZPOOL_HIST_DSNAME), 9383 (u_longlong_t)fnvlist_lookup_uint64(rec, 9384 ZPOOL_HIST_DSID)); 9385 } 9386 (void) printf(" %s", fnvlist_lookup_string(rec, 9387 ZPOOL_HIST_INT_STR)); 9388 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) { 9389 if (!cb->internal) 9390 continue; 9391 (void) printf("%s ioctl %s\n", tbuf, 9392 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL)); 9393 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) { 9394 (void) printf(" input:\n"); 9395 dump_nvlist(fnvlist_lookup_nvlist(rec, 9396 ZPOOL_HIST_INPUT_NVL), 8); 9397 } 9398 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) { 9399 (void) printf(" output:\n"); 9400 dump_nvlist(fnvlist_lookup_nvlist(rec, 9401 ZPOOL_HIST_OUTPUT_NVL), 8); 9402 } 9403 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) { 9404 (void) printf(" output nvlist omitted; " 9405 "original size: %lldKB\n", 9406 (longlong_t)fnvlist_lookup_int64(rec, 9407 ZPOOL_HIST_OUTPUT_SIZE) / 1024); 9408 } 9409 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) { 9410 (void) printf(" errno: %lld\n", 9411 (longlong_t)fnvlist_lookup_int64(rec, 9412 ZPOOL_HIST_ERRNO)); 9413 } 9414 } else { 9415 if (!cb->internal) 9416 
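				/* unrecognized record: dump it only when -i (internal) was requested */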
continue; 9417 (void) printf("%s unrecognized record:\n", tbuf); 9418 dump_nvlist(rec, 4); 9419 } 9420 9421 if (!cb->longfmt) { 9422 (void) printf("\n"); 9423 continue; 9424 } 9425 (void) printf(" ["); 9426 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) { 9427 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO); 9428 struct passwd *pwd = getpwuid(who); 9429 (void) printf("user %d ", (int)who); 9430 if (pwd != NULL) 9431 (void) printf("(%s) ", pwd->pw_name); 9432 } 9433 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) { 9434 (void) printf("on %s", 9435 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST)); 9436 } 9437 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) { 9438 (void) printf(":%s", 9439 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE)); 9440 } 9441 9442 (void) printf("]"); 9443 (void) printf("\n"); 9444 } 9445 } 9446 9447 /* 9448 * Print out the command history for a specific pool. 9449 */ 9450 static int 9451 get_history_one(zpool_handle_t *zhp, void *data) 9452 { 9453 nvlist_t *nvhis; 9454 int ret; 9455 hist_cbdata_t *cb = (hist_cbdata_t *)data; 9456 uint64_t off = 0; 9457 boolean_t eof = B_FALSE; 9458 9459 cb->first = B_FALSE; 9460 9461 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp)); 9462 9463 while (!eof) { 9464 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0) 9465 return (ret); 9466 9467 print_history_records(nvhis, cb); 9468 nvlist_free(nvhis); 9469 } 9470 (void) printf("\n"); 9471 9472 return (ret); 9473 } 9474 9475 /* 9476 * zpool history <pool> 9477 * 9478 * Displays the history of commands that modified pools. 9479 */ 9480 int 9481 zpool_do_history(int argc, char **argv) 9482 { 9483 hist_cbdata_t cbdata = { 0 }; 9484 int ret; 9485 int c; 9486 9487 cbdata.first = B_TRUE; 9488 /* check options */ 9489 while ((c = getopt(argc, argv, "li")) != -1) { 9490 switch (c) { 9491 case 'l': 9492 cbdata.longfmt = B_TRUE; 9493 break; 9494 case 'i': 9495 cbdata.internal = B_TRUE; 9496 break; 9497 case '?': 9498 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 9499 optopt); 9500 usage(B_FALSE); 9501 } 9502 } 9503 argc -= optind; 9504 argv += optind; 9505 9506 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 9507 B_FALSE, get_history_one, &cbdata); 9508 9509 if (argc == 0 && cbdata.first == B_TRUE) { 9510 (void) fprintf(stderr, gettext("no pools available\n")); 9511 return (0); 9512 } 9513 9514 return (ret); 9515 } 9516 9517 typedef struct ev_opts { 9518 int verbose; 9519 int scripted; 9520 int follow; 9521 int clear; 9522 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 9523 } ev_opts_t; 9524 9525 static void 9526 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts) 9527 { 9528 char ctime_str[26], str[32], *ptr; 9529 int64_t *tv; 9530 uint_t n; 9531 9532 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0); 9533 memset(str, ' ', 32); 9534 (void) ctime_r((const time_t *)&tv[0], ctime_str); 9535 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */ 9536 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */ 9537 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */ 9538 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */ 9539 if (opts->scripted) 9540 (void) printf(gettext("%s\t"), str); 9541 else 9542 (void) printf(gettext("%s "), str); 9543 9544 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0); 9545 (void) printf(gettext("%s\n"), ptr); 9546 } 9547 9548 static void 9549 zpool_do_events_nvprint(nvlist_t *nvl, int depth) 9550 { 9551 nvpair_t *nvp; 9552 9553 for (nvp = nvlist_next_nvpair(nvl, NULL); 9554 nvp != NULL; nvp = 
nvlist_next_nvpair(nvl, nvp)) { 9555 9556 data_type_t type = nvpair_type(nvp); 9557 const char *name = nvpair_name(nvp); 9558 9559 boolean_t b; 9560 uint8_t i8; 9561 uint16_t i16; 9562 uint32_t i32; 9563 uint64_t i64; 9564 char *str; 9565 nvlist_t *cnv; 9566 9567 printf(gettext("%*s%s = "), depth, "", name); 9568 9569 switch (type) { 9570 case DATA_TYPE_BOOLEAN: 9571 printf(gettext("%s"), "1"); 9572 break; 9573 9574 case DATA_TYPE_BOOLEAN_VALUE: 9575 (void) nvpair_value_boolean_value(nvp, &b); 9576 printf(gettext("%s"), b ? "1" : "0"); 9577 break; 9578 9579 case DATA_TYPE_BYTE: 9580 (void) nvpair_value_byte(nvp, &i8); 9581 printf(gettext("0x%x"), i8); 9582 break; 9583 9584 case DATA_TYPE_INT8: 9585 (void) nvpair_value_int8(nvp, (void *)&i8); 9586 printf(gettext("0x%x"), i8); 9587 break; 9588 9589 case DATA_TYPE_UINT8: 9590 (void) nvpair_value_uint8(nvp, &i8); 9591 printf(gettext("0x%x"), i8); 9592 break; 9593 9594 case DATA_TYPE_INT16: 9595 (void) nvpair_value_int16(nvp, (void *)&i16); 9596 printf(gettext("0x%x"), i16); 9597 break; 9598 9599 case DATA_TYPE_UINT16: 9600 (void) nvpair_value_uint16(nvp, &i16); 9601 printf(gettext("0x%x"), i16); 9602 break; 9603 9604 case DATA_TYPE_INT32: 9605 (void) nvpair_value_int32(nvp, (void *)&i32); 9606 printf(gettext("0x%x"), i32); 9607 break; 9608 9609 case DATA_TYPE_UINT32: 9610 (void) nvpair_value_uint32(nvp, &i32); 9611 printf(gettext("0x%x"), i32); 9612 break; 9613 9614 case DATA_TYPE_INT64: 9615 (void) nvpair_value_int64(nvp, (void *)&i64); 9616 printf(gettext("0x%llx"), (u_longlong_t)i64); 9617 break; 9618 9619 case DATA_TYPE_UINT64: 9620 (void) nvpair_value_uint64(nvp, &i64); 9621 /* 9622 * translate vdev state values to readable 9623 * strings to aide zpool events consumers 9624 */ 9625 if (strcmp(name, 9626 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 || 9627 strcmp(name, 9628 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) { 9629 printf(gettext("\"%s\" (0x%llx)"), 9630 zpool_state_to_name(i64, VDEV_AUX_NONE), 9631 (u_longlong_t)i64); 9632 } else { 9633 printf(gettext("0x%llx"), (u_longlong_t)i64); 9634 } 9635 break; 9636 9637 case DATA_TYPE_HRTIME: 9638 (void) nvpair_value_hrtime(nvp, (void *)&i64); 9639 printf(gettext("0x%llx"), (u_longlong_t)i64); 9640 break; 9641 9642 case DATA_TYPE_STRING: 9643 (void) nvpair_value_string(nvp, &str); 9644 printf(gettext("\"%s\""), str ? 
str : "<NULL>"); 9645 break; 9646 9647 case DATA_TYPE_NVLIST: 9648 printf(gettext("(embedded nvlist)\n")); 9649 (void) nvpair_value_nvlist(nvp, &cnv); 9650 zpool_do_events_nvprint(cnv, depth + 8); 9651 printf(gettext("%*s(end %s)"), depth, "", name); 9652 break; 9653 9654 case DATA_TYPE_NVLIST_ARRAY: { 9655 nvlist_t **val; 9656 uint_t i, nelem; 9657 9658 (void) nvpair_value_nvlist_array(nvp, &val, &nelem); 9659 printf(gettext("(%d embedded nvlists)\n"), nelem); 9660 for (i = 0; i < nelem; i++) { 9661 printf(gettext("%*s%s[%d] = %s\n"), 9662 depth, "", name, i, "(embedded nvlist)"); 9663 zpool_do_events_nvprint(val[i], depth + 8); 9664 printf(gettext("%*s(end %s[%i])\n"), 9665 depth, "", name, i); 9666 } 9667 printf(gettext("%*s(end %s)\n"), depth, "", name); 9668 } 9669 break; 9670 9671 case DATA_TYPE_INT8_ARRAY: { 9672 int8_t *val; 9673 uint_t i, nelem; 9674 9675 (void) nvpair_value_int8_array(nvp, &val, &nelem); 9676 for (i = 0; i < nelem; i++) 9677 printf(gettext("0x%x "), val[i]); 9678 9679 break; 9680 } 9681 9682 case DATA_TYPE_UINT8_ARRAY: { 9683 uint8_t *val; 9684 uint_t i, nelem; 9685 9686 (void) nvpair_value_uint8_array(nvp, &val, &nelem); 9687 for (i = 0; i < nelem; i++) 9688 printf(gettext("0x%x "), val[i]); 9689 9690 break; 9691 } 9692 9693 case DATA_TYPE_INT16_ARRAY: { 9694 int16_t *val; 9695 uint_t i, nelem; 9696 9697 (void) nvpair_value_int16_array(nvp, &val, &nelem); 9698 for (i = 0; i < nelem; i++) 9699 printf(gettext("0x%x "), val[i]); 9700 9701 break; 9702 } 9703 9704 case DATA_TYPE_UINT16_ARRAY: { 9705 uint16_t *val; 9706 uint_t i, nelem; 9707 9708 (void) nvpair_value_uint16_array(nvp, &val, &nelem); 9709 for (i = 0; i < nelem; i++) 9710 printf(gettext("0x%x "), val[i]); 9711 9712 break; 9713 } 9714 9715 case DATA_TYPE_INT32_ARRAY: { 9716 int32_t *val; 9717 uint_t i, nelem; 9718 9719 (void) nvpair_value_int32_array(nvp, &val, &nelem); 9720 for (i = 0; i < nelem; i++) 9721 printf(gettext("0x%x "), val[i]); 9722 9723 break; 9724 } 9725 9726 case DATA_TYPE_UINT32_ARRAY: { 9727 uint32_t *val; 9728 uint_t i, nelem; 9729 9730 (void) nvpair_value_uint32_array(nvp, &val, &nelem); 9731 for (i = 0; i < nelem; i++) 9732 printf(gettext("0x%x "), val[i]); 9733 9734 break; 9735 } 9736 9737 case DATA_TYPE_INT64_ARRAY: { 9738 int64_t *val; 9739 uint_t i, nelem; 9740 9741 (void) nvpair_value_int64_array(nvp, &val, &nelem); 9742 for (i = 0; i < nelem; i++) 9743 printf(gettext("0x%llx "), 9744 (u_longlong_t)val[i]); 9745 9746 break; 9747 } 9748 9749 case DATA_TYPE_UINT64_ARRAY: { 9750 uint64_t *val; 9751 uint_t i, nelem; 9752 9753 (void) nvpair_value_uint64_array(nvp, &val, &nelem); 9754 for (i = 0; i < nelem; i++) 9755 printf(gettext("0x%llx "), 9756 (u_longlong_t)val[i]); 9757 9758 break; 9759 } 9760 9761 case DATA_TYPE_STRING_ARRAY: { 9762 char **str; 9763 uint_t i, nelem; 9764 9765 (void) nvpair_value_string_array(nvp, &str, &nelem); 9766 for (i = 0; i < nelem; i++) 9767 printf(gettext("\"%s\" "), 9768 str[i] ? 
str[i] : "<NULL>"); 9769 9770 break; 9771 } 9772 9773 case DATA_TYPE_BOOLEAN_ARRAY: 9774 case DATA_TYPE_BYTE_ARRAY: 9775 case DATA_TYPE_DOUBLE: 9776 case DATA_TYPE_DONTCARE: 9777 case DATA_TYPE_UNKNOWN: 9778 printf(gettext("<unknown>")); 9779 break; 9780 } 9781 9782 printf(gettext("\n")); 9783 } 9784 } 9785 9786 static int 9787 zpool_do_events_next(ev_opts_t *opts) 9788 { 9789 nvlist_t *nvl; 9790 int zevent_fd, ret, dropped; 9791 char *pool; 9792 9793 zevent_fd = open(ZFS_DEV, O_RDWR); 9794 VERIFY(zevent_fd >= 0); 9795 9796 if (!opts->scripted) 9797 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS"); 9798 9799 while (1) { 9800 ret = zpool_events_next(g_zfs, &nvl, &dropped, 9801 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd); 9802 if (ret || nvl == NULL) 9803 break; 9804 9805 if (dropped > 0) 9806 (void) printf(gettext("dropped %d events\n"), dropped); 9807 9808 if (strlen(opts->poolname) > 0 && 9809 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 && 9810 strcmp(opts->poolname, pool) != 0) 9811 continue; 9812 9813 zpool_do_events_short(nvl, opts); 9814 9815 if (opts->verbose) { 9816 zpool_do_events_nvprint(nvl, 8); 9817 printf(gettext("\n")); 9818 } 9819 (void) fflush(stdout); 9820 9821 nvlist_free(nvl); 9822 } 9823 9824 VERIFY(0 == close(zevent_fd)); 9825 9826 return (ret); 9827 } 9828 9829 static int 9830 zpool_do_events_clear(void) 9831 { 9832 int count, ret; 9833 9834 ret = zpool_events_clear(g_zfs, &count); 9835 if (!ret) 9836 (void) printf(gettext("cleared %d events\n"), count); 9837 9838 return (ret); 9839 } 9840 9841 /* 9842 * zpool events [-vHf [pool] | -c] 9843 * 9844 * Displays events logs by ZFS. 9845 */ 9846 int 9847 zpool_do_events(int argc, char **argv) 9848 { 9849 ev_opts_t opts = { 0 }; 9850 int ret; 9851 int c; 9852 9853 /* check options */ 9854 while ((c = getopt(argc, argv, "vHfc")) != -1) { 9855 switch (c) { 9856 case 'v': 9857 opts.verbose = 1; 9858 break; 9859 case 'H': 9860 opts.scripted = 1; 9861 break; 9862 case 'f': 9863 opts.follow = 1; 9864 break; 9865 case 'c': 9866 opts.clear = 1; 9867 break; 9868 case '?': 9869 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 9870 optopt); 9871 usage(B_FALSE); 9872 } 9873 } 9874 argc -= optind; 9875 argv += optind; 9876 9877 if (argc > 1) { 9878 (void) fprintf(stderr, gettext("too many arguments\n")); 9879 usage(B_FALSE); 9880 } else if (argc == 1) { 9881 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname)); 9882 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) { 9883 (void) fprintf(stderr, 9884 gettext("invalid pool name '%s'\n"), opts.poolname); 9885 usage(B_FALSE); 9886 } 9887 } 9888 9889 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) && 9890 opts.clear) { 9891 (void) fprintf(stderr, 9892 gettext("invalid options combined with -c\n")); 9893 usage(B_FALSE); 9894 } 9895 9896 if (opts.clear) 9897 ret = zpool_do_events_clear(); 9898 else 9899 ret = zpool_do_events_next(&opts); 9900 9901 return (ret); 9902 } 9903 9904 static int 9905 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data) 9906 { 9907 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 9908 char value[ZFS_MAXPROPLEN]; 9909 zprop_source_t srctype; 9910 9911 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL; 9912 pl = pl->pl_next) { 9913 char *prop_name; 9914 /* 9915 * If the first property is pool name, it is a special 9916 * placeholder that we can skip. This will also skip 9917 * over the name property when 'all' is specified. 
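 * (The placeholder here is the fake_name entry for ZPOOL_PROP_NAME that
 * zpool_do_get() prepends to cb_proplist before iterating pools and vdevs,
 * so the NAME column width is accounted for without printing it as a
 * property.)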
9918 */ 9919 if (pl->pl_prop == ZPOOL_PROP_NAME && 9920 pl == cbp->cb_proplist) 9921 continue; 9922 9923 if (pl->pl_prop == ZPROP_INVAL) { 9924 prop_name = pl->pl_user_prop; 9925 } else { 9926 prop_name = (char *)vdev_prop_to_name(pl->pl_prop); 9927 } 9928 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop, 9929 prop_name, value, sizeof (value), &srctype, 9930 cbp->cb_literal) == 0) { 9931 zprop_print_one_property(vdevname, cbp, prop_name, 9932 value, srctype, NULL, NULL); 9933 } 9934 } 9935 9936 return (0); 9937 } 9938 9939 static int 9940 get_callback_vdev_width_cb(void *zhp_data, nvlist_t *nv, void *data) 9941 { 9942 zpool_handle_t *zhp = zhp_data; 9943 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 9944 char *vdevname = zpool_vdev_name(g_zfs, zhp, nv, 9945 cbp->cb_vdevs.cb_name_flags); 9946 int ret; 9947 9948 /* Adjust the column widths for the vdev properties */ 9949 ret = vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist); 9950 9951 return (ret); 9952 } 9953 9954 static int 9955 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data) 9956 { 9957 zpool_handle_t *zhp = zhp_data; 9958 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 9959 char *vdevname = zpool_vdev_name(g_zfs, zhp, nv, 9960 cbp->cb_vdevs.cb_name_flags); 9961 int ret; 9962 9963 /* Display the properties */ 9964 ret = get_callback_vdev(zhp, vdevname, data); 9965 9966 return (ret); 9967 } 9968 9969 static int 9970 get_callback(zpool_handle_t *zhp, void *data) 9971 { 9972 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 9973 char value[MAXNAMELEN]; 9974 zprop_source_t srctype; 9975 zprop_list_t *pl; 9976 int vid; 9977 9978 if (cbp->cb_type == ZFS_TYPE_VDEV) { 9979 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) { 9980 for_each_vdev(zhp, get_callback_vdev_width_cb, data); 9981 for_each_vdev(zhp, get_callback_vdev_cb, data); 9982 } else { 9983 /* Adjust column widths for vdev properties */ 9984 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; 9985 vid++) { 9986 vdev_expand_proplist(zhp, 9987 cbp->cb_vdevs.cb_names[vid], 9988 &cbp->cb_proplist); 9989 } 9990 /* Display the properties */ 9991 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; 9992 vid++) { 9993 get_callback_vdev(zhp, 9994 cbp->cb_vdevs.cb_names[vid], data); 9995 } 9996 } 9997 } else { 9998 assert(cbp->cb_type == ZFS_TYPE_POOL); 9999 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { 10000 /* 10001 * Skip the special fake placeholder. This will also 10002 * skip over the name property when 'all' is specified. 10003 */ 10004 if (pl->pl_prop == ZPOOL_PROP_NAME && 10005 pl == cbp->cb_proplist) 10006 continue; 10007 10008 if (pl->pl_prop == ZPROP_INVAL && 10009 (zpool_prop_feature(pl->pl_user_prop) || 10010 zpool_prop_unsupported(pl->pl_user_prop))) { 10011 srctype = ZPROP_SRC_LOCAL; 10012 10013 if (zpool_prop_get_feature(zhp, 10014 pl->pl_user_prop, value, 10015 sizeof (value)) == 0) { 10016 zprop_print_one_property( 10017 zpool_get_name(zhp), cbp, 10018 pl->pl_user_prop, value, srctype, 10019 NULL, NULL); 10020 } 10021 } else { 10022 if (zpool_get_prop(zhp, pl->pl_prop, value, 10023 sizeof (value), &srctype, 10024 cbp->cb_literal) != 0) 10025 continue; 10026 10027 zprop_print_one_property(zpool_get_name(zhp), 10028 cbp, zpool_prop_to_name(pl->pl_prop), 10029 value, srctype, NULL, NULL); 10030 } 10031 } 10032 } 10033 10034 return (0); 10035 } 10036 10037 /* 10038 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ... 10039 * 10040 * -H Scripted mode. 
Don't display headers, and separate properties 10041 * by a single tab. 10042 * -o List of columns to display. Defaults to 10043 * "name,property,value,source". 10044 * -p Display values in parsable (exact) format. 10045 * 10046 * Get properties of pools in the system. Output space statistics 10047 * for each one as well as other attributes. 10048 */ 10049 int 10050 zpool_do_get(int argc, char **argv) 10051 { 10052 zprop_get_cbdata_t cb = { 0 }; 10053 zprop_list_t fake_name = { 0 }; 10054 int ret; 10055 int c, i; 10056 char *propstr = NULL; 10057 10058 cb.cb_first = B_TRUE; 10059 10060 /* 10061 * Set up default columns and sources. 10062 */ 10063 cb.cb_sources = ZPROP_SRC_ALL; 10064 cb.cb_columns[0] = GET_COL_NAME; 10065 cb.cb_columns[1] = GET_COL_PROPERTY; 10066 cb.cb_columns[2] = GET_COL_VALUE; 10067 cb.cb_columns[3] = GET_COL_SOURCE; 10068 cb.cb_type = ZFS_TYPE_POOL; 10069 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; 10070 current_prop_type = cb.cb_type; 10071 10072 /* check options */ 10073 while ((c = getopt(argc, argv, ":Hpo:")) != -1) { 10074 switch (c) { 10075 case 'p': 10076 cb.cb_literal = B_TRUE; 10077 break; 10078 case 'H': 10079 cb.cb_scripted = B_TRUE; 10080 break; 10081 case 'o': 10082 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns)); 10083 i = 0; 10084 10085 for (char *tok; (tok = strsep(&optarg, ",")); ) { 10086 static const char *const col_opts[] = 10087 { "name", "property", "value", "source", 10088 "all" }; 10089 static const zfs_get_column_t col_cols[] = 10090 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE, 10091 GET_COL_SOURCE }; 10092 10093 if (i == ZFS_GET_NCOLS - 1) { 10094 (void) fprintf(stderr, gettext("too " 10095 "many fields given to -o " 10096 "option\n")); 10097 usage(B_FALSE); 10098 } 10099 10100 for (c = 0; c < ARRAY_SIZE(col_opts); ++c) 10101 if (strcmp(tok, col_opts[c]) == 0) 10102 goto found; 10103 10104 (void) fprintf(stderr, 10105 gettext("invalid column name '%s'\n"), tok); 10106 usage(B_FALSE); 10107 10108 found: 10109 if (c >= 4) { 10110 if (i > 0) { 10111 (void) fprintf(stderr, 10112 gettext("\"all\" conflicts " 10113 "with specific fields " 10114 "given to -o option\n")); 10115 usage(B_FALSE); 10116 } 10117 10118 memcpy(cb.cb_columns, col_cols, 10119 sizeof (col_cols)); 10120 i = ZFS_GET_NCOLS - 1; 10121 } else 10122 cb.cb_columns[i++] = col_cols[c]; 10123 } 10124 break; 10125 case '?': 10126 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10127 optopt); 10128 usage(B_FALSE); 10129 } 10130 } 10131 10132 argc -= optind; 10133 argv += optind; 10134 10135 if (argc < 1) { 10136 (void) fprintf(stderr, gettext("missing property " 10137 "argument\n")); 10138 usage(B_FALSE); 10139 } 10140 10141 /* Properties list is needed later by zprop_get_list() */ 10142 propstr = argv[0]; 10143 10144 argc--; 10145 argv++; 10146 10147 if (argc == 0) { 10148 /* No args, so just print the defaults. */ 10149 } else if (are_all_pools(argc, argv)) { 10150 /* All the args are pool names */ 10151 } else if (are_all_pools(1, argv)) { 10152 /* The first arg is a pool name */ 10153 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) || 10154 are_vdevs_in_pool(argc - 1, argv + 1, argv[0], 10155 &cb.cb_vdevs)) { 10156 /* ... 
and the rest are vdev names */ 10157 cb.cb_vdevs.cb_names = argv + 1; 10158 cb.cb_vdevs.cb_names_count = argc - 1; 10159 cb.cb_type = ZFS_TYPE_VDEV; 10160 argc = 1; /* One pool to process */ 10161 } else { 10162 fprintf(stderr, gettext("Expected a list of vdevs in" 10163 " \"%s\", but got:\n"), argv[0]); 10164 error_list_unresolved_vdevs(argc - 1, argv + 1, 10165 argv[0], &cb.cb_vdevs); 10166 fprintf(stderr, "\n"); 10167 usage(B_FALSE); 10168 return (1); 10169 } 10170 } else { 10171 /* 10172 * The first arg isn't a pool name, 10173 */ 10174 fprintf(stderr, gettext("missing pool name.\n")); 10175 fprintf(stderr, "\n"); 10176 usage(B_FALSE); 10177 return (1); 10178 } 10179 10180 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist, 10181 cb.cb_type) != 0) { 10182 /* Use correct list of valid properties (pool or vdev) */ 10183 current_prop_type = cb.cb_type; 10184 usage(B_FALSE); 10185 } 10186 10187 if (cb.cb_proplist != NULL) { 10188 fake_name.pl_prop = ZPOOL_PROP_NAME; 10189 fake_name.pl_width = strlen(gettext("NAME")); 10190 fake_name.pl_next = cb.cb_proplist; 10191 cb.cb_proplist = &fake_name; 10192 } 10193 10194 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type, 10195 cb.cb_literal, get_callback, &cb); 10196 10197 if (cb.cb_proplist == &fake_name) 10198 zprop_free_list(fake_name.pl_next); 10199 else 10200 zprop_free_list(cb.cb_proplist); 10201 10202 return (ret); 10203 } 10204 10205 typedef struct set_cbdata { 10206 char *cb_propname; 10207 char *cb_value; 10208 zfs_type_t cb_type; 10209 vdev_cbdata_t cb_vdevs; 10210 boolean_t cb_any_successful; 10211 } set_cbdata_t; 10212 10213 static int 10214 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb) 10215 { 10216 int error; 10217 10218 /* Check if we have out-of-bounds features */ 10219 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) { 10220 boolean_t features[SPA_FEATURES]; 10221 if (zpool_do_load_compat(cb->cb_value, features) != 10222 ZPOOL_COMPATIBILITY_OK) 10223 return (-1); 10224 10225 nvlist_t *enabled = zpool_get_features(zhp); 10226 spa_feature_t i; 10227 for (i = 0; i < SPA_FEATURES; i++) { 10228 const char *fguid = spa_feature_table[i].fi_guid; 10229 if (nvlist_exists(enabled, fguid) && !features[i]) 10230 break; 10231 } 10232 if (i < SPA_FEATURES) 10233 (void) fprintf(stderr, gettext("Warning: one or " 10234 "more features already enabled on pool '%s'\n" 10235 "are not present in this compatibility set.\n"), 10236 zpool_get_name(zhp)); 10237 } 10238 10239 /* if we're setting a feature, check it's in compatibility set */ 10240 if (zpool_prop_feature(cb->cb_propname) && 10241 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) { 10242 char *fname = strchr(cb->cb_propname, '@') + 1; 10243 spa_feature_t f; 10244 10245 if (zfeature_lookup_name(fname, &f) == 0) { 10246 char compat[ZFS_MAXPROPLEN]; 10247 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, 10248 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 10249 compat[0] = '\0'; 10250 10251 boolean_t features[SPA_FEATURES]; 10252 if (zpool_do_load_compat(compat, features) != 10253 ZPOOL_COMPATIBILITY_OK) { 10254 (void) fprintf(stderr, gettext("Error: " 10255 "cannot enable feature '%s' on pool '%s'\n" 10256 "because the pool's 'compatibility' " 10257 "property cannot be parsed.\n"), 10258 fname, zpool_get_name(zhp)); 10259 return (-1); 10260 } 10261 10262 if (!features[f]) { 10263 (void) fprintf(stderr, gettext("Error: " 10264 "cannot enable feature '%s' on pool '%s'\n" 10265 "as it is not specified in this pool's " 10266 "current compatibility set.\n" 
10267 "Consider setting 'compatibility' to a " 10268 "less restrictive set, or to 'off'.\n"), 10269 fname, zpool_get_name(zhp)); 10270 return (-1); 10271 } 10272 } 10273 } 10274 10275 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value); 10276 10277 return (error); 10278 } 10279 10280 static int 10281 set_callback(zpool_handle_t *zhp, void *data) 10282 { 10283 int error; 10284 set_cbdata_t *cb = (set_cbdata_t *)data; 10285 10286 if (cb->cb_type == ZFS_TYPE_VDEV) { 10287 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names, 10288 cb->cb_propname, cb->cb_value); 10289 } else { 10290 assert(cb->cb_type == ZFS_TYPE_POOL); 10291 error = set_pool_callback(zhp, cb); 10292 } 10293 10294 cb->cb_any_successful = !error; 10295 return (error); 10296 } 10297 10298 int 10299 zpool_do_set(int argc, char **argv) 10300 { 10301 set_cbdata_t cb = { 0 }; 10302 int error; 10303 10304 current_prop_type = ZFS_TYPE_POOL; 10305 if (argc > 1 && argv[1][0] == '-') { 10306 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10307 argv[1][1]); 10308 usage(B_FALSE); 10309 } 10310 10311 if (argc < 2) { 10312 (void) fprintf(stderr, gettext("missing property=value " 10313 "argument\n")); 10314 usage(B_FALSE); 10315 } 10316 10317 if (argc < 3) { 10318 (void) fprintf(stderr, gettext("missing pool name\n")); 10319 usage(B_FALSE); 10320 } 10321 10322 if (argc > 4) { 10323 (void) fprintf(stderr, gettext("too many pool names\n")); 10324 usage(B_FALSE); 10325 } 10326 10327 cb.cb_propname = argv[1]; 10328 cb.cb_type = ZFS_TYPE_POOL; 10329 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; 10330 cb.cb_value = strchr(cb.cb_propname, '='); 10331 if (cb.cb_value == NULL) { 10332 (void) fprintf(stderr, gettext("missing value in " 10333 "property=value argument\n")); 10334 usage(B_FALSE); 10335 } 10336 10337 *(cb.cb_value) = '\0'; 10338 cb.cb_value++; 10339 argc -= 2; 10340 argv += 2; 10341 10342 /* argv[0] is pool name */ 10343 if (!is_pool(argv[0])) { 10344 (void) fprintf(stderr, 10345 gettext("cannot open '%s': is not a pool\n"), argv[0]); 10346 return (EINVAL); 10347 } 10348 10349 /* argv[1], when supplied, is vdev name */ 10350 if (argc == 2) { 10351 if (!are_vdevs_in_pool(1, argv + 1, argv[0], &cb.cb_vdevs)) { 10352 (void) fprintf(stderr, gettext( 10353 "cannot find '%s' in '%s': device not in pool\n"), 10354 argv[1], argv[0]); 10355 return (EINVAL); 10356 } 10357 cb.cb_vdevs.cb_names = argv + 1; 10358 cb.cb_vdevs.cb_names_count = 1; 10359 cb.cb_type = ZFS_TYPE_VDEV; 10360 } 10361 10362 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 10363 B_FALSE, set_callback, &cb); 10364 10365 return (error); 10366 } 10367 10368 /* Add up the total number of bytes left to initialize/trim across all vdevs */ 10369 static uint64_t 10370 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity) 10371 { 10372 uint64_t bytes_remaining; 10373 nvlist_t **child; 10374 uint_t c, children; 10375 vdev_stat_t *vs; 10376 10377 assert(activity == ZPOOL_WAIT_INITIALIZE || 10378 activity == ZPOOL_WAIT_TRIM); 10379 10380 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 10381 (uint64_t **)&vs, &c) == 0); 10382 10383 if (activity == ZPOOL_WAIT_INITIALIZE && 10384 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) 10385 bytes_remaining = vs->vs_initialize_bytes_est - 10386 vs->vs_initialize_bytes_done; 10387 else if (activity == ZPOOL_WAIT_TRIM && 10388 vs->vs_trim_state == VDEV_TRIM_ACTIVE) 10389 bytes_remaining = vs->vs_trim_bytes_est - 10390 vs->vs_trim_bytes_done; 10391 else 10392 bytes_remaining = 0; 10393 
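	/*
	 * Add in the remaining bytes for all child vdevs so the total
	 * covers the entire subtree rooted at this vdev.
	 */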
10394 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 10395 &child, &children) != 0) 10396 children = 0; 10397 10398 for (c = 0; c < children; c++) 10399 bytes_remaining += vdev_activity_remaining(child[c], activity); 10400 10401 return (bytes_remaining); 10402 } 10403 10404 /* Add up the total number of bytes left to rebuild across top-level vdevs */ 10405 static uint64_t 10406 vdev_activity_top_remaining(nvlist_t *nv) 10407 { 10408 uint64_t bytes_remaining = 0; 10409 nvlist_t **child; 10410 uint_t children; 10411 int error; 10412 10413 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 10414 &child, &children) != 0) 10415 children = 0; 10416 10417 for (uint_t c = 0; c < children; c++) { 10418 vdev_rebuild_stat_t *vrs; 10419 uint_t i; 10420 10421 error = nvlist_lookup_uint64_array(child[c], 10422 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i); 10423 if (error == 0) { 10424 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 10425 bytes_remaining += (vrs->vrs_bytes_est - 10426 vrs->vrs_bytes_rebuilt); 10427 } 10428 } 10429 } 10430 10431 return (bytes_remaining); 10432 } 10433 10434 /* Whether any vdevs are 'spare' or 'replacing' vdevs */ 10435 static boolean_t 10436 vdev_any_spare_replacing(nvlist_t *nv) 10437 { 10438 nvlist_t **child; 10439 uint_t c, children; 10440 char *vdev_type; 10441 10442 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type); 10443 10444 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 || 10445 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 || 10446 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) { 10447 return (B_TRUE); 10448 } 10449 10450 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 10451 &child, &children) != 0) 10452 children = 0; 10453 10454 for (c = 0; c < children; c++) { 10455 if (vdev_any_spare_replacing(child[c])) 10456 return (B_TRUE); 10457 } 10458 10459 return (B_FALSE); 10460 } 10461 10462 typedef struct wait_data { 10463 char *wd_poolname; 10464 boolean_t wd_scripted; 10465 boolean_t wd_exact; 10466 boolean_t wd_headers_once; 10467 boolean_t wd_should_exit; 10468 /* Which activities to wait for */ 10469 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES]; 10470 float wd_interval; 10471 pthread_cond_t wd_cv; 10472 pthread_mutex_t wd_mutex; 10473 } wait_data_t; 10474 10475 /* 10476 * Print to stdout a single line, containing one column for each activity that 10477 * we are waiting for specifying how many bytes of work are left for that 10478 * activity. 10479 */ 10480 static void 10481 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row) 10482 { 10483 nvlist_t *config, *nvroot; 10484 uint_t c; 10485 int i; 10486 pool_checkpoint_stat_t *pcs = NULL; 10487 pool_scan_stat_t *pss = NULL; 10488 pool_removal_stat_t *prs = NULL; 10489 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE", 10490 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM"}; 10491 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES]; 10492 10493 /* Calculate the width of each column */ 10494 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 10495 /* 10496 * Make sure we have enough space in the col for pretty-printed 10497 * numbers and for the column header, and then leave a couple 10498 * spaces between cols for readability. 
10499 */ 10500 col_widths[i] = MAX(strlen(headers[i]), 6) + 2; 10501 } 10502 10503 /* Print header if appropriate */ 10504 int term_height = terminal_height(); 10505 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 && 10506 row % (term_height-1) == 0); 10507 if (!wd->wd_scripted && (row == 0 || reprint_header)) { 10508 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 10509 if (wd->wd_enabled[i]) 10510 (void) printf("%*s", col_widths[i], headers[i]); 10511 } 10512 (void) fputc('\n', stdout); 10513 } 10514 10515 /* Bytes of work remaining in each activity */ 10516 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0}; 10517 10518 bytes_rem[ZPOOL_WAIT_FREE] = 10519 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL); 10520 10521 config = zpool_get_config(zhp, NULL); 10522 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 10523 10524 (void) nvlist_lookup_uint64_array(nvroot, 10525 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 10526 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 10527 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space; 10528 10529 (void) nvlist_lookup_uint64_array(nvroot, 10530 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 10531 if (prs != NULL && prs->prs_state == DSS_SCANNING) 10532 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy - 10533 prs->prs_copied; 10534 10535 (void) nvlist_lookup_uint64_array(nvroot, 10536 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c); 10537 if (pss != NULL && pss->pss_state == DSS_SCANNING && 10538 pss->pss_pass_scrub_pause == 0) { 10539 int64_t rem = pss->pss_to_examine - pss->pss_issued; 10540 if (pss->pss_func == POOL_SCAN_SCRUB) 10541 bytes_rem[ZPOOL_WAIT_SCRUB] = rem; 10542 else 10543 bytes_rem[ZPOOL_WAIT_RESILVER] = rem; 10544 } else if (check_rebuilding(nvroot, NULL)) { 10545 bytes_rem[ZPOOL_WAIT_RESILVER] = 10546 vdev_activity_top_remaining(nvroot); 10547 } 10548 10549 bytes_rem[ZPOOL_WAIT_INITIALIZE] = 10550 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE); 10551 bytes_rem[ZPOOL_WAIT_TRIM] = 10552 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM); 10553 10554 /* 10555 * A replace finishes after resilvering finishes, so the amount of work 10556 * left for a replace is the same as for resilvering. 10557 * 10558 * It isn't quite correct to say that if we have any 'spare' or 10559 * 'replacing' vdevs and a resilver is happening, then a replace is in 10560 * progress, like we do here. When a hot spare is used, the faulted vdev 10561 * is not removed after the hot spare is resilvered, so parent 'spare' 10562 * vdev is not removed either. So we could have a 'spare' vdev, but be 10563 * resilvering for a different reason. However, we use it as a heuristic 10564 * because we don't have access to the DTLs, which could tell us whether 10565 * or not we have really finished resilvering a hot spare. 10566 */ 10567 if (vdev_any_spare_replacing(nvroot)) 10568 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER]; 10569 10570 if (timestamp_fmt != NODATE) 10571 print_timestamp(timestamp_fmt); 10572 10573 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 10574 char buf[64]; 10575 if (!wd->wd_enabled[i]) 10576 continue; 10577 10578 if (wd->wd_exact) 10579 (void) snprintf(buf, sizeof (buf), "%" PRIi64, 10580 bytes_rem[i]); 10581 else 10582 zfs_nicenum(bytes_rem[i], buf, sizeof (buf)); 10583 10584 if (wd->wd_scripted) 10585 (void) printf(i == 0 ? 
"%s" : "\t%s", buf); 10586 else 10587 (void) printf(" %*s", col_widths[i] - 1, buf); 10588 } 10589 (void) printf("\n"); 10590 (void) fflush(stdout); 10591 } 10592 10593 static void * 10594 wait_status_thread(void *arg) 10595 { 10596 wait_data_t *wd = (wait_data_t *)arg; 10597 zpool_handle_t *zhp; 10598 10599 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL) 10600 return (void *)(1); 10601 10602 for (int row = 0; ; row++) { 10603 boolean_t missing; 10604 struct timespec timeout; 10605 int ret = 0; 10606 (void) clock_gettime(CLOCK_REALTIME, &timeout); 10607 10608 if (zpool_refresh_stats(zhp, &missing) != 0 || missing || 10609 zpool_props_refresh(zhp) != 0) { 10610 zpool_close(zhp); 10611 return (void *)(uintptr_t)(missing ? 0 : 1); 10612 } 10613 10614 print_wait_status_row(wd, zhp, row); 10615 10616 timeout.tv_sec += floor(wd->wd_interval); 10617 long nanos = timeout.tv_nsec + 10618 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC; 10619 if (nanos >= NANOSEC) { 10620 timeout.tv_sec++; 10621 timeout.tv_nsec = nanos - NANOSEC; 10622 } else { 10623 timeout.tv_nsec = nanos; 10624 } 10625 pthread_mutex_lock(&wd->wd_mutex); 10626 if (!wd->wd_should_exit) 10627 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex, 10628 &timeout); 10629 pthread_mutex_unlock(&wd->wd_mutex); 10630 if (ret == 0) { 10631 break; /* signaled by main thread */ 10632 } else if (ret != ETIMEDOUT) { 10633 (void) fprintf(stderr, gettext("pthread_cond_timedwait " 10634 "failed: %s\n"), strerror(ret)); 10635 zpool_close(zhp); 10636 return (void *)(uintptr_t)(1); 10637 } 10638 } 10639 10640 zpool_close(zhp); 10641 return (void *)(0); 10642 } 10643 10644 int 10645 zpool_do_wait(int argc, char **argv) 10646 { 10647 boolean_t verbose = B_FALSE; 10648 int c, i; 10649 unsigned long count; 10650 pthread_t status_thr; 10651 int error = 0; 10652 zpool_handle_t *zhp; 10653 10654 wait_data_t wd; 10655 wd.wd_scripted = B_FALSE; 10656 wd.wd_exact = B_FALSE; 10657 wd.wd_headers_once = B_FALSE; 10658 wd.wd_should_exit = B_FALSE; 10659 10660 pthread_mutex_init(&wd.wd_mutex, NULL); 10661 pthread_cond_init(&wd.wd_cv, NULL); 10662 10663 /* By default, wait for all types of activity. 
*/ 10664 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) 10665 wd.wd_enabled[i] = B_TRUE; 10666 10667 while ((c = getopt(argc, argv, "HpT:t:")) != -1) { 10668 switch (c) { 10669 case 'H': 10670 wd.wd_scripted = B_TRUE; 10671 break; 10672 case 'n': 10673 wd.wd_headers_once = B_TRUE; 10674 break; 10675 case 'p': 10676 wd.wd_exact = B_TRUE; 10677 break; 10678 case 'T': 10679 get_timestamp_arg(*optarg); 10680 break; 10681 case 't': 10682 /* Reset activities array */ 10683 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled)); 10684 10685 for (char *tok; (tok = strsep(&optarg, ",")); ) { 10686 static const char *const col_opts[] = { 10687 "discard", "free", "initialize", "replace", 10688 "remove", "resilver", "scrub", "trim" }; 10689 10690 for (i = 0; i < ARRAY_SIZE(col_opts); ++i) 10691 if (strcmp(tok, col_opts[i]) == 0) { 10692 wd.wd_enabled[i] = B_TRUE; 10693 goto found; 10694 } 10695 10696 (void) fprintf(stderr, 10697 gettext("invalid activity '%s'\n"), tok); 10698 usage(B_FALSE); 10699 found:; 10700 } 10701 break; 10702 case '?': 10703 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10704 optopt); 10705 usage(B_FALSE); 10706 } 10707 } 10708 10709 argc -= optind; 10710 argv += optind; 10711 10712 get_interval_count(&argc, argv, &wd.wd_interval, &count); 10713 if (count != 0) { 10714 /* This subcmd only accepts an interval, not a count */ 10715 (void) fprintf(stderr, gettext("too many arguments\n")); 10716 usage(B_FALSE); 10717 } 10718 10719 if (wd.wd_interval != 0) 10720 verbose = B_TRUE; 10721 10722 if (argc < 1) { 10723 (void) fprintf(stderr, gettext("missing 'pool' argument\n")); 10724 usage(B_FALSE); 10725 } 10726 if (argc > 1) { 10727 (void) fprintf(stderr, gettext("too many arguments\n")); 10728 usage(B_FALSE); 10729 } 10730 10731 wd.wd_poolname = argv[0]; 10732 10733 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL) 10734 return (1); 10735 10736 if (verbose) { 10737 /* 10738 * We use a separate thread for printing status updates because 10739 * the main thread will call lzc_wait(), which blocks as long 10740 * as an activity is in progress, which can be a long time. 10741 */ 10742 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd) 10743 != 0) { 10744 (void) fprintf(stderr, gettext("failed to create status" 10745 "thread: %s\n"), strerror(errno)); 10746 zpool_close(zhp); 10747 return (1); 10748 } 10749 } 10750 10751 /* 10752 * Loop over all activities that we are supposed to wait for until none 10753 * of them are in progress. Note that this means we can end up waiting 10754 * for more activities to complete than just those that were in progress 10755 * when we began waiting; if an activity we are interested in begins 10756 * while we are waiting for another activity, we will wait for both to 10757 * complete before exiting. 
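 * The loop below exits once a full pass over the enabled activities reports
 * nothing left to wait for, or if the pool disappears or an error occurs.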
10758 */ 10759 for (;;) { 10760 boolean_t missing = B_FALSE; 10761 boolean_t any_waited = B_FALSE; 10762 10763 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 10764 boolean_t waited; 10765 10766 if (!wd.wd_enabled[i]) 10767 continue; 10768 10769 error = zpool_wait_status(zhp, i, &missing, &waited); 10770 if (error != 0 || missing) 10771 break; 10772 10773 any_waited = (any_waited || waited); 10774 } 10775 10776 if (error != 0 || missing || !any_waited) 10777 break; 10778 } 10779 10780 zpool_close(zhp); 10781 10782 if (verbose) { 10783 uintptr_t status; 10784 pthread_mutex_lock(&wd.wd_mutex); 10785 wd.wd_should_exit = B_TRUE; 10786 pthread_cond_signal(&wd.wd_cv); 10787 pthread_mutex_unlock(&wd.wd_mutex); 10788 (void) pthread_join(status_thr, (void *)&status); 10789 if (status != 0) 10790 error = status; 10791 } 10792 10793 pthread_mutex_destroy(&wd.wd_mutex); 10794 pthread_cond_destroy(&wd.wd_cv); 10795 return (error); 10796 } 10797 10798 static int 10799 find_command_idx(const char *command, int *idx) 10800 { 10801 for (int i = 0; i < NCOMMAND; ++i) { 10802 if (command_table[i].name == NULL) 10803 continue; 10804 10805 if (strcmp(command, command_table[i].name) == 0) { 10806 *idx = i; 10807 return (0); 10808 } 10809 } 10810 return (1); 10811 } 10812 10813 /* 10814 * Display version message 10815 */ 10816 static int 10817 zpool_do_version(int argc, char **argv) 10818 { 10819 (void) argc, (void) argv; 10820 return (zfs_version_print() != 0); 10821 } 10822 10823 /* 10824 * Do zpool_load_compat() and print error message on failure 10825 */ 10826 static zpool_compat_status_t 10827 zpool_do_load_compat(const char *compat, boolean_t *list) 10828 { 10829 char report[1024]; 10830 10831 zpool_compat_status_t ret; 10832 10833 ret = zpool_load_compat(compat, list, report, 1024); 10834 switch (ret) { 10835 10836 case ZPOOL_COMPATIBILITY_OK: 10837 break; 10838 10839 case ZPOOL_COMPATIBILITY_NOFILES: 10840 case ZPOOL_COMPATIBILITY_BADFILE: 10841 case ZPOOL_COMPATIBILITY_BADTOKEN: 10842 (void) fprintf(stderr, "Error: %s\n", report); 10843 break; 10844 10845 case ZPOOL_COMPATIBILITY_WARNTOKEN: 10846 (void) fprintf(stderr, "Warning: %s\n", report); 10847 ret = ZPOOL_COMPATIBILITY_OK; 10848 break; 10849 } 10850 return (ret); 10851 } 10852 10853 int 10854 main(int argc, char **argv) 10855 { 10856 int ret = 0; 10857 int i = 0; 10858 char *cmdname; 10859 char **newargv; 10860 10861 (void) setlocale(LC_ALL, ""); 10862 (void) setlocale(LC_NUMERIC, "C"); 10863 (void) textdomain(TEXT_DOMAIN); 10864 srand(time(NULL)); 10865 10866 opterr = 0; 10867 10868 /* 10869 * Make sure the user has specified some command. 10870 */ 10871 if (argc < 2) { 10872 (void) fprintf(stderr, gettext("missing command\n")); 10873 usage(B_FALSE); 10874 } 10875 10876 cmdname = argv[1]; 10877 10878 /* 10879 * Special case '-?' 10880 */ 10881 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0) 10882 usage(B_TRUE); 10883 10884 /* 10885 * Special case '-V|--version' 10886 */ 10887 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0)) 10888 return (zpool_do_version(argc, argv)); 10889 10890 if ((g_zfs = libzfs_init()) == NULL) { 10891 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno)); 10892 return (1); 10893 } 10894 10895 libzfs_print_on_error(g_zfs, B_TRUE); 10896 10897 zfs_save_arguments(argc, argv, history_str, sizeof (history_str)); 10898 10899 /* 10900 * Many commands modify input strings for string parsing reasons. 10901 * We create a copy to protect the original argv. 
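 * ('zpool set', for example, splits its property=value argument in place
 * by overwriting the '=' separator with a NUL byte.)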
10902 */ 10903 newargv = safe_malloc((argc + 1) * sizeof (newargv[0])); 10904 for (i = 0; i < argc; i++) 10905 newargv[i] = strdup(argv[i]); 10906 newargv[argc] = NULL; 10907 10908 /* 10909 * Run the appropriate command. 10910 */ 10911 if (find_command_idx(cmdname, &i) == 0) { 10912 current_command = &command_table[i]; 10913 ret = command_table[i].func(argc - 1, newargv + 1); 10914 } else if (strchr(cmdname, '=')) { 10915 verify(find_command_idx("set", &i) == 0); 10916 current_command = &command_table[i]; 10917 ret = command_table[i].func(argc, newargv); 10918 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) { 10919 /* 10920 * 'freeze' is a vile debugging abomination, so we treat 10921 * it as such. 10922 */ 10923 zfs_cmd_t zc = {"\0"}; 10924 10925 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name)); 10926 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc); 10927 if (ret != 0) { 10928 (void) fprintf(stderr, 10929 gettext("failed to freeze pool: %d\n"), errno); 10930 ret = 1; 10931 } 10932 10933 log_history = 0; 10934 } else { 10935 (void) fprintf(stderr, gettext("unrecognized " 10936 "command '%s'\n"), cmdname); 10937 usage(B_FALSE); 10938 ret = 1; 10939 } 10940 10941 for (i = 0; i < argc; i++) 10942 free(newargv[i]); 10943 free(newargv); 10944 10945 if (ret == 0 && log_history) 10946 (void) zpool_log_history(g_zfs, history_str); 10947 10948 libzfs_fini(g_zfs); 10949 10950 /* 10951 * The 'ZFS_ABORT' environment variable causes us to dump core on exit 10952 * for the purposes of running ::findleaks. 10953 */ 10954 if (getenv("ZFS_ABORT") != NULL) { 10955 (void) printf("dumping core by request\n"); 10956 abort(); 10957 } 10958 10959 return (ret); 10960 } 10961
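
/*
 * Illustrative invocations of the subcommands implemented above (not
 * exhaustive; see the usage comment above each command for the full set
 * of options):
 *
 *	zpool events -v tank			verbose event log for 'tank'
 *	zpool get -Hp -o name,value all tank	scripted, parsable properties
 *	zpool set compatibility=off tank	lift compatibility restrictions
 *	zpool wait -t free,remove tank		wait for freeing and removal
 */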