/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2021, Klara Inc.
 * Copyright [2021] Hewlett Packard Enterprise Development LP
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>

#include <math.h>

#include <libzfs.h>
#include <libzutil.h>

#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

#include "statcommon.h"

libzfs_handle_t *g_zfs;

static int mount_tp_nthr = 512;	/* tpool threads for multi-threaded mounting */

static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static int zpool_do_help(int argc, char **argv);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

enum zpool_options {
	ZPOOL_OPTION_POWER = 1024,
	ZPOOL_OPTION_ALLOW_INUSE,
	ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
	ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH
};

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif

typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;


/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
	IOS_DEFAULT = 0,
	IOS_LATENCY = 1,
	IOS_QUEUES = 2,
	IOS_L_HISTO = 3,
	IOS_RQ_HISTO = 4,
	IOS_COUNT,	/* always last element */
};

/* iostat_type entries as bitmasks */
#define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
#define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
#define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
#define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
#define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define	IOS_ANYHISTO_M	(IOS_L_HISTO_M | IOS_RQ_HISTO_M)

/*
 * Lookup table for iostat flags to nvlist names.  Basically a list
 * of all the nvlists a flag requires.  Also specifies the order in
 * which data gets printed in zpool iostat.
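 * For example, the IOS_QUEUES row below lists the *_ACTIVE_QUEUE nvlist
 * names that back the queue statistics shown by 'zpool iostat -q'.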
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
	[IOS_L_HISTO] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_LATENCY] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_QUEUES] = {
	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    NULL},
	[IOS_RQ_HISTO] = {
	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    NULL},
};


/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at one time, so we can
 * just do a highbit64(a)
 */
#define	IOS_HISTO_IDX(a)	(highbit64(a & IOS_ANYHISTO_M) - 1)
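
/*
 * For example, if cb->cb_flags has only IOS_L_HISTO_M (1ULL << 3) set, then
 * highbit64() returns 4 and IOS_HISTO_IDX() yields 3, i.e. IOS_L_HISTO,
 * which indexes vsx_type_to_nvlist above.
 */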

typedef struct zpool_command {
	const char	*name;
	int		(*func)(int, char **);
	zpool_help_t	usage;
} zpool_command_t;

/*
 * Master command table.  Each ZFS command has a name, associated function, and
 * usage message.  The usage messages need to be internationalized, so we have
 * to have a function to return the usage message based on a command index.
 *
 * These commands are organized according to how they are displayed in the
 * usage message.  An empty command (one with a NULL name) indicates an empty
 * line in the generic usage message.
 */
static zpool_command_t command_table[] = {
	{ "version",	zpool_do_version,	HELP_VERSION		},
	{ NULL },
	{ "create",	zpool_do_create,	HELP_CREATE		},
	{ "destroy",	zpool_do_destroy,	HELP_DESTROY		},
	{ NULL },
	{ "add",	zpool_do_add,		HELP_ADD		},
	{ "remove",	zpool_do_remove,	HELP_REMOVE		},
	{ NULL },
	{ "labelclear",	zpool_do_labelclear,	HELP_LABELCLEAR		},
	{ NULL },
	{ "checkpoint",	zpool_do_checkpoint,	HELP_CHECKPOINT		},
	{ NULL },
	{ "list",	zpool_do_list,		HELP_LIST		},
	{ "iostat",	zpool_do_iostat,	HELP_IOSTAT		},
	{ "status",	zpool_do_status,	HELP_STATUS		},
	{ NULL },
	{ "online",	zpool_do_online,	HELP_ONLINE		},
	{ "offline",	zpool_do_offline,	HELP_OFFLINE		},
	{ "clear",	zpool_do_clear,		HELP_CLEAR		},
	{ "reopen",	zpool_do_reopen,	HELP_REOPEN		},
	{ NULL },
	{ "attach",	zpool_do_attach,	HELP_ATTACH		},
	{ "detach",	zpool_do_detach,	HELP_DETACH		},
	{ "replace",	zpool_do_replace,	HELP_REPLACE		},
	{ "split",	zpool_do_split,		HELP_SPLIT		},
	{ NULL },
	{ "initialize",	zpool_do_initialize,	HELP_INITIALIZE		},
	{ "resilver",	zpool_do_resilver,	HELP_RESILVER		},
	{ "scrub",	zpool_do_scrub,		HELP_SCRUB		},
	{ "trim",	zpool_do_trim,		HELP_TRIM		},
	{ NULL },
	{ "import",	zpool_do_import,	HELP_IMPORT		},
	{ "export",	zpool_do_export,	HELP_EXPORT		},
	{ "upgrade",	zpool_do_upgrade,	HELP_UPGRADE		},
	{ "reguid",	zpool_do_reguid,	HELP_REGUID		},
	{ NULL },
	{ "history",	zpool_do_history,	HELP_HISTORY		},
	{ "events",	zpool_do_events,	HELP_EVENTS		},
	{ NULL },
	{ "get",	zpool_do_get,		HELP_GET		},
	{ "set",	zpool_do_set,		HELP_SET		},
	{ "sync",	zpool_do_sync,		HELP_SYNC		},
	{ NULL },
	{ "wait",	zpool_do_wait,		HELP_WAIT		},
};

#define	NCOMMAND	(ARRAY_SIZE(command_table))

#define	VDEV_ALLOC_CLASS_LOGS	"logs"

static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;

static const char *
get_usage(zpool_help_t idx)
{
	switch (idx) {
	case HELP_ADD:
		return (gettext("\tadd [-afgLnP] [-o property=value] "
		    "<pool> <vdev> ...\n"));
	case HELP_ATTACH:
		return (gettext("\tattach [-fsw] [-o property=value] "
		    "<pool> <device> <new-device>\n"));
	case HELP_CLEAR:
		return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
	case HELP_CREATE:
		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
		    "\t    [-O file-system-property=value] ... \n"
		    "\t    [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
	case HELP_CHECKPOINT:
		return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
	case HELP_DESTROY:
		return (gettext("\tdestroy [-f] <pool>\n"));
	case HELP_DETACH:
		return (gettext("\tdetach <pool> <device>\n"));
	case HELP_EXPORT:
		return (gettext("\texport [-af] <pool> ...\n"));
	case HELP_HISTORY:
		return (gettext("\thistory [-il] [<pool>] ...\n"));
	case HELP_IMPORT:
		return (gettext("\timport [-d dir] [-D]\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]] -a\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]]\n"
		    "\t    [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
	case HELP_IOSTAT:
		return (gettext("\tiostat [[[-c [script1,script2,...]"
		    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
		    "\t    [[pool ...]|[pool vdev ...]|[vdev ...]]"
		    " [[-n] interval [count]]\n"));
	case HELP_LABELCLEAR:
		return (gettext("\tlabelclear [-f] <vdev>\n"));
	case HELP_LIST:
		return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
		    "[-T d|u] [pool] ... \n"
		    "\t    [interval [count]]\n"));
	case HELP_OFFLINE:
		return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
		    "<device> ...\n"));
	case HELP_ONLINE:
		return (gettext("\tonline [--power][-e] <pool> <device> "
		    "...\n"));
	case HELP_REPLACE:
		return (gettext("\treplace [-fsw] [-o property=value] "
		    "<pool> <device> [new-device]\n"));
	case HELP_REMOVE:
		return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
	case HELP_REOPEN:
		return (gettext("\treopen [-n] <pool>\n"));
	case HELP_INITIALIZE:
		return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
		    "[<device> ...]\n"));
	case HELP_SCRUB:
		return (gettext("\tscrub [-s | -p] [-w] [-e] <pool> ...\n"));
	case HELP_RESILVER:
		return (gettext("\tresilver <pool> ...\n"));
	case HELP_TRIM:
		return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
		    "[<device> ...]\n"));
	case HELP_STATUS:
		return (gettext("\tstatus [--power] [-c [script1,script2,...]] "
		    "[-DegiLpPstvx] [-T d|u] [pool] ...\n"
		    "\t    [interval [count]]\n"));
	case HELP_UPGRADE:
		return (gettext("\tupgrade\n"
		    "\tupgrade -v\n"
		    "\tupgrade [-V version] <-a | pool ...>\n"));
	case HELP_EVENTS:
		return (gettext("\tevents [-vHf [pool] | -c]\n"));
	case HELP_GET:
		return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
		    "<\"all\" | property[,...]> <pool> ...\n"));
	case HELP_SET:
		return (gettext("\tset <property=value> <pool>\n"
		    "\tset <vdev_property=value> <pool> <vdev>\n"));
	case HELP_SPLIT:
		return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
		    "\t    [-o property=value] <pool> <newpool> "
		    "[<device> ...]\n"));
	case HELP_REGUID:
		return (gettext("\treguid <pool>\n"));
	case HELP_SYNC:
		return (gettext("\tsync [pool] ...\n"));
	case HELP_VERSION:
		return (gettext("\tversion\n"));
	case HELP_WAIT:
		return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
		    "<pool> [interval]\n"));
	default:
		__builtin_unreachable();
	}
}

static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
	uint_t children = 0;
	nvlist_t **child;
	uint_t i;

	(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (children == 0) {
		char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
		    VDEV_NAME_PATH);

		if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
		    strcmp(path, VDEV_TYPE_HOLE) != 0)
			fnvlist_add_boolean(res, path);

		free(path);
		return;
	}

	for (i = 0; i < children; i++) {
		zpool_collect_leaves(zhp, child[i], res);
	}
}

/*
 * Callback routine that will print out a pool property value.
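 * (Called from usage() through zprop_iter() to fill in the
 * PROPERTY / EDIT / VALUES table for 'zpool get', 'set' and 'list'.)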
 */
static int
print_pool_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));

	if (zpool_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (zpool_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", zpool_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Callback routine that will print out a vdev property value.
 */
static int
print_vdev_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));

	if (vdev_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (vdev_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", vdev_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
 * '/dev/disk/by-vdev/L5'.
 */
static const char *
vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
{
	nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
	if (vdev_nv == NULL) {
		return (NULL);
	}
	return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
}

static int
zpool_power_on(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_TRUE));
}

static int
zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
{
	int rc;

	rc = zpool_power_on(zhp, vdev);
	if (rc != 0)
		return (rc);

	zpool_disk_wait(vdev_name_to_path(zhp, vdev));

	return (0);
}

static int
zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
{
	nvlist_t *nv;
	const char *path = NULL;
	int rc;

	/* Power up all the devices first */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		if (path != NULL) {
			rc = zpool_power_on(zhp, (char *)path);
			if (rc != 0) {
				return (rc);
			}
		}
	}

	/*
	 * Wait for their devices to show up.  Since we powered them on
	 * at roughly the same time, they should all come online around
	 * the same time.
	 */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		zpool_disk_wait(path);
	}

	return (0);
}

static int
zpool_power_off(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_FALSE));
}

/*
 * Display usage message.  If we're inside a command, display only the usage
 * for that command.  Otherwise, iterate over the entire command table and
 * display a complete usage message.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	FILE *fp = requested ? stdout : stderr;

	if (current_command == NULL) {
		int i;

		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
		(void) fprintf(fp,
		    gettext("where 'command' is one of the following:\n\n"));

		for (i = 0; i < NCOMMAND; i++) {
			if (command_table[i].name == NULL)
				(void) fprintf(fp, "\n");
			else
				(void) fprintf(fp, "%s",
				    get_usage(command_table[i].usage));
		}

		(void) fprintf(fp,
		    gettext("\nFor further help on a command or topic, "
		    "run: %s\n"), "zpool help [<topic>]");
	} else {
		(void) fprintf(fp, gettext("usage:\n"));
		(void) fprintf(fp, "%s", get_usage(current_command->usage));
	}

	if (current_command != NULL &&
	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
	    ((strcmp(current_command->name, "set") == 0) ||
	    (strcmp(current_command->name, "get") == 0) ||
	    (strcmp(current_command->name, "list") == 0))) {

		(void) fprintf(fp, "%s",
		    gettext("\nthe following properties are supported:\n"));

		(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
		    "PROPERTY", "EDIT", "VALUES");

		/* Iterate over all properties */
		if (current_prop_type == ZFS_TYPE_POOL) {
			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);

			(void) fprintf(fp, "\t%-19s ", "feature@...");
			(void) fprintf(fp, "YES "
			    "disabled | enabled | active\n");

			(void) fprintf(fp, gettext("\nThe feature@ properties "
			    "must be appended with a feature name.\n"
			    "See zpool-features(7).\n"));
		} else if (current_prop_type == ZFS_TYPE_VDEV) {
			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);
		}
	}

	/*
	 * See comments at end of main().
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	exit(requested ? 0 : 2);
}

/*
 * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
 * Initialize all unused blocks in the specified vdevs, or all vdevs in the
 * pool if none specified.
 *
 *	-c	Cancel. Ends active initializing.
 *	-s	Suspend. Initializing can then be restarted with no flags.
 *	-u	Uninitialize. Clears initialization state.
 *	-w	Wait. Blocks until initializing has completed.
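 *
 * Example (hypothetical pool name): 'zpool initialize -w tank' starts
 * initialization on every leaf vdev in 'tank' and blocks until it completes.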
 */
int
zpool_do_initialize(int argc, char **argv)
{
	int c;
	char *poolname;
	zpool_handle_t *zhp;
	nvlist_t *vdevs;
	int err = 0;
	boolean_t wait = B_FALSE;

	struct option long_options[] = {
		{"cancel",	no_argument,		NULL, 'c'},
		{"suspend",	no_argument,		NULL, 's'},
		{"uninit",	no_argument,		NULL, 'u'},
		{"wait",	no_argument,		NULL, 'w'},
		{0, 0, 0, 0}
	};

	pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
	while ((c = getopt_long(argc, argv, "csuw", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'c':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_CANCEL;
			break;
		case 's':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_SUSPEND;
			break;
		case 'u':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_UNINIT) {
				(void) fprintf(stderr, gettext("-u cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_UNINIT;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
		return (-1);
	}

	if (wait && (cmd_type != POOL_INITIALIZE_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c, -s "
		    "or -u\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];
	zhp = zpool_open(g_zfs, poolname);
	if (zhp == NULL)
		return (-1);

	vdevs = fnvlist_alloc();
	if (argc == 1) {
		/* no individual leaf vdevs specified, so add them all */
		nvlist_t *config = zpool_get_config(zhp, NULL);
		nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
		    ZPOOL_CONFIG_VDEV_TREE);
		zpool_collect_leaves(zhp, nvroot, vdevs);
	} else {
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
	}

	if (wait)
		err = zpool_initialize_wait(zhp, cmd_type, vdevs);
	else
		err = zpool_initialize(zhp, cmd_type, vdevs);

	fnvlist_free(vdevs);
	zpool_close(zhp);

	return (err);
}

/*
 * print a pool vdev config for dry runs
 */
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
    const char *match, int name_flags)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	boolean_t printed = B_FALSE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (name != NULL)
			(void) printf("\t%*s%s\n", indent, "", name);
		return;
	}

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		const char *class = "";

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);

		if (is_hole == B_TRUE) {
			continue;
		}

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			class = VDEV_ALLOC_BIAS_LOG;
		(void) nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
		if (strcmp(match, class) != 0)
			continue;

		if (!printed && name != NULL) {
			(void) printf("\t%*s%s\n", indent, "", name);
			printed = B_TRUE;
		}
		vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
		print_vdev_tree(zhp, vname, child[c], indent + 2, "",
		    name_flags);
		free(vname);
	}
}

/*
 * Print the list of l2cache devices for dry runs.
 */
static void
print_cache_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "cache");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

/*
 * Print the list of spares for dry runs.
 */
static void
print_spare_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "spares");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
	nvpair_t *nvp;
	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
	    nvp = nvlist_next_nvpair(proplist, nvp)) {
		if (zpool_prop_feature(nvpair_name(nvp)))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Add a property pair (name, string-value) into a property nvlist.
 */
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
	zpool_prop_t prop = ZPOOL_PROP_INVAL;
	nvlist_t *proplist;
	const char *normnm;
	const char *strval;

	if (*props == NULL &&
	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		return (1);
	}

	proplist = *props;

	if (poolprop) {
		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
		const char *cname =
		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
		    (!zpool_prop_feature(propname) &&
		    !zpool_prop_vdev(propname))) {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid pool or vdev property\n"), propname);
			return (2);
		}

		/*
		 * feature@ properties and version should not be specified
		 * at the same time.
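		 * (For example, combining '-o version=28' with
		 * '-o feature@async_destroy=enabled' is rejected below.)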
		 */
		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    prop_list_contains_feature(proplist))) {
			(void) fprintf(stderr, gettext("'feature@' and "
			    "'version' properties cannot be specified "
			    "together\n"));
			return (2);
		}

		/*
		 * if version is specified, only "legacy" compatibility
		 * may be requested
		 */
		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    nvlist_exists(proplist, cname) &&
		    strcmp(fnvlist_lookup_string(proplist, cname),
		    ZPOOL_COMPAT_LEGACY) != 0)) {
			(void) fprintf(stderr, gettext("when 'version' is "
			    "specified, the 'compatibility' feature may only "
			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
			return (2);
		}

		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
			normnm = propname;
		else
			normnm = zpool_prop_to_name(prop);
	} else {
		zfs_prop_t fsprop = zfs_name_to_prop(propname);

		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
		    B_FALSE)) {
			normnm = zfs_prop_to_name(fsprop);
		} else if (zfs_prop_user(propname) ||
		    zfs_prop_userquota(propname)) {
			normnm = propname;
		} else {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid filesystem property\n"), propname);
			return (2);
		}
	}

	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
	    prop != ZPOOL_PROP_CACHEFILE) {
		(void) fprintf(stderr, gettext("property '%s' "
		    "specified multiple times\n"), propname);
		return (2);
	}

	if (nvlist_add_string(proplist, normnm, propval) != 0) {
		(void) fprintf(stderr, gettext("internal "
		    "error: out of memory\n"));
		return (1);
	}

	return (0);
}

/*
 * Set a default property pair (name, string-value) in a property nvlist
 */
static int
add_prop_list_default(const char *propname, const char *propval,
    nvlist_t **props)
{
	const char *pval;

	if (nvlist_lookup_string(*props, propname, &pval) == 0)
		return (0);

	return (add_prop_list(propname, propval, props, B_TRUE));
}

/*
 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
 *
 *	-a	Disable the ashift validation checks
 *	-f	Force addition of devices, even if they appear in use
 *	-g	Display guid for individual vdev name.
 *	-L	Follow links when resolving vdev path name.
 *	-n	Do not add the devices, but display the resulting layout if
 *		they were to be added.
 *	-o	Set property=value.
 *	-P	Display full path for vdev name.
 *
 * Adds the given vdevs to 'pool'.  As with create, the bulk of this work is
 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
 * libzfs.
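 *
 * Example (hypothetical device names): 'zpool add -n tank mirror sdc sdd'
 * only prints the layout that would result; without '-n' the new mirror
 * top-level vdev is added to 'tank'.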
 */
int
zpool_do_add(int argc, char **argv)
{
	boolean_t check_replication = B_TRUE;
	boolean_t check_inuse = B_TRUE;
	boolean_t dryrun = B_FALSE;
	boolean_t check_ashift = B_TRUE;
	boolean_t force = B_FALSE;
	int name_flags = 0;
	int c;
	nvlist_t *nvroot;
	char *poolname;
	int ret;
	zpool_handle_t *zhp;
	nvlist_t *config;
	nvlist_t *props = NULL;
	char *propval;

	struct option long_options[] = {
		{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
		{"allow-replication-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
		{"allow-ashift-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'g':
			name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 'P':
			name_flags |= VDEV_NAME_PATH;
			break;
		case ZPOOL_OPTION_ALLOW_INUSE:
			check_inuse = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
			check_replication = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
			check_ashift = B_FALSE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		usage(B_FALSE);
	}

	if (force) {
		if (!check_inuse || !check_replication || !check_ashift) {
			(void) fprintf(stderr, gettext("'-f' option is not "
			    "allowed with '--allow-replication-mismatch', "
			    "'--allow-ashift-mismatch', or "
			    "'--allow-in-use'\n"));
			usage(B_FALSE);
		}
		check_inuse = B_FALSE;
		check_replication = B_FALSE;
		check_ashift = B_FALSE;
	}

	poolname = argv[0];

	argc--;
	argv++;

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}

	/* pass off to make_root_vdev for processing */
	nvroot = make_root_vdev(zhp, props, !check_inuse,
	    check_replication, B_FALSE, dryrun, argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		return (1);
	}

	if (dryrun) {
		nvlist_t *poolnvroot;
		nvlist_t **l2child, **sparechild;
		uint_t l2children, sparechildren, c;
		char *vname;
		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &poolnvroot) == 0);

		(void) printf(gettext("would update '%s' to the following "
		    "configuration:\n\n"), zpool_get_name(zhp));

		/* print original main pool and new tree */
		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
		    name_flags | VDEV_NAME_TYPE_ID);
		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

		/* print other classes: 'dedup', 'special', and 'log' */
		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		}

		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
 */
int
zpool_do_remove(int argc, char **argv)
{
	char *poolname;
	int i, ret = 0;
	zpool_handle_t *zhp = NULL;
	boolean_t stop = B_FALSE;
	int c;
	boolean_t noop = B_FALSE;
	boolean_t parsable = B_FALSE;
	boolean_t wait = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "npsw")) != -1) {
		switch (c) {
		case 'n':
			noop = B_TRUE;
			break;
		case 'p':
			parsable = B_TRUE;
			break;
		case 's':
			stop = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if (stop && noop) {
		zpool_close(zhp);
		(void) fprintf(stderr, gettext("stop request ignored\n"));
		return (0);
	}

	if (stop) {
		if (argc > 1) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
		if (zpool_vdev_remove_cancel(zhp) != 0)
			ret = 1;
		if (wait) {
			(void) fprintf(stderr, gettext("invalid option "
			    "combination: -w cannot be used with -s\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc < 2) {
			(void) fprintf(stderr, gettext("missing device\n"));
			usage(B_FALSE);
		}

		for (i = 1; i < argc; i++) {
			if (noop) {
				uint64_t size;

				if (zpool_vdev_indirect_size(zhp, argv[i],
				    &size) != 0) {
					ret = 1;
					break;
				}
				if (parsable) {
					(void) printf("%s %llu\n",
					    argv[i], (unsigned long long)size);
				} else {
					char valstr[32];
					zfs_nicenum(size, valstr,
					    sizeof (valstr));
					(void) printf("Memory that will be "
					    "used after removing %s: %s\n",
					    argv[i], valstr);
				}
			} else {
				if (zpool_vdev_remove(zhp, argv[i]) != 0)
					ret = 1;
			}
		}

		if (ret == 0 && wait)
			ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
	}
	zpool_close(zhp);

	return (ret);
}

/*
 * Return 1 if a vdev is active (being used in a pool)
 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
 *
 * This is useful for checking if a disk in an active pool is offlined or
 * faulted.
 */
static int
vdev_is_active(char *vdev_path)
{
	int fd;
	fd = open(vdev_path, O_EXCL);
	if (fd < 0) {
		return (1);	/* can't open O_EXCL - disk is active */
	}

	close(fd);
	return (0);	/* disk is inactive in the pool */
}

/*
 * zpool labelclear [-f] <vdev>
 *
 *	-f	Force clearing the label for the vdevs which are members of
 *		the exported or foreign pools.
 *
 * Verifies that the vdev is not active and zeros out the label information
 * on the device.
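 *
 * Example (hypothetical device name): 'zpool labelclear /dev/sdc1' is refused
 * while the device is part of an active pool; '-f' is required for members
 * of exported pools.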
 */
int
zpool_do_labelclear(int argc, char **argv)
{
	char vdev[MAXPATHLEN];
	char *name = NULL;
	int c, fd = -1, ret = 0;
	nvlist_t *config;
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	boolean_t force = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		default:
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get vdev name */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing vdev name\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	(void) strlcpy(vdev, argv[0], sizeof (vdev));

	/*
	 * If we cannot open an absolute path, we quit.
	 * Otherwise if the provided vdev name doesn't point to a file,
	 * try prepending expected disk paths and partition numbers.
	 */
	if ((fd = open(vdev, O_RDWR)) < 0) {
		int error;
		if (vdev[0] == '/') {
			(void) fprintf(stderr, gettext("failed to open "
			    "%s: %s\n"), vdev, strerror(errno));
			return (1);
		}

		error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
		if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
			if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
				error = ENOENT;
		}

		if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
			if (errno == ENOENT) {
				(void) fprintf(stderr, gettext(
				    "failed to find device %s, try "
				    "specifying absolute path instead\n"),
				    argv[0]);
				return (1);
			}

			(void) fprintf(stderr, gettext("failed to open %s:"
			    " %s\n"), vdev, strerror(errno));
			return (1);
		}
	}

	/*
	 * Flush all dirty pages for the block device.  This should not be
	 * fatal when the device does not support BLKFLSBUF as would be the
	 * case for a file vdev.
	 */
	if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
		(void) fprintf(stderr, gettext("failed to invalidate "
		    "cache for %s: %s\n"), vdev, strerror(errno));

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) fprintf(stderr,
		    gettext("failed to read label from %s\n"), vdev);
		ret = 1;
		goto errout;
	}
	nvlist_free(config);

	ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to check state for %s\n"), vdev);
		ret = 1;
		goto errout;
	}

	if (!inuse)
		goto wipe_label;

	switch (state) {
	default:
	case POOL_STATE_ACTIVE:
	case POOL_STATE_SPARE:
	case POOL_STATE_L2CACHE:
		/*
		 * We allow the user to call 'zpool offline -f'
		 * on an offlined disk in an active pool. We can check if
		 * the disk is online by calling vdev_is_active().
		 */
		if (force && !vdev_is_active(vdev))
			break;

		(void) fprintf(stderr, gettext(
		    "%s is a member (%s) of pool \"%s\""),
		    vdev, zpool_pool_state_to_name(state), name);

		if (force) {
			(void) fprintf(stderr, gettext(
			    ". Offline the disk first to clear its label."));
		}
		printf("\n");
		ret = 1;
		goto errout;

	case POOL_STATE_EXPORTED:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of exported pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_POTENTIALLY_ACTIVE:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of potentially active pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_DESTROYED:
		/* inuse should never be set for a destroyed pool */
		assert(0);
		break;
	}

wipe_label:
	ret = zpool_clear_label(fd);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to clear label for %s\n"), vdev);
	}

errout:
	free(name);
	(void) close(fd);

	return (ret);
}

/*
 * zpool create [-fnd] [-o property=value] ...
 *		[-O file-system-property=value] ...
 *		[-R root] [-m mountpoint] <pool> <dev> ...
 *
 *	-f	Force creation, even if devices appear in use
 *	-n	Do not create the pool, but display the resulting layout if it
 *		were to be created.
 *	-R	Create a pool under an alternate root
 *	-m	Set default mountpoint for the root dataset.  By default it's
 *		'/<pool>'
 *	-o	Set property=value.
 *	-o	Set feature@feature=enabled|disabled.
 *	-d	Don't automatically enable all supported pool features
 *		(individual features can be enabled with -o).
 *	-O	Set fsproperty=value in the pool's root file system
 *
 * Creates the named pool according to the given vdev specification.  The
 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
 * Once we get the nvlist back from make_root_vdev(), we either print out the
 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
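 *
 * Example (hypothetical device names): 'zpool create -n tank raidz sda sdb
 * sdc' prints the raidz layout that would be created without creating the
 * pool.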
 */
int
zpool_do_create(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	boolean_t dryrun = B_FALSE;
	boolean_t enable_pool_features = B_TRUE;

	int c;
	nvlist_t *nvroot = NULL;
	char *poolname;
	char *tname = NULL;
	int ret = 1;
	char *altroot = NULL;
	char *compat = NULL;
	char *mountpoint = NULL;
	nvlist_t *fsprops = NULL;
	nvlist_t *props = NULL;
	char *propval;

	/* check options */
	while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'd':
			enable_pool_features = B_FALSE;
			break;
		case 'R':
			altroot = optarg;
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			break;
		case 'm':
			/* Equivalent to -O mountpoint=optarg */
			mountpoint = optarg;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			if (add_prop_list(optarg, propval, &props, B_TRUE))
				goto errout;

			/*
			 * If the user is creating a pool that doesn't support
			 * feature flags, don't enable any features.
			 */
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
				char *end;
				u_longlong_t ver;

				ver = strtoull(propval, &end, 10);
				if (*end == '\0' &&
				    ver < SPA_VERSION_FEATURES) {
					enable_pool_features = B_FALSE;
				}
			}
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
				altroot = propval;
			if (zpool_name_to_prop(optarg) ==
			    ZPOOL_PROP_COMPATIBILITY)
				compat = propval;
			break;
		case 'O':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -O option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			/*
			 * Mountpoints are checked and then added later.
			 * Uniquely among properties, they can be specified
			 * more than once, to avoid conflict with -m.
			 */
			if (0 == strcmp(optarg,
			    zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
				mountpoint = propval;
			} else if (add_prop_list(optarg, propval, &fsprops,
			    B_FALSE)) {
				goto errout;
			}
			break;
		case 't':
			/*
			 * Sanity check temporary pool name.
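			 * (For example, '-t tanktmp' is accepted, while
			 * '-t tank/tmp' is rejected below because of the '/';
			 * names are illustrative.)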
			 */
			if (strchr(optarg, '/') != NULL) {
				(void) fprintf(stderr, gettext("cannot create "
				    "'%s': invalid character '/' in temporary "
				    "name\n"), optarg);
				(void) fprintf(stderr, gettext("use 'zfs "
				    "create' to create a dataset\n"));
				goto errout;
			}

			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			tname = optarg;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			goto badusage;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			goto badusage;
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		goto badusage;
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		goto badusage;
	}

	poolname = argv[0];

	/*
	 * As a special case, check for use of '/' in the name, and direct the
	 * user to use 'zfs create' instead.
	 */
	if (strchr(poolname, '/') != NULL) {
		(void) fprintf(stderr, gettext("cannot create '%s': invalid "
		    "character '/' in pool name\n"), poolname);
		(void) fprintf(stderr, gettext("use 'zfs create' to "
		    "create a dataset\n"));
		goto errout;
	}

	/* pass off to make_root_vdev for bulk processing */
	nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
	    argc - 1, argv + 1);
	if (nvroot == NULL)
		goto errout;

	/* make_root_vdev() allows 0 toplevel children if there are spares */
	if (!zfs_allocatable_devs(nvroot)) {
		(void) fprintf(stderr, gettext("invalid vdev "
		    "specification: at least one toplevel vdev must be "
		    "specified\n"));
		goto errout;
	}

	if (altroot != NULL && altroot[0] != '/') {
		(void) fprintf(stderr, gettext("invalid alternate root '%s': "
		    "must be an absolute path\n"), altroot);
		goto errout;
	}

	/*
	 * Check the validity of the mountpoint and direct the user to use the
	 * '-m' mountpoint option if it looks like it's in use.
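	 * (With '-R /mnt/alt' and no '-m', the path checked below is
	 * '/mnt/alt/<pool>'; with neither option it is '/<pool>'.)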
	 */
	if (mountpoint == NULL ||
	    (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
	    strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
		char buf[MAXPATHLEN];
		DIR *dirp;

		if (mountpoint && mountpoint[0] != '/') {
			(void) fprintf(stderr, gettext("invalid mountpoint "
			    "'%s': must be an absolute path, 'legacy', or "
			    "'none'\n"), mountpoint);
			goto errout;
		}

		if (mountpoint == NULL) {
			if (altroot != NULL)
				(void) snprintf(buf, sizeof (buf), "%s/%s",
				    altroot, poolname);
			else
				(void) snprintf(buf, sizeof (buf), "/%s",
				    poolname);
		} else {
			if (altroot != NULL)
				(void) snprintf(buf, sizeof (buf), "%s%s",
				    altroot, mountpoint);
			else
				(void) snprintf(buf, sizeof (buf), "%s",
				    mountpoint);
		}

		if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
			(void) fprintf(stderr, gettext("mountpoint '%s' : "
			    "%s\n"), buf, strerror(errno));
			(void) fprintf(stderr, gettext("use '-m' "
			    "option to provide a different default\n"));
			goto errout;
		} else if (dirp) {
			int count = 0;

			while (count < 3 && readdir(dirp) != NULL)
				count++;
			(void) closedir(dirp);

			if (count > 2) {
				(void) fprintf(stderr, gettext("mountpoint "
				    "'%s' exists and is not empty\n"), buf);
				(void) fprintf(stderr, gettext("use '-m' "
				    "option to provide a "
				    "different default\n"));
				goto errout;
			}
		}
	}

	/*
	 * Now that the mountpoint's validity has been checked, ensure that
	 * the property is set appropriately prior to creating the pool.
	 */
	if (mountpoint != NULL) {
		ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    mountpoint, &fsprops, B_FALSE);
		if (ret != 0)
			goto errout;
	}

	ret = 1;
	if (dryrun) {
		/*
		 * For a dry run invocation, print out a basic message and run
		 * through all the vdevs in the list and print out in an
		 * appropriate hierarchy.
		 */
		(void) printf(gettext("would create '%s' with the "
		    "following layout:\n\n"), poolname);

		print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
		print_vdev_tree(NULL, "dedup", nvroot, 0,
		    VDEV_ALLOC_BIAS_DEDUP, 0);
		print_vdev_tree(NULL, "special", nvroot, 0,
		    VDEV_ALLOC_BIAS_SPECIAL, 0);
		print_vdev_tree(NULL, "logs", nvroot, 0,
		    VDEV_ALLOC_BIAS_LOG, 0);
		print_cache_list(nvroot, 0);
		print_spare_list(nvroot, 0);

		ret = 0;
	} else {
		/*
		 * Load in feature set.
		 * Note: if compatibility property not given, we'll have
		 * NULL, which means 'all features'.
		 */
		boolean_t requested_features[SPA_FEATURES];
		if (zpool_do_load_compat(compat, requested_features) !=
		    ZPOOL_COMPATIBILITY_OK)
			goto errout;

		/*
		 * props contains list of features to enable.
		 * For each feature:
		 *  - remove it if feature@name=disabled
		 *  - leave it there if feature@name=enabled
		 *  - add it if:
		 *    - enable_pool_features (ie: no '-d' or '-o version')
		 *    - it's supported by the kernel module
		 *    - it's in the requested feature set
		 *  - warn if it's enabled but not in compat
		 */
		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
			char propname[MAXPATHLEN];
			const char *propval;
			zfeature_info_t *feat = &spa_feature_table[i];

			(void) snprintf(propname, sizeof (propname),
			    "feature@%s", feat->fi_uname);

			if (!nvlist_lookup_string(props, propname, &propval)) {
				if (strcmp(propval,
				    ZFS_FEATURE_DISABLED) == 0) {
					(void) nvlist_remove_all(props,
					    propname);
				} else if (strcmp(propval,
				    ZFS_FEATURE_ENABLED) == 0 &&
				    !requested_features[i]) {
					(void) fprintf(stderr, gettext(
					    "Warning: feature \"%s\" enabled "
					    "but is not in specified "
					    "'compatibility' feature set.\n"),
					    feat->fi_uname);
				}
			} else if (
			    enable_pool_features &&
			    feat->fi_zfs_mod_supported &&
			    requested_features[i]) {
				ret = add_prop_list(propname,
				    ZFS_FEATURE_ENABLED, &props, B_TRUE);
				if (ret != 0)
					goto errout;
			}
		}

		ret = 1;
		if (zpool_create(g_zfs, poolname,
		    nvroot, props, fsprops) == 0) {
			zfs_handle_t *pool = zfs_open(g_zfs,
			    tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
			if (pool != NULL) {
				if (zfs_mount(pool, NULL, 0) == 0) {
					ret = zfs_share(pool, NULL);
					zfs_commit_shares(NULL);
				}
				zfs_close(pool);
			}
		} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
			(void) fprintf(stderr, gettext("pool name may have "
			    "been omitted\n"));
		}
	}

errout:
	nvlist_free(nvroot);
	nvlist_free(fsprops);
	nvlist_free(props);
	return (ret);
badusage:
	nvlist_free(fsprops);
	nvlist_free(props);
	usage(B_FALSE);
	return (2);
}

/*
 * zpool destroy <pool>
 *
 *	-f	Forcefully unmount any datasets
 *
 * Destroy the given pool.  Automatically unmounts any datasets in the pool.
 */
int
zpool_do_destroy(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	int c;
	char *pool;
	zpool_handle_t *zhp;
	int ret;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* check arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	pool = argv[0];

	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
		/*
		 * As a special case, check for use of '/' in the name, and
		 * direct the user to use 'zfs destroy' instead.
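		 * (For example, 'zpool destroy tank/home' cannot be opened as
		 * a pool, so the hint below is printed; the name is
		 * illustrative.)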
2010 */ 2011 if (strchr(pool, '/') != NULL) 2012 (void) fprintf(stderr, gettext("use 'zfs destroy' to " 2013 "destroy a dataset\n")); 2014 return (1); 2015 } 2016 2017 if (zpool_disable_datasets(zhp, force) != 0) { 2018 (void) fprintf(stderr, gettext("could not destroy '%s': " 2019 "could not unmount datasets\n"), zpool_get_name(zhp)); 2020 zpool_close(zhp); 2021 return (1); 2022 } 2023 2024 /* The history must be logged as part of the export */ 2025 log_history = B_FALSE; 2026 2027 ret = (zpool_destroy(zhp, history_str) != 0); 2028 2029 zpool_close(zhp); 2030 2031 return (ret); 2032 } 2033 2034 typedef struct export_cbdata { 2035 tpool_t *tpool; 2036 pthread_mutex_t mnttab_lock; 2037 boolean_t force; 2038 boolean_t hardforce; 2039 int retval; 2040 } export_cbdata_t; 2041 2042 2043 typedef struct { 2044 char *aea_poolname; 2045 export_cbdata_t *aea_cbdata; 2046 } async_export_args_t; 2047 2048 /* 2049 * Export one pool 2050 */ 2051 static int 2052 zpool_export_one(zpool_handle_t *zhp, void *data) 2053 { 2054 export_cbdata_t *cb = data; 2055 2056 /* 2057 * zpool_disable_datasets() is not thread-safe for mnttab access. 2058 * So we serialize access here for 'zpool export -a' parallel case. 2059 */ 2060 if (cb->tpool != NULL) 2061 pthread_mutex_lock(&cb->mnttab_lock); 2062 2063 int retval = zpool_disable_datasets(zhp, cb->force); 2064 2065 if (cb->tpool != NULL) 2066 pthread_mutex_unlock(&cb->mnttab_lock); 2067 2068 if (retval) 2069 return (1); 2070 2071 if (cb->hardforce) { 2072 if (zpool_export_force(zhp, history_str) != 0) 2073 return (1); 2074 } else if (zpool_export(zhp, cb->force, history_str) != 0) { 2075 return (1); 2076 } 2077 2078 return (0); 2079 } 2080 2081 /* 2082 * Asynchronous export request 2083 */ 2084 static void 2085 zpool_export_task(void *arg) 2086 { 2087 async_export_args_t *aea = arg; 2088 2089 zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname); 2090 if (zhp != NULL) { 2091 int ret = zpool_export_one(zhp, aea->aea_cbdata); 2092 if (ret != 0) 2093 aea->aea_cbdata->retval = ret; 2094 zpool_close(zhp); 2095 } else { 2096 aea->aea_cbdata->retval = 1; 2097 } 2098 2099 free(aea->aea_poolname); 2100 free(aea); 2101 } 2102 2103 /* 2104 * Process an export request in parallel 2105 */ 2106 static int 2107 zpool_export_one_async(zpool_handle_t *zhp, void *data) 2108 { 2109 tpool_t *tpool = ((export_cbdata_t *)data)->tpool; 2110 async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t)); 2111 2112 /* save pool name since zhp will go out of scope */ 2113 aea->aea_poolname = strdup(zpool_get_name(zhp)); 2114 aea->aea_cbdata = data; 2115 2116 /* ship off actual export to another thread */ 2117 if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0) 2118 return (errno); /* unlikely */ 2119 else 2120 return (0); 2121 } 2122 2123 /* 2124 * zpool export [-f] <pool> ... 2125 * 2126 * -a Export all pools 2127 * -f Forcefully unmount datasets 2128 * 2129 * Export the given pools. By default, the command will attempt to cleanly 2130 * unmount any active datasets within the pool. If the '-f' flag is specified, 2131 * then the datasets will be forcefully unmounted. 
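 *
 * Illustrative invocations (the pool name "tank" is only a placeholder):
 *
 *	# zpool export tank
 *	# zpool export -a	(export every imported pool; exports run in parallel)
 *
 * The additional '-F' flag accepted below requests a hard-forced export,
 * handled via zpool_export_force().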
2132 */ 2133 int 2134 zpool_do_export(int argc, char **argv) 2135 { 2136 export_cbdata_t cb; 2137 boolean_t do_all = B_FALSE; 2138 boolean_t force = B_FALSE; 2139 boolean_t hardforce = B_FALSE; 2140 int c, ret; 2141 2142 /* check options */ 2143 while ((c = getopt(argc, argv, "afF")) != -1) { 2144 switch (c) { 2145 case 'a': 2146 do_all = B_TRUE; 2147 break; 2148 case 'f': 2149 force = B_TRUE; 2150 break; 2151 case 'F': 2152 hardforce = B_TRUE; 2153 break; 2154 case '?': 2155 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 2156 optopt); 2157 usage(B_FALSE); 2158 } 2159 } 2160 2161 cb.force = force; 2162 cb.hardforce = hardforce; 2163 cb.tpool = NULL; 2164 cb.retval = 0; 2165 argc -= optind; 2166 argv += optind; 2167 2168 /* The history will be logged as part of the export itself */ 2169 log_history = B_FALSE; 2170 2171 if (do_all) { 2172 if (argc != 0) { 2173 (void) fprintf(stderr, gettext("too many arguments\n")); 2174 usage(B_FALSE); 2175 } 2176 2177 cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 2178 0, NULL); 2179 pthread_mutex_init(&cb.mnttab_lock, NULL); 2180 2181 /* Asynchronously call zpool_export_one using thread pool */ 2182 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 2183 B_FALSE, zpool_export_one_async, &cb); 2184 2185 tpool_wait(cb.tpool); 2186 tpool_destroy(cb.tpool); 2187 (void) pthread_mutex_destroy(&cb.mnttab_lock); 2188 2189 return (ret | cb.retval); 2190 } 2191 2192 /* check arguments */ 2193 if (argc < 1) { 2194 (void) fprintf(stderr, gettext("missing pool argument\n")); 2195 usage(B_FALSE); 2196 } 2197 2198 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 2199 B_FALSE, zpool_export_one, &cb); 2200 2201 return (ret); 2202 } 2203 2204 /* 2205 * Given a vdev configuration, determine the maximum width needed for the device 2206 * name column. 
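 * The width is computed recursively: the longest vdev name found among the
 * children, spares, and L2ARC devices, plus two columns of indentation for
 * each level of nesting.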
2207 */ 2208 static int 2209 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max, 2210 int name_flags) 2211 { 2212 static const char *const subtypes[] = 2213 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN}; 2214 2215 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags); 2216 max = MAX(strlen(name) + depth, max); 2217 free(name); 2218 2219 nvlist_t **child; 2220 uint_t children; 2221 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i) 2222 if (nvlist_lookup_nvlist_array(nv, subtypes[i], 2223 &child, &children) == 0) 2224 for (uint_t c = 0; c < children; ++c) 2225 max = MAX(max_width(zhp, child[c], depth + 2, 2226 max, name_flags), max); 2227 2228 return (max); 2229 } 2230 2231 typedef struct spare_cbdata { 2232 uint64_t cb_guid; 2233 zpool_handle_t *cb_zhp; 2234 } spare_cbdata_t; 2235 2236 static boolean_t 2237 find_vdev(nvlist_t *nv, uint64_t search) 2238 { 2239 uint64_t guid; 2240 nvlist_t **child; 2241 uint_t c, children; 2242 2243 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 && 2244 search == guid) 2245 return (B_TRUE); 2246 2247 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2248 &child, &children) == 0) { 2249 for (c = 0; c < children; c++) 2250 if (find_vdev(child[c], search)) 2251 return (B_TRUE); 2252 } 2253 2254 return (B_FALSE); 2255 } 2256 2257 static int 2258 find_spare(zpool_handle_t *zhp, void *data) 2259 { 2260 spare_cbdata_t *cbp = data; 2261 nvlist_t *config, *nvroot; 2262 2263 config = zpool_get_config(zhp, NULL); 2264 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2265 &nvroot) == 0); 2266 2267 if (find_vdev(nvroot, cbp->cb_guid)) { 2268 cbp->cb_zhp = zhp; 2269 return (1); 2270 } 2271 2272 zpool_close(zhp); 2273 return (0); 2274 } 2275 2276 typedef struct status_cbdata { 2277 int cb_count; 2278 int cb_name_flags; 2279 int cb_namewidth; 2280 boolean_t cb_allpools; 2281 boolean_t cb_verbose; 2282 boolean_t cb_literal; 2283 boolean_t cb_explain; 2284 boolean_t cb_first; 2285 boolean_t cb_dedup_stats; 2286 boolean_t cb_print_unhealthy; 2287 boolean_t cb_print_status; 2288 boolean_t cb_print_slow_ios; 2289 boolean_t cb_print_vdev_init; 2290 boolean_t cb_print_vdev_trim; 2291 vdev_cmd_data_list_t *vcdl; 2292 boolean_t cb_print_power; 2293 } status_cbdata_t; 2294 2295 /* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */ 2296 static boolean_t 2297 is_blank_str(const char *str) 2298 { 2299 for (; str != NULL && *str != '\0'; ++str) 2300 if (!isblank(*str)) 2301 return (B_FALSE); 2302 return (B_TRUE); 2303 } 2304 2305 /* Print command output lines for specific vdev in a specific pool */ 2306 static void 2307 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path) 2308 { 2309 vdev_cmd_data_t *data; 2310 int i, j; 2311 const char *val; 2312 2313 for (i = 0; i < vcdl->count; i++) { 2314 if ((strcmp(vcdl->data[i].path, path) != 0) || 2315 (strcmp(vcdl->data[i].pool, pool) != 0)) { 2316 /* Not the vdev we're looking for */ 2317 continue; 2318 } 2319 2320 data = &vcdl->data[i]; 2321 /* Print out all the output values for this vdev */ 2322 for (j = 0; j < vcdl->uniq_cols_cnt; j++) { 2323 val = NULL; 2324 /* Does this vdev have values for this column? */ 2325 for (int k = 0; k < data->cols_cnt; k++) { 2326 if (strcmp(data->cols[k], 2327 vcdl->uniq_cols[j]) == 0) { 2328 /* yes it does, record the value */ 2329 val = data->lines[k]; 2330 break; 2331 } 2332 } 2333 /* 2334 * Mark empty values with dashes to make output 2335 * awk-able. 
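 * A literal "-" keeps the column count stable, so the output can still be
 * split reliably with awk or cut even when a script emitted nothing for
 * this vdev.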
2336 */ 2337 if (val == NULL || is_blank_str(val)) 2338 val = "-"; 2339 2340 printf("%*s", vcdl->uniq_cols_width[j], val); 2341 if (j < vcdl->uniq_cols_cnt - 1) 2342 fputs(" ", stdout); 2343 } 2344 2345 /* Print out any values that aren't in a column at the end */ 2346 for (j = data->cols_cnt; j < data->lines_cnt; j++) { 2347 /* Did we have any columns? If so print a spacer. */ 2348 if (vcdl->uniq_cols_cnt > 0) 2349 fputs(" ", stdout); 2350 2351 val = data->lines[j]; 2352 fputs(val ?: "", stdout); 2353 } 2354 break; 2355 } 2356 } 2357 2358 /* 2359 * Print vdev initialization status for leaves 2360 */ 2361 static void 2362 print_status_initialize(vdev_stat_t *vs, boolean_t verbose) 2363 { 2364 if (verbose) { 2365 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE || 2366 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED || 2367 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) && 2368 !vs->vs_scan_removing) { 2369 char zbuf[1024]; 2370 char tbuf[256]; 2371 2372 time_t t = vs->vs_initialize_action_time; 2373 int initialize_pct = 100; 2374 if (vs->vs_initialize_state != 2375 VDEV_INITIALIZE_COMPLETE) { 2376 initialize_pct = (vs->vs_initialize_bytes_done * 2377 100 / (vs->vs_initialize_bytes_est + 1)); 2378 } 2379 2380 (void) ctime_r(&t, tbuf); 2381 tbuf[24] = 0; 2382 2383 switch (vs->vs_initialize_state) { 2384 case VDEV_INITIALIZE_SUSPENDED: 2385 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2386 gettext("suspended, started at"), tbuf); 2387 break; 2388 case VDEV_INITIALIZE_ACTIVE: 2389 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2390 gettext("started at"), tbuf); 2391 break; 2392 case VDEV_INITIALIZE_COMPLETE: 2393 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2394 gettext("completed at"), tbuf); 2395 break; 2396 } 2397 2398 (void) printf(gettext(" (%d%% initialized%s)"), 2399 initialize_pct, zbuf); 2400 } else { 2401 (void) printf(gettext(" (uninitialized)")); 2402 } 2403 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) { 2404 (void) printf(gettext(" (initializing)")); 2405 } 2406 } 2407 2408 /* 2409 * Print vdev TRIM status for leaves 2410 */ 2411 static void 2412 print_status_trim(vdev_stat_t *vs, boolean_t verbose) 2413 { 2414 if (verbose) { 2415 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE || 2416 vs->vs_trim_state == VDEV_TRIM_SUSPENDED || 2417 vs->vs_trim_state == VDEV_TRIM_COMPLETE) && 2418 !vs->vs_scan_removing) { 2419 char zbuf[1024]; 2420 char tbuf[256]; 2421 2422 time_t t = vs->vs_trim_action_time; 2423 int trim_pct = 100; 2424 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) { 2425 trim_pct = (vs->vs_trim_bytes_done * 2426 100 / (vs->vs_trim_bytes_est + 1)); 2427 } 2428 2429 (void) ctime_r(&t, tbuf); 2430 tbuf[24] = 0; 2431 2432 switch (vs->vs_trim_state) { 2433 case VDEV_TRIM_SUSPENDED: 2434 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2435 gettext("suspended, started at"), tbuf); 2436 break; 2437 case VDEV_TRIM_ACTIVE: 2438 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2439 gettext("started at"), tbuf); 2440 break; 2441 case VDEV_TRIM_COMPLETE: 2442 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2443 gettext("completed at"), tbuf); 2444 break; 2445 } 2446 2447 (void) printf(gettext(" (%d%% trimmed%s)"), 2448 trim_pct, zbuf); 2449 } else if (vs->vs_trim_notsup) { 2450 (void) printf(gettext(" (trim unsupported)")); 2451 } else { 2452 (void) printf(gettext(" (untrimmed)")); 2453 } 2454 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) { 2455 (void) printf(gettext(" (trimming)")); 2456 } 2457 } 2458 2459 /* 2460 * Return the color associated with a 
health string. This includes returning 2461 * NULL for no color change. 2462 */ 2463 static const char * 2464 health_str_to_color(const char *health) 2465 { 2466 if (strcmp(health, gettext("FAULTED")) == 0 || 2467 strcmp(health, gettext("SUSPENDED")) == 0 || 2468 strcmp(health, gettext("UNAVAIL")) == 0) { 2469 return (ANSI_RED); 2470 } 2471 2472 if (strcmp(health, gettext("OFFLINE")) == 0 || 2473 strcmp(health, gettext("DEGRADED")) == 0 || 2474 strcmp(health, gettext("REMOVED")) == 0) { 2475 return (ANSI_YELLOW); 2476 } 2477 2478 return (NULL); 2479 } 2480 2481 /* 2482 * Called for each leaf vdev. Returns 0 if the vdev is healthy. 2483 * A vdev is unhealthy if any of the following are true: 2484 * 1) there are read, write, or checksum errors, 2485 * 2) its state is not ONLINE, or 2486 * 3) slow IO reporting was requested (-s) and there are slow IOs. 2487 */ 2488 static int 2489 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data) 2490 { 2491 status_cbdata_t *cb = data; 2492 vdev_stat_t *vs; 2493 uint_t vsc; 2494 (void) hdl_data; 2495 2496 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2497 (uint64_t **)&vs, &vsc) != 0) 2498 return (1); 2499 2500 if (vs->vs_checksum_errors || vs->vs_read_errors || 2501 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY) 2502 return (1); 2503 2504 if (cb->cb_print_slow_ios && vs->vs_slow_ios) 2505 return (1); 2506 2507 return (0); 2508 } 2509 2510 /* 2511 * Print out configuration state as requested by status_callback. 2512 */ 2513 static void 2514 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name, 2515 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs) 2516 { 2517 nvlist_t **child, *root; 2518 uint_t c, i, vsc, children; 2519 pool_scan_stat_t *ps = NULL; 2520 vdev_stat_t *vs; 2521 char rbuf[6], wbuf[6], cbuf[6]; 2522 char *vname; 2523 uint64_t notpresent; 2524 spare_cbdata_t spare_cb; 2525 const char *state; 2526 const char *type; 2527 const char *path = NULL; 2528 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL, 2529 *scolor = NULL; 2530 2531 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2532 &child, &children) != 0) 2533 children = 0; 2534 2535 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2536 (uint64_t **)&vs, &vsc) == 0); 2537 2538 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 2539 2540 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) 2541 return; 2542 2543 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 2544 2545 if (isspare) { 2546 /* 2547 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for 2548 * online drives. 2549 */ 2550 if (vs->vs_aux == VDEV_AUX_SPARED) 2551 state = gettext("INUSE"); 2552 else if (vs->vs_state == VDEV_STATE_HEALTHY) 2553 state = gettext("AVAIL"); 2554 } 2555 2556 /* 2557 * If '-e' is specified then top-level vdevs and their children 2558 * can be pruned if all of their leaves are healthy. 
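 * In practice this means 'zpool status -e' lists only vdevs with errors,
 * an unhealthy state, or (together with -s) slow I/Os, which keeps the
 * output short on large pools.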
2559 */ 2560 if (cb->cb_print_unhealthy && depth > 0 && 2561 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) { 2562 return; 2563 } 2564 2565 printf_color(health_str_to_color(state), 2566 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth, 2567 name, state); 2568 2569 if (!isspare) { 2570 if (vs->vs_read_errors) 2571 rcolor = ANSI_RED; 2572 2573 if (vs->vs_write_errors) 2574 wcolor = ANSI_RED; 2575 2576 if (vs->vs_checksum_errors) 2577 ccolor = ANSI_RED; 2578 2579 if (vs->vs_slow_ios) 2580 scolor = ANSI_BLUE; 2581 2582 if (cb->cb_literal) { 2583 fputc(' ', stdout); 2584 printf_color(rcolor, "%5llu", 2585 (u_longlong_t)vs->vs_read_errors); 2586 fputc(' ', stdout); 2587 printf_color(wcolor, "%5llu", 2588 (u_longlong_t)vs->vs_write_errors); 2589 fputc(' ', stdout); 2590 printf_color(ccolor, "%5llu", 2591 (u_longlong_t)vs->vs_checksum_errors); 2592 } else { 2593 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf)); 2594 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf)); 2595 zfs_nicenum(vs->vs_checksum_errors, cbuf, 2596 sizeof (cbuf)); 2597 fputc(' ', stdout); 2598 printf_color(rcolor, "%5s", rbuf); 2599 fputc(' ', stdout); 2600 printf_color(wcolor, "%5s", wbuf); 2601 fputc(' ', stdout); 2602 printf_color(ccolor, "%5s", cbuf); 2603 } 2604 if (cb->cb_print_slow_ios) { 2605 if (children == 0) { 2606 /* Only leaf vdevs have slow IOs */ 2607 zfs_nicenum(vs->vs_slow_ios, rbuf, 2608 sizeof (rbuf)); 2609 } else { 2610 snprintf(rbuf, sizeof (rbuf), "-"); 2611 } 2612 2613 if (cb->cb_literal) 2614 printf_color(scolor, " %5llu", 2615 (u_longlong_t)vs->vs_slow_ios); 2616 else 2617 printf_color(scolor, " %5s", rbuf); 2618 } 2619 if (cb->cb_print_power) { 2620 if (children == 0) { 2621 /* Only leaf vdevs have physical slots */ 2622 switch (zpool_power_current_state(zhp, (char *) 2623 fnvlist_lookup_string(nv, 2624 ZPOOL_CONFIG_PATH))) { 2625 case 0: 2626 printf_color(ANSI_RED, " %5s", 2627 gettext("off")); 2628 break; 2629 case 1: 2630 printf(" %5s", gettext("on")); 2631 break; 2632 default: 2633 printf(" %5s", "-"); 2634 } 2635 } else { 2636 printf(" %5s", "-"); 2637 } 2638 } 2639 } 2640 2641 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 2642 &notpresent) == 0) { 2643 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); 2644 (void) printf(" %s %s", gettext("was"), path); 2645 } else if (vs->vs_aux != 0) { 2646 (void) printf(" "); 2647 color_start(ANSI_RED); 2648 switch (vs->vs_aux) { 2649 case VDEV_AUX_OPEN_FAILED: 2650 (void) printf(gettext("cannot open")); 2651 break; 2652 2653 case VDEV_AUX_BAD_GUID_SUM: 2654 (void) printf(gettext("missing device")); 2655 break; 2656 2657 case VDEV_AUX_NO_REPLICAS: 2658 (void) printf(gettext("insufficient replicas")); 2659 break; 2660 2661 case VDEV_AUX_VERSION_NEWER: 2662 (void) printf(gettext("newer version")); 2663 break; 2664 2665 case VDEV_AUX_UNSUP_FEAT: 2666 (void) printf(gettext("unsupported feature(s)")); 2667 break; 2668 2669 case VDEV_AUX_ASHIFT_TOO_BIG: 2670 (void) printf(gettext("unsupported minimum blocksize")); 2671 break; 2672 2673 case VDEV_AUX_SPARED: 2674 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 2675 &spare_cb.cb_guid) == 0); 2676 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) { 2677 if (strcmp(zpool_get_name(spare_cb.cb_zhp), 2678 zpool_get_name(zhp)) == 0) 2679 (void) printf(gettext("currently in " 2680 "use")); 2681 else 2682 (void) printf(gettext("in use by " 2683 "pool '%s'"), 2684 zpool_get_name(spare_cb.cb_zhp)); 2685 zpool_close(spare_cb.cb_zhp); 2686 } else { 2687 (void)
printf(gettext("currently in use")); 2688 } 2689 break; 2690 2691 case VDEV_AUX_ERR_EXCEEDED: 2692 if (vs->vs_read_errors + vs->vs_write_errors + 2693 vs->vs_checksum_errors == 0 && children == 0 && 2694 vs->vs_slow_ios > 0) { 2695 (void) printf(gettext("too many slow I/Os")); 2696 } else { 2697 (void) printf(gettext("too many errors")); 2698 } 2699 break; 2700 2701 case VDEV_AUX_IO_FAILURE: 2702 (void) printf(gettext("experienced I/O failures")); 2703 break; 2704 2705 case VDEV_AUX_BAD_LOG: 2706 (void) printf(gettext("bad intent log")); 2707 break; 2708 2709 case VDEV_AUX_EXTERNAL: 2710 (void) printf(gettext("external device fault")); 2711 break; 2712 2713 case VDEV_AUX_SPLIT_POOL: 2714 (void) printf(gettext("split into new pool")); 2715 break; 2716 2717 case VDEV_AUX_ACTIVE: 2718 (void) printf(gettext("currently in use")); 2719 break; 2720 2721 case VDEV_AUX_CHILDREN_OFFLINE: 2722 (void) printf(gettext("all children offline")); 2723 break; 2724 2725 case VDEV_AUX_BAD_LABEL: 2726 (void) printf(gettext("invalid label")); 2727 break; 2728 2729 default: 2730 (void) printf(gettext("corrupted data")); 2731 break; 2732 } 2733 color_end(); 2734 } else if (children == 0 && !isspare && 2735 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL && 2736 VDEV_STAT_VALID(vs_physical_ashift, vsc) && 2737 vs->vs_configured_ashift < vs->vs_physical_ashift) { 2738 (void) printf( 2739 gettext(" block size: %dB configured, %dB native"), 2740 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift); 2741 } 2742 2743 if (vs->vs_scan_removing != 0) { 2744 (void) printf(gettext(" (removing)")); 2745 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) { 2746 (void) printf(gettext(" (non-allocating)")); 2747 } 2748 2749 /* The root vdev has the scrub/resilver stats */ 2750 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2751 ZPOOL_CONFIG_VDEV_TREE); 2752 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS, 2753 (uint64_t **)&ps, &c); 2754 2755 /* 2756 * If you force fault a drive that's resilvering, its scan stats can 2757 * get frozen in time, giving the false impression that it's 2758 * being resilvered. That's why we check the state to see if the vdev 2759 * is healthy before reporting "resilvering" or "repairing". 2760 */ 2761 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 && 2762 vs->vs_state == VDEV_STATE_HEALTHY) { 2763 if (vs->vs_scan_processed != 0) { 2764 (void) printf(gettext(" (%s)"), 2765 (ps->pss_func == POOL_SCAN_RESILVER) ? 2766 "resilvering" : "repairing"); 2767 } else if (vs->vs_resilver_deferred) { 2768 (void) printf(gettext(" (awaiting resilver)")); 2769 } 2770 } 2771 2772 /* The top-level vdevs have the rebuild stats */ 2773 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE && 2774 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) { 2775 if (vs->vs_rebuild_processed != 0) { 2776 (void) printf(gettext(" (resilvering)")); 2777 } 2778 } 2779 2780 if (cb->vcdl != NULL) { 2781 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 2782 printf(" "); 2783 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 2784 } 2785 } 2786 2787 /* Display vdev initialization and trim status for leaves. 
*/ 2788 if (children == 0) { 2789 print_status_initialize(vs, cb->cb_print_vdev_init); 2790 print_status_trim(vs, cb->cb_print_vdev_trim); 2791 } 2792 2793 (void) printf("\n"); 2794 2795 for (c = 0; c < children; c++) { 2796 uint64_t islog = B_FALSE, ishole = B_FALSE; 2797 2798 /* Don't print logs or holes here */ 2799 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2800 &islog); 2801 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2802 &ishole); 2803 if (islog || ishole) 2804 continue; 2805 /* Only print normal classes here */ 2806 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 2807 continue; 2808 2809 /* Provide vdev_rebuild_stats to children if available */ 2810 if (vrs == NULL) { 2811 (void) nvlist_lookup_uint64_array(nv, 2812 ZPOOL_CONFIG_REBUILD_STATS, 2813 (uint64_t **)&vrs, &i); 2814 } 2815 2816 vname = zpool_vdev_name(g_zfs, zhp, child[c], 2817 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2818 print_status_config(zhp, cb, vname, child[c], depth + 2, 2819 isspare, vrs); 2820 free(vname); 2821 } 2822 } 2823 2824 /* 2825 * Print the configuration of an exported pool. Iterate over all vdevs in the 2826 * pool, printing out the name and status for each one. 2827 */ 2828 static void 2829 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv, 2830 int depth) 2831 { 2832 nvlist_t **child; 2833 uint_t c, children; 2834 vdev_stat_t *vs; 2835 const char *type; 2836 char *vname; 2837 2838 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 2839 if (strcmp(type, VDEV_TYPE_MISSING) == 0 || 2840 strcmp(type, VDEV_TYPE_HOLE) == 0) 2841 return; 2842 2843 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2844 (uint64_t **)&vs, &c) == 0); 2845 2846 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name); 2847 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux)); 2848 2849 if (vs->vs_aux != 0) { 2850 (void) printf(" "); 2851 2852 switch (vs->vs_aux) { 2853 case VDEV_AUX_OPEN_FAILED: 2854 (void) printf(gettext("cannot open")); 2855 break; 2856 2857 case VDEV_AUX_BAD_GUID_SUM: 2858 (void) printf(gettext("missing device")); 2859 break; 2860 2861 case VDEV_AUX_NO_REPLICAS: 2862 (void) printf(gettext("insufficient replicas")); 2863 break; 2864 2865 case VDEV_AUX_VERSION_NEWER: 2866 (void) printf(gettext("newer version")); 2867 break; 2868 2869 case VDEV_AUX_UNSUP_FEAT: 2870 (void) printf(gettext("unsupported feature(s)")); 2871 break; 2872 2873 case VDEV_AUX_ERR_EXCEEDED: 2874 (void) printf(gettext("too many errors")); 2875 break; 2876 2877 case VDEV_AUX_ACTIVE: 2878 (void) printf(gettext("currently in use")); 2879 break; 2880 2881 case VDEV_AUX_CHILDREN_OFFLINE: 2882 (void) printf(gettext("all children offline")); 2883 break; 2884 2885 case VDEV_AUX_BAD_LABEL: 2886 (void) printf(gettext("invalid label")); 2887 break; 2888 2889 default: 2890 (void) printf(gettext("corrupted data")); 2891 break; 2892 } 2893 } 2894 (void) printf("\n"); 2895 2896 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2897 &child, &children) != 0) 2898 return; 2899 2900 for (c = 0; c < children; c++) { 2901 uint64_t is_log = B_FALSE; 2902 2903 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2904 &is_log); 2905 if (is_log) 2906 continue; 2907 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 2908 continue; 2909 2910 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2911 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2912 print_import_config(cb, vname, child[c], depth + 2); 2913 free(vname); 2914 } 2915 2916 if 
(nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2917 &child, &children) == 0) { 2918 (void) printf(gettext("\tcache\n")); 2919 for (c = 0; c < children; c++) { 2920 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2921 cb->cb_name_flags); 2922 (void) printf("\t %s\n", vname); 2923 free(vname); 2924 } 2925 } 2926 2927 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2928 &child, &children) == 0) { 2929 (void) printf(gettext("\tspares\n")); 2930 for (c = 0; c < children; c++) { 2931 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2932 cb->cb_name_flags); 2933 (void) printf("\t %s\n", vname); 2934 free(vname); 2935 } 2936 } 2937 } 2938 2939 /* 2940 * Print specialized class vdevs. 2941 * 2942 * These are recorded as top level vdevs in the main pool child array 2943 * but with "is_log" set to 1 or an "alloc_bias" string. We use either 2944 * print_status_config() or print_import_config() to print the top level 2945 * class vdevs then any of their children (eg mirrored slogs) are printed 2946 * recursively - which works because only the top level vdev is marked. 2947 */ 2948 static void 2949 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 2950 const char *class) 2951 { 2952 uint_t c, children; 2953 nvlist_t **child; 2954 boolean_t printed = B_FALSE; 2955 2956 assert(zhp != NULL || !cb->cb_verbose); 2957 2958 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, 2959 &children) != 0) 2960 return; 2961 2962 for (c = 0; c < children; c++) { 2963 uint64_t is_log = B_FALSE; 2964 const char *bias = NULL; 2965 const char *type = NULL; 2966 2967 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2968 &is_log); 2969 2970 if (is_log) { 2971 bias = (char *)VDEV_ALLOC_CLASS_LOGS; 2972 } else { 2973 (void) nvlist_lookup_string(child[c], 2974 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 2975 (void) nvlist_lookup_string(child[c], 2976 ZPOOL_CONFIG_TYPE, &type); 2977 } 2978 2979 if (bias == NULL || strcmp(bias, class) != 0) 2980 continue; 2981 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 2982 continue; 2983 2984 if (!printed) { 2985 (void) printf("\t%s\t\n", gettext(class)); 2986 printed = B_TRUE; 2987 } 2988 2989 char *name = zpool_vdev_name(g_zfs, zhp, child[c], 2990 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2991 if (cb->cb_print_status) 2992 print_status_config(zhp, cb, name, child[c], 2, 2993 B_FALSE, NULL); 2994 else 2995 print_import_config(cb, name, child[c], 2); 2996 free(name); 2997 } 2998 } 2999 3000 /* 3001 * Display the status for the given pool. 
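 * This is the preview printed by 'zpool import' before anything is actually
 * imported: the pool's name, id, state, an optional status/action pair, and
 * the vdev configuration taken from the candidate config nvlist.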
3002 */ 3003 static int 3004 show_import(nvlist_t *config, boolean_t report_error) 3005 { 3006 uint64_t pool_state; 3007 vdev_stat_t *vs; 3008 const char *name; 3009 uint64_t guid; 3010 uint64_t hostid = 0; 3011 const char *msgid; 3012 const char *hostname = "unknown"; 3013 nvlist_t *nvroot, *nvinfo; 3014 zpool_status_t reason; 3015 zpool_errata_t errata; 3016 const char *health; 3017 uint_t vsc; 3018 const char *comment; 3019 const char *indent; 3020 status_cbdata_t cb = { 0 }; 3021 3022 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3023 &name) == 0); 3024 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3025 &guid) == 0); 3026 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 3027 &pool_state) == 0); 3028 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3029 &nvroot) == 0); 3030 3031 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 3032 (uint64_t **)&vs, &vsc) == 0); 3033 health = zpool_state_to_name(vs->vs_state, vs->vs_aux); 3034 3035 reason = zpool_import_status(config, &msgid, &errata); 3036 3037 /* 3038 * If we're importing using a cachefile, then we won't report any 3039 * errors unless we are in the scan phase of the import. 3040 */ 3041 if (reason != ZPOOL_STATUS_OK && !report_error) 3042 return (reason); 3043 3044 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) { 3045 indent = " "; 3046 } else { 3047 comment = NULL; 3048 indent = ""; 3049 } 3050 3051 (void) printf(gettext("%s pool: %s\n"), indent, name); 3052 (void) printf(gettext("%s id: %llu\n"), indent, (u_longlong_t)guid); 3053 (void) printf(gettext("%s state: %s"), indent, health); 3054 if (pool_state == POOL_STATE_DESTROYED) 3055 (void) printf(gettext(" (DESTROYED)")); 3056 (void) printf("\n"); 3057 3058 if (reason != ZPOOL_STATUS_OK) { 3059 (void) printf("%s", indent); 3060 printf_color(ANSI_BOLD, gettext("status: ")); 3061 } 3062 switch (reason) { 3063 case ZPOOL_STATUS_MISSING_DEV_R: 3064 case ZPOOL_STATUS_MISSING_DEV_NR: 3065 case ZPOOL_STATUS_BAD_GUID_SUM: 3066 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3067 "missing from the system.\n")); 3068 break; 3069 3070 case ZPOOL_STATUS_CORRUPT_LABEL_R: 3071 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 3072 printf_color(ANSI_YELLOW, gettext("One or more devices " 3073 "contains corrupted data.\n")); 3074 break; 3075 3076 case ZPOOL_STATUS_CORRUPT_DATA: 3077 printf_color(ANSI_YELLOW, gettext("The pool data is " 3078 "corrupted.\n")); 3079 break; 3080 3081 case ZPOOL_STATUS_OFFLINE_DEV: 3082 printf_color(ANSI_YELLOW, gettext("One or more devices " 3083 "are offlined.\n")); 3084 break; 3085 3086 case ZPOOL_STATUS_CORRUPT_POOL: 3087 printf_color(ANSI_YELLOW, gettext("The pool metadata is " 3088 "corrupted.\n")); 3089 break; 3090 3091 case ZPOOL_STATUS_VERSION_OLDER: 3092 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 3093 "a legacy on-disk version.\n")); 3094 break; 3095 3096 case ZPOOL_STATUS_VERSION_NEWER: 3097 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 3098 "an incompatible version.\n")); 3099 break; 3100 3101 case ZPOOL_STATUS_FEAT_DISABLED: 3102 printf_color(ANSI_YELLOW, gettext("Some supported " 3103 "features are not enabled on the pool.\n" 3104 "\t%s(Note that they may be intentionally disabled if the\n" 3105 "\t%s'compatibility' property is set.)\n"), indent, indent); 3106 break; 3107 3108 case ZPOOL_STATUS_COMPATIBILITY_ERR: 3109 printf_color(ANSI_YELLOW, gettext("Error reading or parsing " 3110 "the file(s) indicated by the 
'compatibility'\n" 3111 "\t%sproperty.\n"), indent); 3112 break; 3113 3114 case ZPOOL_STATUS_INCOMPATIBLE_FEAT: 3115 printf_color(ANSI_YELLOW, gettext("One or more features " 3116 "are enabled on the pool despite not being\n" 3117 "\t%srequested by the 'compatibility' property.\n"), 3118 indent); 3119 break; 3120 3121 case ZPOOL_STATUS_UNSUP_FEAT_READ: 3122 printf_color(ANSI_YELLOW, gettext("The pool uses the following " 3123 "feature(s) not supported on this system:\n")); 3124 color_start(ANSI_YELLOW); 3125 zpool_print_unsup_feat(config); 3126 color_end(); 3127 break; 3128 3129 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 3130 printf_color(ANSI_YELLOW, gettext("The pool can only be " 3131 "accessed in read-only mode on this system. It\n" 3132 "\t%scannot be accessed in read-write mode because it uses " 3133 "the following\n" 3134 "\t%sfeature(s) not supported on this system:\n"), 3135 indent, indent); 3136 color_start(ANSI_YELLOW); 3137 zpool_print_unsup_feat(config); 3138 color_end(); 3139 break; 3140 3141 case ZPOOL_STATUS_HOSTID_ACTIVE: 3142 printf_color(ANSI_YELLOW, gettext("The pool is currently " 3143 "imported by another system.\n")); 3144 break; 3145 3146 case ZPOOL_STATUS_HOSTID_REQUIRED: 3147 printf_color(ANSI_YELLOW, gettext("The pool has the " 3148 "multihost property on. It cannot\n" 3149 "\t%sbe safely imported when the system hostid is not " 3150 "set.\n"), indent); 3151 break; 3152 3153 case ZPOOL_STATUS_HOSTID_MISMATCH: 3154 printf_color(ANSI_YELLOW, gettext("The pool was last accessed " 3155 "by another system.\n")); 3156 break; 3157 3158 case ZPOOL_STATUS_FAULTED_DEV_R: 3159 case ZPOOL_STATUS_FAULTED_DEV_NR: 3160 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3161 "faulted.\n")); 3162 break; 3163 3164 case ZPOOL_STATUS_BAD_LOG: 3165 printf_color(ANSI_YELLOW, gettext("An intent log record cannot " 3166 "be read.\n")); 3167 break; 3168 3169 case ZPOOL_STATUS_RESILVERING: 3170 case ZPOOL_STATUS_REBUILDING: 3171 printf_color(ANSI_YELLOW, gettext("One or more devices were " 3172 "being resilvered.\n")); 3173 break; 3174 3175 case ZPOOL_STATUS_ERRATA: 3176 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"), 3177 errata); 3178 break; 3179 3180 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 3181 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3182 "configured to use a non-native block size.\n" 3183 "\t%sExpect reduced performance.\n"), indent); 3184 break; 3185 3186 default: 3187 /* 3188 * No other status can be seen when importing pools. 3189 */ 3190 assert(reason == ZPOOL_STATUS_OK); 3191 } 3192 3193 /* 3194 * Print out an action according to the overall state of the pool. 
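 * Healthy pools get a plain "can be imported" message (with caveats for old
 * versions, compatibility problems, or errata), DEGRADED pools get a warning
 * about reduced fault tolerance, and any other state explains why the import
 * will fail.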
3195 */ 3196 if (vs->vs_state != VDEV_STATE_HEALTHY || 3197 reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) { 3198 (void) printf("%s", indent); 3199 (void) printf(gettext("action: ")); 3200 } 3201 if (vs->vs_state == VDEV_STATE_HEALTHY) { 3202 if (reason == ZPOOL_STATUS_VERSION_OLDER || 3203 reason == ZPOOL_STATUS_FEAT_DISABLED) { 3204 (void) printf(gettext("The pool can be imported using " 3205 "its name or numeric identifier, though\n" 3206 "\t%ssome features will not be available without " 3207 "an explicit 'zpool upgrade'.\n"), indent); 3208 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) { 3209 (void) printf(gettext("The pool can be imported using " 3210 "its name or numeric\n" 3211 "\t%sidentifier, though the file(s) indicated by " 3212 "its 'compatibility'\n" 3213 "\t%sproperty cannot be parsed at this time.\n"), 3214 indent, indent); 3215 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) { 3216 (void) printf(gettext("The pool can be imported using " 3217 "its name or numeric identifier and\n" 3218 "\t%sthe '-f' flag.\n"), indent); 3219 } else if (reason == ZPOOL_STATUS_ERRATA) { 3220 switch (errata) { 3221 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 3222 (void) printf(gettext("The pool can be " 3223 "imported using its name or numeric " 3224 "identifier,\n" 3225 "\t%showever there is a compatibility " 3226 "issue which should be corrected\n" 3227 "\t%sby running 'zpool scrub'\n"), 3228 indent, indent); 3229 break; 3230 3231 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY: 3232 (void) printf(gettext("The pool cannot be " 3233 "imported with this version of ZFS due to\n" 3234 "\t%san active asynchronous destroy. " 3235 "Revert to an earlier version\n" 3236 "\t%sand allow the destroy to complete " 3237 "before updating.\n"), indent, indent); 3238 break; 3239 3240 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 3241 (void) printf(gettext("Existing encrypted " 3242 "datasets contain an on-disk " 3243 "incompatibility, which\n" 3244 "\t%sneeds to be corrected. Backup these " 3245 "datasets to new encrypted datasets\n" 3246 "\t%sand destroy the old ones.\n"), 3247 indent, indent); 3248 break; 3249 3250 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 3251 (void) printf(gettext("Existing encrypted " 3252 "snapshots and bookmarks contain an " 3253 "on-disk\n" 3254 "\t%sincompatibility. This may cause " 3255 "on-disk corruption if they are used\n" 3256 "\t%swith 'zfs recv'. To correct the " 3257 "issue, enable the bookmark_v2 feature.\n" 3258 "\t%sNo additional action is needed if " 3259 "there are no encrypted snapshots or\n" 3260 "\t%sbookmarks. If preserving the " 3261 "encrypted snapshots and bookmarks is\n" 3262 "\t%srequired, use a non-raw send to " 3263 "backup and restore them. Alternately,\n" 3264 "\t%sthey may be removed to resolve the " 3265 "incompatibility.\n"), indent, indent, 3266 indent, indent, indent, indent); 3267 break; 3268 default: 3269 /* 3270 * All errata must contain an action message. 3271 */ 3272 assert(errata == ZPOOL_ERRATA_NONE); 3273 } 3274 } else { 3275 (void) printf(gettext("The pool can be imported using " 3276 "its name or numeric identifier.\n")); 3277 } 3278 } else if (vs->vs_state == VDEV_STATE_DEGRADED) { 3279 (void) printf(gettext("The pool can be imported despite " 3280 "missing or damaged devices. The\n" 3281 "\t%sfault tolerance of the pool may be compromised if " 3282 "imported.\n"), indent); 3283 } else { 3284 switch (reason) { 3285 case ZPOOL_STATUS_VERSION_NEWER: 3286 (void) printf(gettext("The pool cannot be imported. 
" 3287 "Access the pool on a system running newer\n" 3288 "\t%ssoftware, or recreate the pool from " 3289 "backup.\n"), indent); 3290 break; 3291 case ZPOOL_STATUS_UNSUP_FEAT_READ: 3292 (void) printf(gettext("The pool cannot be imported. " 3293 "Access the pool on a system that supports\n" 3294 "\t%sthe required feature(s), or recreate the pool " 3295 "from backup.\n"), indent); 3296 break; 3297 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 3298 (void) printf(gettext("The pool cannot be imported in " 3299 "read-write mode. Import the pool with\n" 3300 "\t%s'-o readonly=on', access the pool on a system " 3301 "that supports the\n" 3302 "\t%srequired feature(s), or recreate the pool " 3303 "from backup.\n"), indent, indent); 3304 break; 3305 case ZPOOL_STATUS_MISSING_DEV_R: 3306 case ZPOOL_STATUS_MISSING_DEV_NR: 3307 case ZPOOL_STATUS_BAD_GUID_SUM: 3308 (void) printf(gettext("The pool cannot be imported. " 3309 "Attach the missing\n" 3310 "\t%sdevices and try again.\n"), indent); 3311 break; 3312 case ZPOOL_STATUS_HOSTID_ACTIVE: 3313 VERIFY0(nvlist_lookup_nvlist(config, 3314 ZPOOL_CONFIG_LOAD_INFO, &nvinfo)); 3315 3316 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 3317 hostname = fnvlist_lookup_string(nvinfo, 3318 ZPOOL_CONFIG_MMP_HOSTNAME); 3319 3320 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3321 hostid = fnvlist_lookup_uint64(nvinfo, 3322 ZPOOL_CONFIG_MMP_HOSTID); 3323 3324 (void) printf(gettext("The pool must be exported from " 3325 "%s (hostid=%"PRIx64")\n" 3326 "\t%sbefore it can be safely imported.\n"), 3327 hostname, hostid, indent); 3328 break; 3329 case ZPOOL_STATUS_HOSTID_REQUIRED: 3330 (void) printf(gettext("Set a unique system hostid with " 3331 "the zgenhostid(8) command.\n")); 3332 break; 3333 default: 3334 (void) printf(gettext("The pool cannot be imported due " 3335 "to damaged devices or data.\n")); 3336 } 3337 } 3338 3339 /* Print the comment attached to the pool. 
*/ 3340 if (comment != NULL) 3341 (void) printf(gettext("comment: %s\n"), comment); 3342 3343 /* 3344 * If the state is "closed" or "can't open", and the aux state 3345 * is "corrupt data": 3346 */ 3347 if ((vs->vs_state == VDEV_STATE_CLOSED || 3348 vs->vs_state == VDEV_STATE_CANT_OPEN) && 3349 vs->vs_aux == VDEV_AUX_CORRUPT_DATA) { 3350 if (pool_state == POOL_STATE_DESTROYED) 3351 (void) printf(gettext("\t%sThe pool was destroyed, " 3352 "but can be imported using the '-Df' flags.\n"), 3353 indent); 3354 else if (pool_state != POOL_STATE_EXPORTED) 3355 (void) printf(gettext("\t%sThe pool may be active on " 3356 "another system, but can be imported using\n" 3357 "\t%sthe '-f' flag.\n"), indent, indent); 3358 } 3359 3360 if (msgid != NULL) { 3361 (void) printf(gettext("%s see: " 3362 "https://openzfs.github.io/openzfs-docs/msg/%s\n"), 3363 indent, msgid); 3364 } 3365 3366 (void) printf(gettext("%sconfig:\n\n"), indent); 3367 3368 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name), 3369 VDEV_NAME_TYPE_ID); 3370 if (cb.cb_namewidth < 10) 3371 cb.cb_namewidth = 10; 3372 3373 print_import_config(&cb, name, nvroot, 0); 3374 3375 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP); 3376 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 3377 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS); 3378 3379 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) { 3380 (void) printf(gettext("\n\t%sAdditional devices are known to " 3381 "be part of this pool, though their\n" 3382 "\t%sexact configuration cannot be determined.\n"), 3383 indent, indent); 3384 } 3385 return (0); 3386 } 3387 3388 static boolean_t 3389 zfs_force_import_required(nvlist_t *config) 3390 { 3391 uint64_t state; 3392 uint64_t hostid = 0; 3393 nvlist_t *nvinfo; 3394 3395 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE); 3396 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3397 3398 /* 3399 * The hostid on LOAD_INFO comes from the MOS label via 3400 * spa_tryimport(). If its not there then we're likely talking to an 3401 * older kernel, so use the top one, which will be from the label 3402 * discovered in zpool_find_import(), or if a cachefile is in use, the 3403 * local hostid. 3404 */ 3405 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0) 3406 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, 3407 &hostid); 3408 3409 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid()) 3410 return (B_TRUE); 3411 3412 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) { 3413 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo, 3414 ZPOOL_CONFIG_MMP_STATE); 3415 3416 if (mmp_state != MMP_STATE_INACTIVE) 3417 return (B_TRUE); 3418 } 3419 3420 return (B_FALSE); 3421 } 3422 3423 /* 3424 * Perform the import for the given configuration. This passes the heavy 3425 * lifting off to zpool_import_props(), and then mounts the datasets contained 3426 * within the pool. 
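 * 'mntthreads' bounds the number of parallel mount threads handed to
 * zpool_enable_datasets(); a single-pool import gets the full mount_tp_nthr
 * budget, while 'zpool import -a' divides it between the pools being imported.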
3427 */ 3428 static int 3429 do_import(nvlist_t *config, const char *newname, const char *mntopts, 3430 nvlist_t *props, int flags, uint_t mntthreads) 3431 { 3432 int ret = 0; 3433 int ms_status = 0; 3434 zpool_handle_t *zhp; 3435 const char *name; 3436 uint64_t version; 3437 3438 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME); 3439 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION); 3440 3441 if (!SPA_VERSION_IS_SUPPORTED(version)) { 3442 (void) fprintf(stderr, gettext("cannot import '%s': pool " 3443 "is formatted using an unsupported ZFS version\n"), name); 3444 return (1); 3445 } else if (zfs_force_import_required(config) && 3446 !(flags & ZFS_IMPORT_ANY_HOST)) { 3447 mmp_state_t mmp_state = MMP_STATE_INACTIVE; 3448 nvlist_t *nvinfo; 3449 3450 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3451 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) 3452 mmp_state = fnvlist_lookup_uint64(nvinfo, 3453 ZPOOL_CONFIG_MMP_STATE); 3454 3455 if (mmp_state == MMP_STATE_ACTIVE) { 3456 const char *hostname = "<unknown>"; 3457 uint64_t hostid = 0; 3458 3459 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 3460 hostname = fnvlist_lookup_string(nvinfo, 3461 ZPOOL_CONFIG_MMP_HOSTNAME); 3462 3463 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3464 hostid = fnvlist_lookup_uint64(nvinfo, 3465 ZPOOL_CONFIG_MMP_HOSTID); 3466 3467 (void) fprintf(stderr, gettext("cannot import '%s': " 3468 "pool is imported on %s (hostid: " 3469 "0x%"PRIx64")\nExport the pool on the other " 3470 "system, then run 'zpool import'.\n"), 3471 name, hostname, hostid); 3472 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 3473 (void) fprintf(stderr, gettext("Cannot import '%s': " 3474 "pool has the multihost property on and the\n" 3475 "system's hostid is not set. Set a unique hostid " 3476 "with the zgenhostid(8) command.\n"), name); 3477 } else { 3478 const char *hostname = "<unknown>"; 3479 time_t timestamp = 0; 3480 uint64_t hostid = 0; 3481 3482 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME)) 3483 hostname = fnvlist_lookup_string(nvinfo, 3484 ZPOOL_CONFIG_HOSTNAME); 3485 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME)) 3486 hostname = fnvlist_lookup_string(config, 3487 ZPOOL_CONFIG_HOSTNAME); 3488 3489 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP)) 3490 timestamp = fnvlist_lookup_uint64(config, 3491 ZPOOL_CONFIG_TIMESTAMP); 3492 3493 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID)) 3494 hostid = fnvlist_lookup_uint64(nvinfo, 3495 ZPOOL_CONFIG_HOSTID); 3496 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID)) 3497 hostid = fnvlist_lookup_uint64(config, 3498 ZPOOL_CONFIG_HOSTID); 3499 3500 (void) fprintf(stderr, gettext("cannot import '%s': " 3501 "pool was previously in use from another system.\n" 3502 "Last accessed by %s (hostid=%"PRIx64") at %s" 3503 "The pool can be imported, use 'zpool import -f' " 3504 "to import the pool.\n"), name, hostname, 3505 hostid, ctime(&timestamp)); 3506 } 3507 3508 return (1); 3509 } 3510 3511 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0) 3512 return (1); 3513 3514 if (newname != NULL) 3515 name = newname; 3516 3517 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL) 3518 return (1); 3519 3520 /* 3521 * Loading keys is best effort. We don't want to return immediately 3522 * if it fails but we do want to give the error to the caller.
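 * (A key-load failure from 'zpool import -l' therefore still leaves the
 * pool imported, but the command exits non-zero.)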
3523 */ 3524 if (flags & ZFS_IMPORT_LOAD_KEYS && 3525 zfs_crypto_attempt_load_keys(g_zfs, name) != 0) 3526 ret = 1; 3527 3528 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL && 3529 !(flags & ZFS_IMPORT_ONLY)) { 3530 ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads); 3531 if (ms_status == EZFS_SHAREFAILED) { 3532 (void) fprintf(stderr, gettext("Import was " 3533 "successful, but unable to share some datasets\n")); 3534 } else if (ms_status == EZFS_MOUNTFAILED) { 3535 (void) fprintf(stderr, gettext("Import was " 3536 "successful, but unable to mount some datasets\n")); 3537 } 3538 } 3539 3540 zpool_close(zhp); 3541 return (ret); 3542 } 3543 3544 typedef struct import_parameters { 3545 nvlist_t *ip_config; 3546 const char *ip_mntopts; 3547 nvlist_t *ip_props; 3548 int ip_flags; 3549 uint_t ip_mntthreads; 3550 int *ip_err; 3551 } import_parameters_t; 3552 3553 static void 3554 do_import_task(void *arg) 3555 { 3556 import_parameters_t *ip = arg; 3557 *ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts, 3558 ip->ip_props, ip->ip_flags, ip->ip_mntthreads); 3559 free(ip); 3560 } 3561 3562 3563 static int 3564 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags, 3565 char *orig_name, char *new_name, importargs_t *import) 3566 { 3567 nvlist_t *config = NULL; 3568 nvlist_t *found_config = NULL; 3569 uint64_t pool_state; 3570 boolean_t pool_specified = (import->poolname != NULL || 3571 import->guid != 0); 3572 uint_t npools = 0; 3573 3574 3575 tpool_t *tp = NULL; 3576 if (import->do_all) { 3577 tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 3578 0, NULL); 3579 } 3580 3581 /* 3582 * At this point we have a list of import candidate configs. Even if 3583 * we were searching by pool name or guid, we still need to 3584 * post-process the list to deal with pool state and possible 3585 * duplicate names. 3586 */ 3587 int err = 0; 3588 nvpair_t *elem = NULL; 3589 boolean_t first = B_TRUE; 3590 if (!pool_specified && import->do_all) { 3591 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) 3592 npools++; 3593 } 3594 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { 3595 3596 verify(nvpair_value_nvlist(elem, &config) == 0); 3597 3598 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 3599 &pool_state) == 0); 3600 if (!import->do_destroyed && 3601 pool_state == POOL_STATE_DESTROYED) 3602 continue; 3603 if (import->do_destroyed && 3604 pool_state != POOL_STATE_DESTROYED) 3605 continue; 3606 3607 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY, 3608 import->policy) == 0); 3609 3610 if (!pool_specified) { 3611 if (first) 3612 first = B_FALSE; 3613 else if (!import->do_all) 3614 (void) fputc('\n', stdout); 3615 3616 if (import->do_all) { 3617 import_parameters_t *ip = safe_malloc( 3618 sizeof (import_parameters_t)); 3619 3620 ip->ip_config = config; 3621 ip->ip_mntopts = mntopts; 3622 ip->ip_props = props; 3623 ip->ip_flags = flags; 3624 ip->ip_mntthreads = mount_tp_nthr / npools; 3625 ip->ip_err = &err; 3626 3627 (void) tpool_dispatch(tp, do_import_task, 3628 (void *)ip); 3629 } else { 3630 /* 3631 * If we're importing from cachefile, then 3632 * we don't want to report errors until we 3633 * are in the scan phase of the import. If 3634 * we get an error, then we return that error 3635 * to invoke the scan phase. 
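 * (show_import() returns the zpool_status_t reason in that case, and
 * zpool_do_import() reacts by retrying the search with idata.scan set.)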
3636 */ 3637 if (import->cachefile && !import->scan) 3638 err = show_import(config, B_FALSE); 3639 else 3640 (void) show_import(config, B_TRUE); 3641 } 3642 } else if (import->poolname != NULL) { 3643 const char *name; 3644 3645 /* 3646 * We are searching for a pool based on name. 3647 */ 3648 verify(nvlist_lookup_string(config, 3649 ZPOOL_CONFIG_POOL_NAME, &name) == 0); 3650 3651 if (strcmp(name, import->poolname) == 0) { 3652 if (found_config != NULL) { 3653 (void) fprintf(stderr, gettext( 3654 "cannot import '%s': more than " 3655 "one matching pool\n"), 3656 import->poolname); 3657 (void) fprintf(stderr, gettext( 3658 "import by numeric ID instead\n")); 3659 err = B_TRUE; 3660 } 3661 found_config = config; 3662 } 3663 } else { 3664 uint64_t guid; 3665 3666 /* 3667 * Search for a pool by guid. 3668 */ 3669 verify(nvlist_lookup_uint64(config, 3670 ZPOOL_CONFIG_POOL_GUID, &guid) == 0); 3671 3672 if (guid == import->guid) 3673 found_config = config; 3674 } 3675 } 3676 if (import->do_all) { 3677 tpool_wait(tp); 3678 tpool_destroy(tp); 3679 } 3680 3681 /* 3682 * If we were searching for a specific pool, verify that we found a 3683 * pool, and then do the import. 3684 */ 3685 if (pool_specified && err == 0) { 3686 if (found_config == NULL) { 3687 (void) fprintf(stderr, gettext("cannot import '%s': " 3688 "no such pool available\n"), orig_name); 3689 err = B_TRUE; 3690 } else { 3691 err |= do_import(found_config, new_name, 3692 mntopts, props, flags, mount_tp_nthr); 3693 } 3694 } 3695 3696 /* 3697 * If we were just looking for pools, report an error if none were 3698 * found. 3699 */ 3700 if (!pool_specified && first) 3701 (void) fprintf(stderr, 3702 gettext("no pools available to import\n")); 3703 return (err); 3704 } 3705 3706 typedef struct target_exists_args { 3707 const char *poolname; 3708 uint64_t poolguid; 3709 } target_exists_args_t; 3710 3711 static int 3712 name_or_guid_exists(zpool_handle_t *zhp, void *data) 3713 { 3714 target_exists_args_t *args = data; 3715 nvlist_t *config = zpool_get_config(zhp, NULL); 3716 int found = 0; 3717 3718 if (config == NULL) 3719 return (0); 3720 3721 if (args->poolname != NULL) { 3722 const char *pool_name; 3723 3724 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3725 &pool_name) == 0); 3726 if (strcmp(pool_name, args->poolname) == 0) 3727 found = 1; 3728 } else { 3729 uint64_t pool_guid; 3730 3731 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3732 &pool_guid) == 0); 3733 if (pool_guid == args->poolguid) 3734 found = 1; 3735 } 3736 zpool_close(zhp); 3737 3738 return (found); 3739 } 3740 /* 3741 * zpool checkpoint <pool> 3742 * checkpoint --discard <pool> 3743 * 3744 * -d Discard the checkpoint from a checkpointed 3745 * --discard pool. 3746 * 3747 * -w Wait for discarding a checkpoint to complete. 3748 * --wait 3749 * 3750 * Checkpoints the specified pool, by taking a "snapshot" of its 3751 * current state. A pool can only have one checkpoint at a time. 
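 *
 * Illustrative usage (the pool name is a placeholder):
 *
 *	# zpool checkpoint tank
 *	# zpool checkpoint -d -w tank	(discard it and wait for the discard)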
3752 */ 3753 int 3754 zpool_do_checkpoint(int argc, char **argv) 3755 { 3756 boolean_t discard, wait; 3757 char *pool; 3758 zpool_handle_t *zhp; 3759 int c, err; 3760 3761 struct option long_options[] = { 3762 {"discard", no_argument, NULL, 'd'}, 3763 {"wait", no_argument, NULL, 'w'}, 3764 {0, 0, 0, 0} 3765 }; 3766 3767 discard = B_FALSE; 3768 wait = B_FALSE; 3769 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) { 3770 switch (c) { 3771 case 'd': 3772 discard = B_TRUE; 3773 break; 3774 case 'w': 3775 wait = B_TRUE; 3776 break; 3777 case '?': 3778 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 3779 optopt); 3780 usage(B_FALSE); 3781 } 3782 } 3783 3784 if (wait && !discard) { 3785 (void) fprintf(stderr, gettext("--wait only valid when " 3786 "--discard also specified\n")); 3787 usage(B_FALSE); 3788 } 3789 3790 argc -= optind; 3791 argv += optind; 3792 3793 if (argc < 1) { 3794 (void) fprintf(stderr, gettext("missing pool argument\n")); 3795 usage(B_FALSE); 3796 } 3797 3798 if (argc > 1) { 3799 (void) fprintf(stderr, gettext("too many arguments\n")); 3800 usage(B_FALSE); 3801 } 3802 3803 pool = argv[0]; 3804 3805 if ((zhp = zpool_open(g_zfs, pool)) == NULL) { 3806 /* As a special case, check for use of '/' in the name */ 3807 if (strchr(pool, '/') != NULL) 3808 (void) fprintf(stderr, gettext("'zpool checkpoint' " 3809 "doesn't work on datasets. To save the state " 3810 "of a dataset from a specific point in time " 3811 "please use 'zfs snapshot'\n")); 3812 return (1); 3813 } 3814 3815 if (discard) { 3816 err = (zpool_discard_checkpoint(zhp) != 0); 3817 if (err == 0 && wait) 3818 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD); 3819 } else { 3820 err = (zpool_checkpoint(zhp) != 0); 3821 } 3822 3823 zpool_close(zhp); 3824 3825 return (err); 3826 } 3827 3828 #define CHECKPOINT_OPT 1024 3829 3830 /* 3831 * zpool import [-d dir] [-D] 3832 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l] 3833 * [-d dir | -c cachefile | -s] [-f] -a 3834 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l] 3835 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id> 3836 * [newpool] 3837 * 3838 * -c Read pool information from a cachefile instead of searching 3839 * devices. If importing from a cachefile config fails, then 3840 * fallback to searching for devices only in the directories that 3841 * exist in the cachefile. 3842 * 3843 * -d Scan in a specific directory, other than /dev/. More than 3844 * one directory can be specified using multiple '-d' options. 3845 * 3846 * -D Scan for previously destroyed pools or import all or only 3847 * specified destroyed pools. 3848 * 3849 * -R Temporarily import the pool, with all mountpoints relative to 3850 * the given root. The pool will remain exported when the machine 3851 * is rebooted. 3852 * 3853 * -V Import even in the presence of faulted vdevs. This is an 3854 * intentionally undocumented option for testing purposes, and 3855 * treats the pool configuration as complete, leaving any bad 3856 * vdevs in the FAULTED state. In other words, it does verbatim 3857 * import. 3858 * 3859 * -f Force import, even if it appears that the pool is active. 3860 * 3861 * -F Attempt rewind if necessary. 3862 * 3863 * -n See if rewind would work, but don't actually rewind. 3864 * 3865 * -N Import the pool but don't mount datasets. 3866 * 3867 * -T Specify a starting txg to use for import. This option is 3868 * intentionally undocumented option for testing purposes. 3869 * 3870 * -a Import all pools found. 
3871 * 3872 * -l Load encryption keys while importing. 3873 * 3874 * -o Set property=value and/or temporary mount options (without '='). 3875 * 3876 * -s Scan using the default search path, the libblkid cache will 3877 * not be consulted. 3878 * 3879 * --rewind-to-checkpoint 3880 * Import the pool and revert back to the checkpoint. 3881 * 3882 * The import command scans for pools to import, and import pools based on pool 3883 * name and GUID. The pool can also be renamed as part of the import process. 3884 */ 3885 int 3886 zpool_do_import(int argc, char **argv) 3887 { 3888 char **searchdirs = NULL; 3889 char *env, *envdup = NULL; 3890 int nsearch = 0; 3891 int c; 3892 int err = 0; 3893 nvlist_t *pools = NULL; 3894 boolean_t do_all = B_FALSE; 3895 boolean_t do_destroyed = B_FALSE; 3896 char *mntopts = NULL; 3897 uint64_t searchguid = 0; 3898 char *searchname = NULL; 3899 char *propval; 3900 nvlist_t *policy = NULL; 3901 nvlist_t *props = NULL; 3902 int flags = ZFS_IMPORT_NORMAL; 3903 uint32_t rewind_policy = ZPOOL_NO_REWIND; 3904 boolean_t dryrun = B_FALSE; 3905 boolean_t do_rewind = B_FALSE; 3906 boolean_t xtreme_rewind = B_FALSE; 3907 boolean_t do_scan = B_FALSE; 3908 boolean_t pool_exists = B_FALSE; 3909 uint64_t txg = -1ULL; 3910 char *cachefile = NULL; 3911 importargs_t idata = { 0 }; 3912 char *endptr; 3913 3914 struct option long_options[] = { 3915 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT}, 3916 {0, 0, 0, 0} 3917 }; 3918 3919 /* check options */ 3920 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX", 3921 long_options, NULL)) != -1) { 3922 switch (c) { 3923 case 'a': 3924 do_all = B_TRUE; 3925 break; 3926 case 'c': 3927 cachefile = optarg; 3928 break; 3929 case 'd': 3930 searchdirs = safe_realloc(searchdirs, 3931 (nsearch + 1) * sizeof (char *)); 3932 searchdirs[nsearch++] = optarg; 3933 break; 3934 case 'D': 3935 do_destroyed = B_TRUE; 3936 break; 3937 case 'f': 3938 flags |= ZFS_IMPORT_ANY_HOST; 3939 break; 3940 case 'F': 3941 do_rewind = B_TRUE; 3942 break; 3943 case 'l': 3944 flags |= ZFS_IMPORT_LOAD_KEYS; 3945 break; 3946 case 'm': 3947 flags |= ZFS_IMPORT_MISSING_LOG; 3948 break; 3949 case 'n': 3950 dryrun = B_TRUE; 3951 break; 3952 case 'N': 3953 flags |= ZFS_IMPORT_ONLY; 3954 break; 3955 case 'o': 3956 if ((propval = strchr(optarg, '=')) != NULL) { 3957 *propval = '\0'; 3958 propval++; 3959 if (add_prop_list(optarg, propval, 3960 &props, B_TRUE)) 3961 goto error; 3962 } else { 3963 mntopts = optarg; 3964 } 3965 break; 3966 case 'R': 3967 if (add_prop_list(zpool_prop_to_name( 3968 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE)) 3969 goto error; 3970 if (add_prop_list_default(zpool_prop_to_name( 3971 ZPOOL_PROP_CACHEFILE), "none", &props)) 3972 goto error; 3973 break; 3974 case 's': 3975 do_scan = B_TRUE; 3976 break; 3977 case 't': 3978 flags |= ZFS_IMPORT_TEMP_NAME; 3979 if (add_prop_list_default(zpool_prop_to_name( 3980 ZPOOL_PROP_CACHEFILE), "none", &props)) 3981 goto error; 3982 break; 3983 3984 case 'T': 3985 errno = 0; 3986 txg = strtoull(optarg, &endptr, 0); 3987 if (errno != 0 || *endptr != '\0') { 3988 (void) fprintf(stderr, 3989 gettext("invalid txg value\n")); 3990 usage(B_FALSE); 3991 } 3992 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND; 3993 break; 3994 case 'V': 3995 flags |= ZFS_IMPORT_VERBATIM; 3996 break; 3997 case 'X': 3998 xtreme_rewind = B_TRUE; 3999 break; 4000 case CHECKPOINT_OPT: 4001 flags |= ZFS_IMPORT_CHECKPOINT; 4002 break; 4003 case ':': 4004 (void) fprintf(stderr, gettext("missing argument for " 4005 "'%c' 
option\n"), optopt); 4006 usage(B_FALSE); 4007 break; 4008 case '?': 4009 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 4010 optopt); 4011 usage(B_FALSE); 4012 } 4013 } 4014 4015 argc -= optind; 4016 argv += optind; 4017 4018 if (cachefile && nsearch != 0) { 4019 (void) fprintf(stderr, gettext("-c is incompatible with -d\n")); 4020 usage(B_FALSE); 4021 } 4022 4023 if (cachefile && do_scan) { 4024 (void) fprintf(stderr, gettext("-c is incompatible with -s\n")); 4025 usage(B_FALSE); 4026 } 4027 4028 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) { 4029 (void) fprintf(stderr, gettext("-l is incompatible with -N\n")); 4030 usage(B_FALSE); 4031 } 4032 4033 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) { 4034 (void) fprintf(stderr, gettext("-l is only meaningful during " 4035 "an import\n")); 4036 usage(B_FALSE); 4037 } 4038 4039 if ((dryrun || xtreme_rewind) && !do_rewind) { 4040 (void) fprintf(stderr, 4041 gettext("-n or -X only meaningful with -F\n")); 4042 usage(B_FALSE); 4043 } 4044 if (dryrun) 4045 rewind_policy = ZPOOL_TRY_REWIND; 4046 else if (do_rewind) 4047 rewind_policy = ZPOOL_DO_REWIND; 4048 if (xtreme_rewind) 4049 rewind_policy |= ZPOOL_EXTREME_REWIND; 4050 4051 /* In the future, we can capture further policy and include it here */ 4052 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 4053 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 || 4054 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, 4055 rewind_policy) != 0) 4056 goto error; 4057 4058 /* check argument count */ 4059 if (do_all) { 4060 if (argc != 0) { 4061 (void) fprintf(stderr, gettext("too many arguments\n")); 4062 usage(B_FALSE); 4063 } 4064 } else { 4065 if (argc > 2) { 4066 (void) fprintf(stderr, gettext("too many arguments\n")); 4067 usage(B_FALSE); 4068 } 4069 } 4070 4071 /* 4072 * Check for the effective uid. We do this explicitly here because 4073 * otherwise any attempt to discover pools will silently fail. 4074 */ 4075 if (argc == 0 && geteuid() != 0) { 4076 (void) fprintf(stderr, gettext("cannot " 4077 "discover pools: permission denied\n")); 4078 4079 free(searchdirs); 4080 nvlist_free(props); 4081 nvlist_free(policy); 4082 return (1); 4083 } 4084 4085 /* 4086 * Depending on the arguments given, we do one of the following: 4087 * 4088 * <none> Iterate through all pools and display information about 4089 * each one. 4090 * 4091 * -a Iterate through all pools and try to import each one. 4092 * 4093 * <id> Find the pool that corresponds to the given GUID/pool 4094 * name and import that one. 4095 * 4096 * -D Above options applies only to destroyed pools. 4097 */ 4098 if (argc != 0) { 4099 char *endptr; 4100 4101 errno = 0; 4102 searchguid = strtoull(argv[0], &endptr, 10); 4103 if (errno != 0 || *endptr != '\0') { 4104 searchname = argv[0]; 4105 searchguid = 0; 4106 } 4107 4108 /* 4109 * User specified a name or guid. Ensure it's unique. 4110 */ 4111 target_exists_args_t search = {searchname, searchguid}; 4112 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search); 4113 } 4114 4115 /* 4116 * Check the environment for the preferred search path. 
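 * ZPOOL_IMPORT_PATH is a colon-separated list of directories (for
 * example "/dev/disk/by-vdev:/dev/disk/by-id"); each entry is appended
 * to the device search list used below.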
4117 */ 4118 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) { 4119 char *dir, *tmp = NULL; 4120 4121 envdup = strdup(env); 4122 4123 for (dir = strtok_r(envdup, ":", &tmp); 4124 dir != NULL; 4125 dir = strtok_r(NULL, ":", &tmp)) { 4126 searchdirs = safe_realloc(searchdirs, 4127 (nsearch + 1) * sizeof (char *)); 4128 searchdirs[nsearch++] = dir; 4129 } 4130 } 4131 4132 idata.path = searchdirs; 4133 idata.paths = nsearch; 4134 idata.poolname = searchname; 4135 idata.guid = searchguid; 4136 idata.cachefile = cachefile; 4137 idata.scan = do_scan; 4138 idata.policy = policy; 4139 idata.do_destroyed = do_destroyed; 4140 idata.do_all = do_all; 4141 4142 libpc_handle_t lpch = { 4143 .lpc_lib_handle = g_zfs, 4144 .lpc_ops = &libzfs_config_ops, 4145 .lpc_printerr = B_TRUE 4146 }; 4147 pools = zpool_search_import(&lpch, &idata); 4148 4149 if (pools != NULL && pool_exists && 4150 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) { 4151 (void) fprintf(stderr, gettext("cannot import '%s': " 4152 "a pool with that name already exists\n"), 4153 argv[0]); 4154 (void) fprintf(stderr, gettext("use the form '%s " 4155 "<pool | id> <newpool>' to give it a new name\n"), 4156 "zpool import"); 4157 err = 1; 4158 } else if (pools == NULL && pool_exists) { 4159 (void) fprintf(stderr, gettext("cannot import '%s': " 4160 "a pool with that name is already created/imported,\n"), 4161 argv[0]); 4162 (void) fprintf(stderr, gettext("and no additional pools " 4163 "with that name were found\n")); 4164 err = 1; 4165 } else if (pools == NULL) { 4166 if (argc != 0) { 4167 (void) fprintf(stderr, gettext("cannot import '%s': " 4168 "no such pool available\n"), argv[0]); 4169 } 4170 err = 1; 4171 } 4172 4173 if (err == 1) { 4174 free(searchdirs); 4175 free(envdup); 4176 nvlist_free(policy); 4177 nvlist_free(pools); 4178 nvlist_free(props); 4179 return (1); 4180 } 4181 4182 err = import_pools(pools, props, mntopts, flags, 4183 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata); 4184 4185 /* 4186 * If we're using the cachefile and we failed to import, then 4187 * fallback to scanning the directory for pools that match 4188 * those in the cachefile. 4189 */ 4190 if (err != 0 && cachefile != NULL) { 4191 (void) printf(gettext("cachefile import failed, retrying\n")); 4192 4193 /* 4194 * We use the scan flag to gather the directories that exist 4195 * in the cachefile. If we need to fallback to searching for 4196 * the pool config, we will only search devices in these 4197 * directories. 4198 */ 4199 idata.scan = B_TRUE; 4200 nvlist_free(pools); 4201 pools = zpool_search_import(&lpch, &idata); 4202 4203 err = import_pools(pools, props, mntopts, flags, 4204 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, 4205 &idata); 4206 } 4207 4208 error: 4209 nvlist_free(props); 4210 nvlist_free(pools); 4211 nvlist_free(policy); 4212 free(searchdirs); 4213 free(envdup); 4214 4215 return (err ? 1 : 0); 4216 } 4217 4218 /* 4219 * zpool sync [-f] [pool] ... 4220 * 4221 * -f (undocumented) force uberblock (and config including zpool cache file) 4222 * update. 4223 * 4224 * Sync the specified pool(s). 4225 * Without arguments "zpool sync" will sync all pools. 4226 * This command initiates TXG sync(s) and will return after the TXG(s) commit. 
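 * For example, 'zpool sync' commits outstanding dirty data on every
 * imported pool, while 'zpool sync tank' syncs only the pool 'tank'.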
4227 * 4228 */ 4229 static int 4230 zpool_do_sync(int argc, char **argv) 4231 { 4232 int ret; 4233 boolean_t force = B_FALSE; 4234 4235 /* check options */ 4236 while ((ret = getopt(argc, argv, "f")) != -1) { 4237 switch (ret) { 4238 case 'f': 4239 force = B_TRUE; 4240 break; 4241 case '?': 4242 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 4243 optopt); 4244 usage(B_FALSE); 4245 } 4246 } 4247 4248 argc -= optind; 4249 argv += optind; 4250 4251 /* if argc == 0 we will execute zpool_sync_one on all pools */ 4252 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 4253 B_FALSE, zpool_sync_one, &force); 4254 4255 return (ret); 4256 } 4257 4258 typedef struct iostat_cbdata { 4259 uint64_t cb_flags; 4260 int cb_namewidth; 4261 int cb_iteration; 4262 boolean_t cb_verbose; 4263 boolean_t cb_literal; 4264 boolean_t cb_scripted; 4265 zpool_list_t *cb_list; 4266 vdev_cmd_data_list_t *vcdl; 4267 vdev_cbdata_t cb_vdevs; 4268 } iostat_cbdata_t; 4269 4270 /* iostat labels */ 4271 typedef struct name_and_columns { 4272 const char *name; /* Column name */ 4273 unsigned int columns; /* Center name to this number of columns */ 4274 } name_and_columns_t; 4275 4276 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */ 4277 4278 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] = 4279 { 4280 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2}, 4281 {NULL}}, 4282 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 4283 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1}, 4284 {NULL}}, 4285 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2}, 4286 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2}, 4287 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}}, 4288 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 4289 {"asyncq_wait", 2}, {NULL}}, 4290 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2}, 4291 {"async_read", 2}, {"async_write", 2}, {"scrub", 2}, 4292 {"trim", 2}, {"rebuild", 2}, {NULL}}, 4293 }; 4294 4295 /* Shorthand - if "columns" field not set, default to 1 column */ 4296 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] = 4297 { 4298 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"}, 4299 {"write"}, {NULL}}, 4300 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 4301 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"}, 4302 {NULL}}, 4303 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"}, 4304 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"}, 4305 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}}, 4306 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 4307 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"}, 4308 {NULL}}, 4309 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 4310 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 4311 {"ind"}, {"agg"}, {NULL}}, 4312 }; 4313 4314 static const char *histo_to_title[] = { 4315 [IOS_L_HISTO] = "latency", 4316 [IOS_RQ_HISTO] = "req_size", 4317 }; 4318 4319 /* 4320 * Return the number of labels in a null-terminated name_and_columns_t 4321 * array. 4322 * 4323 */ 4324 static unsigned int 4325 label_array_len(const name_and_columns_t *labels) 4326 { 4327 int i = 0; 4328 4329 while (labels[i].name) 4330 i++; 4331 4332 return (i); 4333 } 4334 4335 /* 4336 * Return the number of strings in a null-terminated string array. 
4337 * For example: 4338 * 4339 * const char foo[] = {"bar", "baz", NULL} 4340 * 4341 * returns 2 4342 */ 4343 static uint64_t 4344 str_array_len(const char *array[]) 4345 { 4346 uint64_t i = 0; 4347 while (array[i]) 4348 i++; 4349 4350 return (i); 4351 } 4352 4353 4354 /* 4355 * Return a default column width for default/latency/queue columns. This does 4356 * not include histograms, which have their columns autosized. 4357 */ 4358 static unsigned int 4359 default_column_width(iostat_cbdata_t *cb, enum iostat_type type) 4360 { 4361 unsigned long column_width = 5; /* Normal niceprint */ 4362 static unsigned long widths[] = { 4363 /* 4364 * Choose some sane default column sizes for printing the 4365 * raw numbers. 4366 */ 4367 [IOS_DEFAULT] = 15, /* 1PB capacity */ 4368 [IOS_LATENCY] = 10, /* 1B ns = 10sec */ 4369 [IOS_QUEUES] = 6, /* 1M queue entries */ 4370 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */ 4371 [IOS_RQ_HISTO] = 6, /* 1M queue entries */ 4372 }; 4373 4374 if (cb->cb_literal) 4375 column_width = widths[type]; 4376 4377 return (column_width); 4378 } 4379 4380 /* 4381 * Print the column labels, i.e: 4382 * 4383 * capacity operations bandwidth 4384 * alloc free read write read write ... 4385 * 4386 * If force_column_width is set, use it for the column width. If not set, use 4387 * the default column width. 4388 */ 4389 static void 4390 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width, 4391 const name_and_columns_t labels[][IOSTAT_MAX_LABELS]) 4392 { 4393 int i, idx, s; 4394 int text_start, rw_column_width, spaces_to_end; 4395 uint64_t flags = cb->cb_flags; 4396 uint64_t f; 4397 unsigned int column_width = force_column_width; 4398 4399 /* For each bit set in flags */ 4400 for (f = flags; f; f &= ~(1ULL << idx)) { 4401 idx = lowbit64(f) - 1; 4402 if (!force_column_width) 4403 column_width = default_column_width(cb, idx); 4404 /* Print our top labels centered over "read write" label. */ 4405 for (i = 0; i < label_array_len(labels[idx]); i++) { 4406 const char *name = labels[idx][i].name; 4407 /* 4408 * We treat labels[][].columns == 0 as shorthand 4409 * for one column. It makes writing out the label 4410 * tables more concise. 4411 */ 4412 unsigned int columns = MAX(1, labels[idx][i].columns); 4413 unsigned int slen = strlen(name); 4414 4415 rw_column_width = (column_width * columns) + 4416 (2 * (columns - 1)); 4417 4418 text_start = (int)((rw_column_width) / columns - 4419 slen / columns); 4420 if (text_start < 0) 4421 text_start = 0; 4422 4423 printf(" "); /* Two spaces between columns */ 4424 4425 /* Space from beginning of column to label */ 4426 for (s = 0; s < text_start; s++) 4427 printf(" "); 4428 4429 printf("%s", name); 4430 4431 /* Print space after label to end of column */ 4432 spaces_to_end = rw_column_width - text_start - slen; 4433 if (spaces_to_end < 0) 4434 spaces_to_end = 0; 4435 4436 for (s = 0; s < spaces_to_end; s++) 4437 printf(" "); 4438 } 4439 } 4440 } 4441 4442 4443 /* 4444 * print_cmd_columns - Print custom column titles from -c 4445 * 4446 * If the user specified the "zpool status|iostat -c" then print their custom 4447 * column titles in the header. For example, print_cmd_columns() would print 4448 * the " col1 col2" part of this: 4449 * 4450 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2' 4451 * ... 
4452 * capacity operations bandwidth 4453 * pool alloc free read write read write col1 col2 4454 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 4455 * mypool 269K 1008M 0 0 107 946 4456 * mirror 269K 1008M 0 0 107 946 4457 * sdb - - 0 0 102 473 val1 val2 4458 * sdc - - 0 0 5 473 val1 val2 4459 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 4460 */ 4461 static void 4462 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes) 4463 { 4464 int i, j; 4465 vdev_cmd_data_t *data = &vcdl->data[0]; 4466 4467 if (vcdl->count == 0 || data == NULL) 4468 return; 4469 4470 /* 4471 * Each vdev cmd should have the same column names unless the user did 4472 * something weird with their cmd. Just take the column names from the 4473 * first vdev and assume it works for all of them. 4474 */ 4475 for (i = 0; i < vcdl->uniq_cols_cnt; i++) { 4476 printf(" "); 4477 if (use_dashes) { 4478 for (j = 0; j < vcdl->uniq_cols_width[i]; j++) 4479 printf("-"); 4480 } else { 4481 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i], 4482 vcdl->uniq_cols[i]); 4483 } 4484 } 4485 } 4486 4487 4488 /* 4489 * Utility function to print out a line of dashes like: 4490 * 4491 * -------------------------------- ----- ----- ----- ----- ----- 4492 * 4493 * ...or a dashed named-row line like: 4494 * 4495 * logs - - - - - 4496 * 4497 * @cb: iostat data 4498 * 4499 * @force_column_width If non-zero, use the value as the column width. 4500 * Otherwise use the default column widths. 4501 * 4502 * @name: Print a dashed named-row line starting 4503 * with @name. Otherwise, print a regular 4504 * dashed line. 4505 */ 4506 static void 4507 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width, 4508 const char *name) 4509 { 4510 int i; 4511 unsigned int namewidth; 4512 uint64_t flags = cb->cb_flags; 4513 uint64_t f; 4514 int idx; 4515 const name_and_columns_t *labels; 4516 const char *title; 4517 4518 4519 if (cb->cb_flags & IOS_ANYHISTO_M) { 4520 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 4521 } else if (cb->cb_vdevs.cb_names_count) { 4522 title = "vdev"; 4523 } else { 4524 title = "pool"; 4525 } 4526 4527 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 4528 name ? 
strlen(name) : 0); 4529 4530 4531 if (name) { 4532 printf("%-*s", namewidth, name); 4533 } else { 4534 for (i = 0; i < namewidth; i++) 4535 (void) printf("-"); 4536 } 4537 4538 /* For each bit in flags */ 4539 for (f = flags; f; f &= ~(1ULL << idx)) { 4540 unsigned int column_width; 4541 idx = lowbit64(f) - 1; 4542 if (force_column_width) 4543 column_width = force_column_width; 4544 else 4545 column_width = default_column_width(cb, idx); 4546 4547 labels = iostat_bottom_labels[idx]; 4548 for (i = 0; i < label_array_len(labels); i++) { 4549 if (name) 4550 printf(" %*s-", column_width - 1, " "); 4551 else 4552 printf(" %.*s", column_width, 4553 "--------------------"); 4554 } 4555 } 4556 } 4557 4558 4559 static void 4560 print_iostat_separator_impl(iostat_cbdata_t *cb, 4561 unsigned int force_column_width) 4562 { 4563 print_iostat_dashes(cb, force_column_width, NULL); 4564 } 4565 4566 static void 4567 print_iostat_separator(iostat_cbdata_t *cb) 4568 { 4569 print_iostat_separator_impl(cb, 0); 4570 } 4571 4572 static void 4573 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width, 4574 const char *histo_vdev_name) 4575 { 4576 unsigned int namewidth; 4577 const char *title; 4578 4579 color_start(ANSI_BOLD); 4580 4581 if (cb->cb_flags & IOS_ANYHISTO_M) { 4582 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 4583 } else if (cb->cb_vdevs.cb_names_count) { 4584 title = "vdev"; 4585 } else { 4586 title = "pool"; 4587 } 4588 4589 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 4590 histo_vdev_name ? strlen(histo_vdev_name) : 0); 4591 4592 if (histo_vdev_name) 4593 printf("%-*s", namewidth, histo_vdev_name); 4594 else 4595 printf("%*s", namewidth, ""); 4596 4597 4598 print_iostat_labels(cb, force_column_width, iostat_top_labels); 4599 printf("\n"); 4600 4601 printf("%-*s", namewidth, title); 4602 4603 print_iostat_labels(cb, force_column_width, iostat_bottom_labels); 4604 if (cb->vcdl != NULL) 4605 print_cmd_columns(cb->vcdl, 0); 4606 4607 printf("\n"); 4608 4609 print_iostat_separator_impl(cb, force_column_width); 4610 4611 if (cb->vcdl != NULL) 4612 print_cmd_columns(cb->vcdl, 1); 4613 4614 color_end(); 4615 4616 printf("\n"); 4617 } 4618 4619 static void 4620 print_iostat_header(iostat_cbdata_t *cb) 4621 { 4622 print_iostat_header_impl(cb, 0, NULL); 4623 } 4624 4625 /* 4626 * Prints a size string (i.e. 120M) with the suffix ("M") colored 4627 * by order of magnitude. Uses column_size to add padding. 4628 */ 4629 static void 4630 print_stat_color(const char *statbuf, unsigned int column_size) 4631 { 4632 fputs(" ", stdout); 4633 size_t len = strlen(statbuf); 4634 while (len < column_size) { 4635 fputc(' ', stdout); 4636 column_size--; 4637 } 4638 if (*statbuf == '0') { 4639 color_start(ANSI_GRAY); 4640 fputc('0', stdout); 4641 } else { 4642 for (; *statbuf; statbuf++) { 4643 if (*statbuf == 'K') color_start(ANSI_GREEN); 4644 else if (*statbuf == 'M') color_start(ANSI_YELLOW); 4645 else if (*statbuf == 'G') color_start(ANSI_RED); 4646 else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE); 4647 else if (*statbuf == 'P') color_start(ANSI_MAGENTA); 4648 else if (*statbuf == 'E') color_start(ANSI_CYAN); 4649 fputc(*statbuf, stdout); 4650 if (--column_size <= 0) 4651 break; 4652 } 4653 } 4654 color_end(); 4655 } 4656 4657 /* 4658 * Display a single statistic. 
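 * The value is formatted with zfs_nicenum_format() and then either
 * printed tab-separated (scripted mode) or right-aligned and colorized
 * by print_stat_color().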
4659 */ 4660 static void 4661 print_one_stat(uint64_t value, enum zfs_nicenum_format format, 4662 unsigned int column_size, boolean_t scripted) 4663 { 4664 char buf[64]; 4665 4666 zfs_nicenum_format(value, buf, sizeof (buf), format); 4667 4668 if (scripted) 4669 printf("\t%s", buf); 4670 else 4671 print_stat_color(buf, column_size); 4672 } 4673 4674 /* 4675 * Calculate the default vdev stats 4676 * 4677 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting 4678 * stats into calcvs. 4679 */ 4680 static void 4681 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs, 4682 vdev_stat_t *calcvs) 4683 { 4684 int i; 4685 4686 memcpy(calcvs, newvs, sizeof (*calcvs)); 4687 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++) 4688 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]); 4689 4690 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++) 4691 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]); 4692 } 4693 4694 /* 4695 * Internal representation of the extended iostats data. 4696 * 4697 * The extended iostat stats are exported in nvlists as either uint64_t arrays 4698 * or single uint64_t's. We make both look like arrays to make them easier 4699 * to process. In order to make single uint64_t's look like arrays, we set 4700 * __data to the stat data, and then set *data = &__data with count = 1. Then, 4701 * we can just use *data and count. 4702 */ 4703 struct stat_array { 4704 uint64_t *data; 4705 uint_t count; /* Number of entries in data[] */ 4706 uint64_t __data; /* Only used when data is a single uint64_t */ 4707 }; 4708 4709 static uint64_t 4710 stat_histo_max(struct stat_array *nva, unsigned int len) 4711 { 4712 uint64_t max = 0; 4713 int i; 4714 for (i = 0; i < len; i++) 4715 max = MAX(max, array64_max(nva[i].data, nva[i].count)); 4716 4717 return (max); 4718 } 4719 4720 /* 4721 * Helper function to lookup a uint64_t array or uint64_t value and store its 4722 * data as a stat_array. If the nvpair is a single uint64_t value, then we make 4723 * it look like a one element array to make it easier to process. 4724 */ 4725 static int 4726 nvpair64_to_stat_array(nvlist_t *nvl, const char *name, 4727 struct stat_array *nva) 4728 { 4729 nvpair_t *tmp; 4730 int ret; 4731 4732 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0); 4733 switch (nvpair_type(tmp)) { 4734 case DATA_TYPE_UINT64_ARRAY: 4735 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count); 4736 break; 4737 case DATA_TYPE_UINT64: 4738 ret = nvpair_value_uint64(tmp, &nva->__data); 4739 nva->data = &nva->__data; 4740 nva->count = 1; 4741 break; 4742 default: 4743 /* Not a uint64_t */ 4744 ret = EINVAL; 4745 break; 4746 } 4747 4748 return (ret); 4749 } 4750 4751 /* 4752 * Given a list of nvlist names, look up the extended stats in newnv and oldnv, 4753 * subtract them, and return the results in a newly allocated stat_array. 4754 * You must free the returned array after you are done with it with 4755 * free_calc_stats(). 4756 * 4757 * Additionally, you can set "oldnv" to NULL if you simply want the newnv 4758 * values. 
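 * (Queue depth stats, for example, are fetched with oldnv == NULL since
 * they are instantaneous values rather than deltas.)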
4759 */ 4760 static struct stat_array * 4761 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv, 4762 nvlist_t *newnv) 4763 { 4764 nvlist_t *oldnvx = NULL, *newnvx; 4765 struct stat_array *oldnva, *newnva, *calcnva; 4766 int i, j; 4767 unsigned int alloc_size = (sizeof (struct stat_array)) * len; 4768 4769 /* Extract our extended stats nvlist from the main list */ 4770 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX, 4771 &newnvx) == 0); 4772 if (oldnv) { 4773 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX, 4774 &oldnvx) == 0); 4775 } 4776 4777 newnva = safe_malloc(alloc_size); 4778 oldnva = safe_malloc(alloc_size); 4779 calcnva = safe_malloc(alloc_size); 4780 4781 for (j = 0; j < len; j++) { 4782 verify(nvpair64_to_stat_array(newnvx, names[j], 4783 &newnva[j]) == 0); 4784 calcnva[j].count = newnva[j].count; 4785 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]); 4786 calcnva[j].data = safe_malloc(alloc_size); 4787 memcpy(calcnva[j].data, newnva[j].data, alloc_size); 4788 4789 if (oldnvx) { 4790 verify(nvpair64_to_stat_array(oldnvx, names[j], 4791 &oldnva[j]) == 0); 4792 for (i = 0; i < oldnva[j].count; i++) 4793 calcnva[j].data[i] -= oldnva[j].data[i]; 4794 } 4795 } 4796 free(newnva); 4797 free(oldnva); 4798 return (calcnva); 4799 } 4800 4801 static void 4802 free_calc_stats(struct stat_array *nva, unsigned int len) 4803 { 4804 int i; 4805 for (i = 0; i < len; i++) 4806 free(nva[i].data); 4807 4808 free(nva); 4809 } 4810 4811 static void 4812 print_iostat_histo(struct stat_array *nva, unsigned int len, 4813 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth, 4814 double scale) 4815 { 4816 int i, j; 4817 char buf[6]; 4818 uint64_t val; 4819 enum zfs_nicenum_format format; 4820 unsigned int buckets; 4821 unsigned int start_bucket; 4822 4823 if (cb->cb_literal) 4824 format = ZFS_NICENUM_RAW; 4825 else 4826 format = ZFS_NICENUM_1024; 4827 4828 /* All these histos are the same size, so just use nva[0].count */ 4829 buckets = nva[0].count; 4830 4831 if (cb->cb_flags & IOS_RQ_HISTO_M) { 4832 /* Start at 512 - req size should never be lower than this */ 4833 start_bucket = 9; 4834 } else { 4835 start_bucket = 0; 4836 } 4837 4838 for (j = start_bucket; j < buckets; j++) { 4839 /* Print histogram bucket label */ 4840 if (cb->cb_flags & IOS_L_HISTO_M) { 4841 /* Ending range of this bucket */ 4842 val = (1UL << (j + 1)) - 1; 4843 zfs_nicetime(val, buf, sizeof (buf)); 4844 } else { 4845 /* Request size (starting range of bucket) */ 4846 val = (1UL << j); 4847 zfs_nicenum(val, buf, sizeof (buf)); 4848 } 4849 4850 if (cb->cb_scripted) 4851 printf("%llu", (u_longlong_t)val); 4852 else 4853 printf("%-*s", namewidth, buf); 4854 4855 /* Print the values on the line */ 4856 for (i = 0; i < len; i++) { 4857 print_one_stat(nva[i].data[j] * scale, format, 4858 column_width, cb->cb_scripted); 4859 } 4860 printf("\n"); 4861 } 4862 } 4863 4864 static void 4865 print_solid_separator(unsigned int length) 4866 { 4867 while (length--) 4868 printf("-"); 4869 printf("\n"); 4870 } 4871 4872 static void 4873 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv, 4874 nvlist_t *newnv, double scale, const char *name) 4875 { 4876 unsigned int column_width; 4877 unsigned int namewidth; 4878 unsigned int entire_width; 4879 enum iostat_type type; 4880 struct stat_array *nva; 4881 const char **names; 4882 unsigned int names_len; 4883 4884 /* What type of histo are we? 
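	   (i.e. latency or request size, as selected by IOS_HISTO_IDX())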
*/ 4885 type = IOS_HISTO_IDX(cb->cb_flags); 4886 4887 /* Get NULL-terminated array of nvlist names for our histo */ 4888 names = vsx_type_to_nvlist[type]; 4889 names_len = str_array_len(names); /* num of names */ 4890 4891 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv); 4892 4893 if (cb->cb_literal) { 4894 column_width = MAX(5, 4895 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1); 4896 } else { 4897 column_width = 5; 4898 } 4899 4900 namewidth = MAX(cb->cb_namewidth, 4901 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)])); 4902 4903 /* 4904 * Calculate the entire line width of what we're printing. The 4905 * +2 is for the two spaces between columns: 4906 */ 4907 /* read write */ 4908 /* ----- ----- */ 4909 /* |___| <---------- column_width */ 4910 /* */ 4911 /* |__________| <--- entire_width */ 4912 /* */ 4913 entire_width = namewidth + (column_width + 2) * 4914 label_array_len(iostat_bottom_labels[type]); 4915 4916 if (cb->cb_scripted) 4917 printf("%s\n", name); 4918 else 4919 print_iostat_header_impl(cb, column_width, name); 4920 4921 print_iostat_histo(nva, names_len, cb, column_width, 4922 namewidth, scale); 4923 4924 free_calc_stats(nva, names_len); 4925 if (!cb->cb_scripted) 4926 print_solid_separator(entire_width); 4927 } 4928 4929 /* 4930 * Calculate the average latency of a power-of-two latency histogram 4931 */ 4932 static uint64_t 4933 single_histo_average(uint64_t *histo, unsigned int buckets) 4934 { 4935 int i; 4936 uint64_t count = 0, total = 0; 4937 4938 for (i = 0; i < buckets; i++) { 4939 /* 4940 * Our buckets are power-of-two latency ranges. Use the 4941 * midpoint latency of each bucket to calculate the average. 4942 * For example: 4943 * 4944 * Bucket Midpoint 4945 * 8ns-15ns: 12ns 4946 * 16ns-31ns: 24ns 4947 * ... 4948 */ 4949 if (histo[i] != 0) { 4950 total += histo[i] * (((1UL << i) + ((1UL << i)/2))); 4951 count += histo[i]; 4952 } 4953 } 4954 4955 /* Prevent divide by zero */ 4956 return (count == 0 ? 
0 : total / count); 4957 } 4958 4959 static void 4960 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv) 4961 { 4962 const char *names[] = { 4963 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, 4964 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, 4965 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, 4966 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, 4967 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, 4968 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, 4969 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, 4970 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, 4971 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, 4972 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, 4973 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE, 4974 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE, 4975 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE, 4976 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE, 4977 }; 4978 4979 struct stat_array *nva; 4980 4981 unsigned int column_width = default_column_width(cb, IOS_QUEUES); 4982 enum zfs_nicenum_format format; 4983 4984 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv); 4985 4986 if (cb->cb_literal) 4987 format = ZFS_NICENUM_RAW; 4988 else 4989 format = ZFS_NICENUM_1024; 4990 4991 for (int i = 0; i < ARRAY_SIZE(names); i++) { 4992 uint64_t val = nva[i].data[0]; 4993 print_one_stat(val, format, column_width, cb->cb_scripted); 4994 } 4995 4996 free_calc_stats(nva, ARRAY_SIZE(names)); 4997 } 4998 4999 static void 5000 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv, 5001 nvlist_t *newnv) 5002 { 5003 int i; 5004 uint64_t val; 5005 const char *names[] = { 5006 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, 5007 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, 5008 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, 5009 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, 5010 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO, 5011 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO, 5012 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO, 5013 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO, 5014 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO, 5015 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, 5016 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO, 5017 }; 5018 struct stat_array *nva; 5019 5020 unsigned int column_width = default_column_width(cb, IOS_LATENCY); 5021 enum zfs_nicenum_format format; 5022 5023 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv); 5024 5025 if (cb->cb_literal) 5026 format = ZFS_NICENUM_RAWTIME; 5027 else 5028 format = ZFS_NICENUM_TIME; 5029 5030 /* Print our avg latencies on the line */ 5031 for (i = 0; i < ARRAY_SIZE(names); i++) { 5032 /* Compute average latency for a latency histo */ 5033 val = single_histo_average(nva[i].data, nva[i].count); 5034 print_one_stat(val, format, column_width, cb->cb_scripted); 5035 } 5036 free_calc_stats(nva, ARRAY_SIZE(names)); 5037 } 5038 5039 /* 5040 * Print default statistics (capacity/operations/bandwidth) 5041 */ 5042 static void 5043 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale) 5044 { 5045 unsigned int column_width = default_column_width(cb, IOS_DEFAULT); 5046 enum zfs_nicenum_format format; 5047 char na; /* char to print for "not applicable" values */ 5048 5049 if (cb->cb_literal) { 5050 format = ZFS_NICENUM_RAW; 5051 na = '0'; 5052 } else { 5053 format = ZFS_NICENUM_1024; 5054 na = '-'; 5055 } 5056 5057 /* only toplevel vdevs have capacity stats */ 5058 if (vs->vs_space == 0) { 5059 if (cb->cb_scripted) 5060 printf("\t%c\t%c", na, na); 5061 else 5062 printf(" %*c %*c", column_width, na, column_width, 5063 na); 5064 } else { 5065 print_one_stat(vs->vs_alloc, format, column_width, 5066 cb->cb_scripted); 5067 print_one_stat(vs->vs_space - vs->vs_alloc, format, 5068 column_width, cb->cb_scripted); 5069 } 5070 5071 
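	/* The op and byte deltas below are multiplied by 'scale' (normally NANOSEC / tdelta) to give per-second rates. */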
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale), 5072 format, column_width, cb->cb_scripted); 5073 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale), 5074 format, column_width, cb->cb_scripted); 5075 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale), 5076 format, column_width, cb->cb_scripted); 5077 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale), 5078 format, column_width, cb->cb_scripted); 5079 } 5080 5081 static const char *const class_name[] = { 5082 VDEV_ALLOC_BIAS_DEDUP, 5083 VDEV_ALLOC_BIAS_SPECIAL, 5084 VDEV_ALLOC_CLASS_LOGS 5085 }; 5086 5087 /* 5088 * Print out all the statistics for the given vdev. This can either be the 5089 * toplevel configuration, or called recursively. If 'name' is NULL, then this 5090 * is a verbose output, and we don't want to display the toplevel pool stats. 5091 * 5092 * Returns the number of stat lines printed. 5093 */ 5094 static unsigned int 5095 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, 5096 nvlist_t *newnv, iostat_cbdata_t *cb, int depth) 5097 { 5098 nvlist_t **oldchild, **newchild; 5099 uint_t c, children, oldchildren; 5100 vdev_stat_t *oldvs, *newvs, *calcvs; 5101 vdev_stat_t zerovs = { 0 }; 5102 char *vname; 5103 int i; 5104 int ret = 0; 5105 uint64_t tdelta; 5106 double scale; 5107 5108 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) 5109 return (ret); 5110 5111 calcvs = safe_malloc(sizeof (*calcvs)); 5112 5113 if (oldnv != NULL) { 5114 verify(nvlist_lookup_uint64_array(oldnv, 5115 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0); 5116 } else { 5117 oldvs = &zerovs; 5118 } 5119 5120 /* Do we only want to see a specific vdev? */ 5121 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) { 5122 /* Yes we do. Is this the vdev? */ 5123 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) { 5124 /* 5125 * This is our vdev. Since it is the only vdev we 5126 * will be displaying, make depth = 0 so that it 5127 * doesn't get indented. 5128 */ 5129 depth = 0; 5130 break; 5131 } 5132 } 5133 5134 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) { 5135 /* Couldn't match the name */ 5136 goto children; 5137 } 5138 5139 5140 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS, 5141 (uint64_t **)&newvs, &c) == 0); 5142 5143 /* 5144 * Print the vdev name unless it is a histogram. Histograms 5145 * display the vdev name in the header itself. 5146 */ 5147 if (!(cb->cb_flags & IOS_ANYHISTO_M)) { 5148 if (cb->cb_scripted) { 5149 printf("%s", name); 5150 } else { 5151 if (strlen(name) + depth > cb->cb_namewidth) 5152 (void) printf("%*s%s", depth, "", name); 5153 else 5154 (void) printf("%*s%s%*s", depth, "", name, 5155 (int)(cb->cb_namewidth - strlen(name) - 5156 depth), ""); 5157 } 5158 } 5159 5160 /* Calculate our scaling factor */ 5161 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp; 5162 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) { 5163 /* 5164 * If we specify printing histograms with no time interval, then 5165 * print the histogram numbers over the entire lifetime of the 5166 * vdev.
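 * Otherwise the stats are normalized to per-second rates below by
 * scaling with NANOSEC / tdelta, where tdelta is the time between the
 * two samples in nanoseconds.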
5167 */ 5168 scale = 1; 5169 } else { 5170 if (tdelta == 0) 5171 scale = 1.0; 5172 else 5173 scale = (double)NANOSEC / tdelta; 5174 } 5175 5176 if (cb->cb_flags & IOS_DEFAULT_M) { 5177 calc_default_iostats(oldvs, newvs, calcvs); 5178 print_iostat_default(calcvs, cb, scale); 5179 } 5180 if (cb->cb_flags & IOS_LATENCY_M) 5181 print_iostat_latency(cb, oldnv, newnv); 5182 if (cb->cb_flags & IOS_QUEUES_M) 5183 print_iostat_queues(cb, newnv); 5184 if (cb->cb_flags & IOS_ANYHISTO_M) { 5185 printf("\n"); 5186 print_iostat_histos(cb, oldnv, newnv, scale, name); 5187 } 5188 5189 if (cb->vcdl != NULL) { 5190 const char *path; 5191 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH, 5192 &path) == 0) { 5193 printf(" "); 5194 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 5195 } 5196 } 5197 5198 if (!(cb->cb_flags & IOS_ANYHISTO_M)) 5199 printf("\n"); 5200 5201 ret++; 5202 5203 children: 5204 5205 free(calcvs); 5206 5207 if (!cb->cb_verbose) 5208 return (ret); 5209 5210 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN, 5211 &newchild, &children) != 0) 5212 return (ret); 5213 5214 if (oldnv) { 5215 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN, 5216 &oldchild, &oldchildren) != 0) 5217 return (ret); 5218 5219 children = MIN(oldchildren, children); 5220 } 5221 5222 /* 5223 * print normal top-level devices 5224 */ 5225 for (c = 0; c < children; c++) { 5226 uint64_t ishole = B_FALSE, islog = B_FALSE; 5227 5228 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE, 5229 &ishole); 5230 5231 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG, 5232 &islog); 5233 5234 if (ishole || islog) 5235 continue; 5236 5237 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 5238 continue; 5239 5240 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5241 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID); 5242 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, 5243 newchild[c], cb, depth + 2); 5244 free(vname); 5245 } 5246 5247 /* 5248 * print all other top-level devices 5249 */ 5250 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { 5251 boolean_t printed = B_FALSE; 5252 5253 for (c = 0; c < children; c++) { 5254 uint64_t islog = B_FALSE; 5255 const char *bias = NULL; 5256 const char *type = NULL; 5257 5258 (void) nvlist_lookup_uint64(newchild[c], 5259 ZPOOL_CONFIG_IS_LOG, &islog); 5260 if (islog) { 5261 bias = VDEV_ALLOC_CLASS_LOGS; 5262 } else { 5263 (void) nvlist_lookup_string(newchild[c], 5264 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 5265 (void) nvlist_lookup_string(newchild[c], 5266 ZPOOL_CONFIG_TYPE, &type); 5267 } 5268 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 5269 continue; 5270 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 5271 continue; 5272 5273 if (!printed) { 5274 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && 5275 !cb->cb_scripted && 5276 !cb->cb_vdevs.cb_names) { 5277 print_iostat_dashes(cb, 0, 5278 class_name[n]); 5279 } 5280 printf("\n"); 5281 printed = B_TRUE; 5282 } 5283 5284 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5285 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID); 5286 ret += print_vdev_stats(zhp, vname, oldnv ? 
5287 oldchild[c] : NULL, newchild[c], cb, depth + 2); 5288 free(vname); 5289 } 5290 } 5291 5292 /* 5293 * Include level 2 ARC devices in iostat output 5294 */ 5295 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE, 5296 &newchild, &children) != 0) 5297 return (ret); 5298 5299 if (oldnv) { 5300 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE, 5301 &oldchild, &oldchildren) != 0) 5302 return (ret); 5303 5304 children = MIN(oldchildren, children); 5305 } 5306 5307 if (children > 0) { 5308 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted && 5309 !cb->cb_vdevs.cb_names) { 5310 print_iostat_dashes(cb, 0, "cache"); 5311 } 5312 printf("\n"); 5313 5314 for (c = 0; c < children; c++) { 5315 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5316 cb->cb_vdevs.cb_name_flags); 5317 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] 5318 : NULL, newchild[c], cb, depth + 2); 5319 free(vname); 5320 } 5321 } 5322 5323 return (ret); 5324 } 5325 5326 static int 5327 refresh_iostat(zpool_handle_t *zhp, void *data) 5328 { 5329 iostat_cbdata_t *cb = data; 5330 boolean_t missing; 5331 5332 /* 5333 * If the pool has disappeared, remove it from the list and continue. 5334 */ 5335 if (zpool_refresh_stats(zhp, &missing) != 0) 5336 return (-1); 5337 5338 if (missing) 5339 pool_list_remove(cb->cb_list, zhp); 5340 5341 return (0); 5342 } 5343 5344 /* 5345 * Callback to print out the iostats for the given pool. 5346 */ 5347 static int 5348 print_iostat(zpool_handle_t *zhp, void *data) 5349 { 5350 iostat_cbdata_t *cb = data; 5351 nvlist_t *oldconfig, *newconfig; 5352 nvlist_t *oldnvroot, *newnvroot; 5353 int ret; 5354 5355 newconfig = zpool_get_config(zhp, &oldconfig); 5356 5357 if (cb->cb_iteration == 1) 5358 oldconfig = NULL; 5359 5360 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE, 5361 &newnvroot) == 0); 5362 5363 if (oldconfig == NULL) 5364 oldnvroot = NULL; 5365 else 5366 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE, 5367 &oldnvroot) == 0); 5368 5369 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, 5370 cb, 0); 5371 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) && 5372 !cb->cb_scripted && cb->cb_verbose && 5373 !cb->cb_vdevs.cb_names_count) { 5374 print_iostat_separator(cb); 5375 if (cb->vcdl != NULL) { 5376 print_cmd_columns(cb->vcdl, 1); 5377 } 5378 printf("\n"); 5379 } 5380 5381 return (ret); 5382 } 5383 5384 static int 5385 get_columns(void) 5386 { 5387 struct winsize ws; 5388 int columns = 80; 5389 int error; 5390 5391 if (isatty(STDOUT_FILENO)) { 5392 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws); 5393 if (error == 0) 5394 columns = ws.ws_col; 5395 } else { 5396 columns = 999; 5397 } 5398 5399 return (columns); 5400 } 5401 5402 /* 5403 * Return the required length of the pool/vdev name column. The minimum 5404 * allowed width and output formatting flags must be provided. 
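 * In verbose mode the width also accounts for the longest (indented)
 * vdev name via max_width(); otherwise only the pool name is measured.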
5405 */ 5406 static int 5407 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose) 5408 { 5409 nvlist_t *config, *nvroot; 5410 int width = min_width; 5411 5412 if ((config = zpool_get_config(zhp, NULL)) != NULL) { 5413 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5414 &nvroot) == 0); 5415 size_t poolname_len = strlen(zpool_get_name(zhp)); 5416 if (verbose == B_FALSE) { 5417 width = MAX(poolname_len, min_width); 5418 } else { 5419 width = MAX(poolname_len, 5420 max_width(zhp, nvroot, 0, min_width, flags)); 5421 } 5422 } 5423 5424 return (width); 5425 } 5426 5427 /* 5428 * Parse the input string, get the 'interval' and 'count' value if there is one. 5429 */ 5430 static void 5431 get_interval_count(int *argcp, char **argv, float *iv, 5432 unsigned long *cnt) 5433 { 5434 float interval = 0; 5435 unsigned long count = 0; 5436 int argc = *argcp; 5437 5438 /* 5439 * Determine if the last argument is an integer or a pool name 5440 */ 5441 if (argc > 0 && zfs_isnumber(argv[argc - 1])) { 5442 char *end; 5443 5444 errno = 0; 5445 interval = strtof(argv[argc - 1], &end); 5446 5447 if (*end == '\0' && errno == 0) { 5448 if (interval == 0) { 5449 (void) fprintf(stderr, gettext( 5450 "interval cannot be zero\n")); 5451 usage(B_FALSE); 5452 } 5453 /* 5454 * Ignore the last parameter 5455 */ 5456 argc--; 5457 } else { 5458 /* 5459 * If this is not a valid number, just plow on. The 5460 * user will get a more informative error message later 5461 * on. 5462 */ 5463 interval = 0; 5464 } 5465 } 5466 5467 /* 5468 * If the last argument is also an integer, then we have both a count 5469 * and an interval. 5470 */ 5471 if (argc > 0 && zfs_isnumber(argv[argc - 1])) { 5472 char *end; 5473 5474 errno = 0; 5475 count = interval; 5476 interval = strtof(argv[argc - 1], &end); 5477 5478 if (*end == '\0' && errno == 0) { 5479 if (interval == 0) { 5480 (void) fprintf(stderr, gettext( 5481 "interval cannot be zero\n")); 5482 usage(B_FALSE); 5483 } 5484 5485 /* 5486 * Ignore the last parameter 5487 */ 5488 argc--; 5489 } else { 5490 interval = 0; 5491 } 5492 } 5493 5494 *iv = interval; 5495 *cnt = count; 5496 *argcp = argc; 5497 } 5498 5499 static void 5500 get_timestamp_arg(char c) 5501 { 5502 if (c == 'u') 5503 timestamp_fmt = UDATE; 5504 else if (c == 'd') 5505 timestamp_fmt = DDATE; 5506 else 5507 usage(B_FALSE); 5508 } 5509 5510 /* 5511 * Return stat flags that are supported by all pools by both the module and 5512 * zpool iostat. "*data" should be initialized to all 0xFFs before running. 5513 * It will get ANDed down until only the flags that are supported on all pools 5514 * remain. 5515 */ 5516 static int 5517 get_stat_flags_cb(zpool_handle_t *zhp, void *data) 5518 { 5519 uint64_t *mask = data; 5520 nvlist_t *config, *nvroot, *nvx; 5521 uint64_t flags = 0; 5522 int i, j; 5523 5524 config = zpool_get_config(zhp, NULL); 5525 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5526 &nvroot) == 0); 5527 5528 /* Default stats are always supported, but for completeness.. */ 5529 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS)) 5530 flags |= IOS_DEFAULT_M; 5531 5532 /* Get our extended stats nvlist from the main list */ 5533 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX, 5534 &nvx) != 0) { 5535 /* 5536 * No extended stats; they're probably running an older 5537 * module. No big deal, we support that too. 
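		 * Only the default stats bit will remain set for such pools.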
5538 */ 5539 goto end; 5540 } 5541 5542 /* For each extended stat, make sure all its nvpairs are supported */ 5543 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) { 5544 if (!vsx_type_to_nvlist[j][0]) 5545 continue; 5546 5547 /* Start off by assuming the flag is supported, then check */ 5548 flags |= (1ULL << j); 5549 for (i = 0; vsx_type_to_nvlist[j][i]; i++) { 5550 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) { 5551 /* flag isn't supported */ 5552 flags = flags & ~(1ULL << j); 5553 break; 5554 } 5555 } 5556 } 5557 end: 5558 *mask = *mask & flags; 5559 return (0); 5560 } 5561 5562 /* 5563 * Return a bitmask of stats that are supported on all pools by both the module 5564 * and zpool iostat. 5565 */ 5566 static uint64_t 5567 get_stat_flags(zpool_list_t *list) 5568 { 5569 uint64_t mask = -1; 5570 5571 /* 5572 * get_stat_flags_cb() will lop off bits from "mask" until only the 5573 * flags that are supported on all pools remain. 5574 */ 5575 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask); 5576 return (mask); 5577 } 5578 5579 /* 5580 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise. 5581 */ 5582 static int 5583 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data) 5584 { 5585 uint64_t guid; 5586 vdev_cbdata_t *cb = cb_data; 5587 zpool_handle_t *zhp = zhp_data; 5588 5589 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 5590 return (0); 5591 5592 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0])); 5593 } 5594 5595 /* 5596 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise. 5597 */ 5598 static int 5599 is_vdev(zpool_handle_t *zhp, void *cb_data) 5600 { 5601 return (for_each_vdev(zhp, is_vdev_cb, cb_data)); 5602 } 5603 5604 /* 5605 * Check if vdevs are in a pool 5606 * 5607 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise 5608 * return 0. If pool_name is NULL, then search all pools. 5609 */ 5610 static int 5611 are_vdevs_in_pool(int argc, char **argv, char *pool_name, 5612 vdev_cbdata_t *cb) 5613 { 5614 char **tmp_name; 5615 int ret = 0; 5616 int i; 5617 int pool_count = 0; 5618 5619 if ((argc == 0) || !*argv) 5620 return (0); 5621 5622 if (pool_name) 5623 pool_count = 1; 5624 5625 /* Temporarily hijack cb_names for a second... */ 5626 tmp_name = cb->cb_names; 5627 5628 /* Go though our list of prospective vdev names */ 5629 for (i = 0; i < argc; i++) { 5630 cb->cb_names = argv + i; 5631 5632 /* Is this name a vdev in our pools? */ 5633 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL, 5634 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb); 5635 if (!ret) { 5636 /* No match */ 5637 break; 5638 } 5639 } 5640 5641 cb->cb_names = tmp_name; 5642 5643 return (ret); 5644 } 5645 5646 static int 5647 is_pool_cb(zpool_handle_t *zhp, void *data) 5648 { 5649 char *name = data; 5650 if (strcmp(name, zpool_get_name(zhp)) == 0) 5651 return (1); 5652 5653 return (0); 5654 } 5655 5656 /* 5657 * Do we have a pool named *name? If so, return 1, otherwise 0. 5658 */ 5659 static int 5660 is_pool(char *name) 5661 { 5662 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE, 5663 is_pool_cb, name)); 5664 } 5665 5666 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */ 5667 static int 5668 are_all_pools(int argc, char **argv) 5669 { 5670 if ((argc == 0) || !*argv) 5671 return (0); 5672 5673 while (--argc >= 0) 5674 if (!is_pool(argv[argc])) 5675 return (0); 5676 5677 return (1); 5678 } 5679 5680 /* 5681 * Helper function to print out vdev/pool names we can't resolve. 
Used for an 5682 * error message. 5683 */ 5684 static void 5685 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name, 5686 vdev_cbdata_t *cb) 5687 { 5688 int i; 5689 char *name; 5690 char *str; 5691 for (i = 0; i < argc; i++) { 5692 name = argv[i]; 5693 5694 if (is_pool(name)) 5695 str = gettext("pool"); 5696 else if (are_vdevs_in_pool(1, &name, pool_name, cb)) 5697 str = gettext("vdev in this pool"); 5698 else if (are_vdevs_in_pool(1, &name, NULL, cb)) 5699 str = gettext("vdev in another pool"); 5700 else 5701 str = gettext("unknown"); 5702 5703 fprintf(stderr, "\t%s (%s)\n", name, str); 5704 } 5705 } 5706 5707 /* 5708 * Same as get_interval_count(), but with additional checks to not misinterpret 5709 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in 5710 * cb.cb_vdevs.cb_name_flags. 5711 */ 5712 static void 5713 get_interval_count_filter_guids(int *argc, char **argv, float *interval, 5714 unsigned long *count, iostat_cbdata_t *cb) 5715 { 5716 char **tmpargv = argv; 5717 int argc_for_interval = 0; 5718 5719 /* Is the last arg an interval value? Or a guid? */ 5720 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL, 5721 &cb->cb_vdevs)) { 5722 /* 5723 * The last arg is not a guid, so it's probably an 5724 * interval value. 5725 */ 5726 argc_for_interval++; 5727 5728 if (*argc >= 2 && 5729 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL, 5730 &cb->cb_vdevs)) { 5731 /* 5732 * The 2nd to last arg is not a guid, so it's probably 5733 * an interval value. 5734 */ 5735 argc_for_interval++; 5736 } 5737 } 5738 5739 /* Point to our list of possible intervals */ 5740 tmpargv = &argv[*argc - argc_for_interval]; 5741 5742 *argc = *argc - argc_for_interval; 5743 get_interval_count(&argc_for_interval, tmpargv, 5744 interval, count); 5745 } 5746 5747 /* 5748 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or 5749 * if we were unable to determine its size. 5750 */ 5751 static int 5752 terminal_height(void) 5753 { 5754 struct winsize win; 5755 5756 if (isatty(STDOUT_FILENO) == 0) 5757 return (-1); 5758 5759 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0) 5760 return (win.ws_row); 5761 5762 return (-1); 5763 } 5764 5765 /* 5766 * Run one of the zpool status/iostat -c scripts with the help (-h) option and 5767 * print the result. 5768 * 5769 * name: Short name of the script ('iostat'). 5770 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat'); 5771 */ 5772 static void 5773 print_zpool_script_help(char *name, char *path) 5774 { 5775 char *argv[] = {path, (char *)"-h", NULL}; 5776 char **lines = NULL; 5777 int lines_cnt = 0; 5778 int rc; 5779 5780 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines, 5781 &lines_cnt); 5782 if (rc != 0 || lines == NULL || lines_cnt <= 0) { 5783 if (lines != NULL) 5784 libzfs_free_str_array(lines, lines_cnt); 5785 return; 5786 } 5787 5788 for (int i = 0; i < lines_cnt; i++) 5789 if (!is_blank_str(lines[i])) 5790 printf(" %-14s %s\n", name, lines[i]); 5791 5792 libzfs_free_str_array(lines, lines_cnt); 5793 } 5794 5795 /* 5796 * Go though the zpool status/iostat -c scripts in the user's path, run their 5797 * help option (-h), and print out the results. 
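 * Only executable regular files in the directory are considered; each
 * one is run with '-h' via print_zpool_script_help().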
5798 */ 5799 static void 5800 print_zpool_dir_scripts(char *dirpath) 5801 { 5802 DIR *dir; 5803 struct dirent *ent; 5804 char fullpath[MAXPATHLEN]; 5805 struct stat dir_stat; 5806 5807 if ((dir = opendir(dirpath)) != NULL) { 5808 /* print all the files and directories within directory */ 5809 while ((ent = readdir(dir)) != NULL) { 5810 if (snprintf(fullpath, sizeof (fullpath), "%s/%s", 5811 dirpath, ent->d_name) >= sizeof (fullpath)) { 5812 (void) fprintf(stderr, 5813 gettext("internal error: " 5814 "ZPOOL_SCRIPTS_PATH too large.\n")); 5815 exit(1); 5816 } 5817 5818 /* Print the scripts */ 5819 if (stat(fullpath, &dir_stat) == 0) 5820 if (dir_stat.st_mode & S_IXUSR && 5821 S_ISREG(dir_stat.st_mode)) 5822 print_zpool_script_help(ent->d_name, 5823 fullpath); 5824 } 5825 closedir(dir); 5826 } 5827 } 5828 5829 /* 5830 * Print out help text for all zpool status/iostat -c scripts. 5831 */ 5832 static void 5833 print_zpool_script_list(const char *subcommand) 5834 { 5835 char *dir, *sp, *tmp; 5836 5837 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand); 5838 5839 sp = zpool_get_cmd_search_path(); 5840 if (sp == NULL) 5841 return; 5842 5843 for (dir = strtok_r(sp, ":", &tmp); 5844 dir != NULL; 5845 dir = strtok_r(NULL, ":", &tmp)) 5846 print_zpool_dir_scripts(dir); 5847 5848 free(sp); 5849 } 5850 5851 /* 5852 * Set the minimum pool/vdev name column width. The width must be at least 10, 5853 * but may be as large as the column width - 42 so it still fits on one line. 5854 * NOTE: 42 is the width of the default capacity/operations/bandwidth output 5855 */ 5856 static int 5857 get_namewidth_iostat(zpool_handle_t *zhp, void *data) 5858 { 5859 iostat_cbdata_t *cb = data; 5860 int width, available_width; 5861 5862 /* 5863 * get_namewidth() returns the maximum width of any name in that column 5864 * for any pool/vdev/device line that will be output. 5865 */ 5866 width = get_namewidth(zhp, cb->cb_namewidth, 5867 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose); 5868 5869 /* 5870 * The width we are calculating is the width of the header and also the 5871 * padding width for names that are less than maximum width. The stats 5872 * take up 42 characters, so the width available for names is: 5873 */ 5874 available_width = get_columns() - 42; 5875 5876 /* 5877 * If the maximum width fits on a screen, then great! Make everything 5878 * line up by justifying all lines to the same width. If that max 5879 * width is larger than what's available, the name plus stats won't fit 5880 * on one line, and justifying to that width would cause every line to 5881 * wrap on the screen. We only want lines with long names to wrap. 5882 * Limit the padding to what won't wrap. 5883 */ 5884 if (width > available_width) 5885 width = available_width; 5886 5887 /* 5888 * And regardless of whatever the screen width is (get_columns can 5889 * return 0 if the width is not known or less than 42 for a narrow 5890 * terminal) have the width be a minimum of 10. 5891 */ 5892 if (width < 10) 5893 width = 10; 5894 5895 /* Save the calculated width */ 5896 cb->cb_namewidth = width; 5897 5898 return (0); 5899 } 5900 5901 /* 5902 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name] 5903 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]] 5904 * [interval [count]] 5905 * 5906 * -c CMD For each vdev, run command CMD 5907 * -g Display guid for individual vdev name. 5908 * -L Follow links when resolving vdev path name. 5909 * -P Display full path for vdev name. 
5910 * -v Display statistics for individual vdevs 5911 * -h Display help 5912 * -p Display values in parsable (exact) format. 5913 * -H Scripted mode. Don't display headers, and separate properties 5914 * by a single tab. 5915 * -l Display average latency 5916 * -q Display queue depths 5917 * -w Display latency histograms 5918 * -r Display request size histogram 5919 * -T Display a timestamp in date(1) or Unix format 5920 * -n Only print headers once 5921 * 5922 * This command can be tricky because we want to be able to deal with pool 5923 * creation/destruction as well as vdev configuration changes. The bulk of this 5924 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely 5925 * on pool_list_update() to detect the addition of new pools. Configuration 5926 * changes are all handled within libzfs. 5927 */ 5928 int 5929 zpool_do_iostat(int argc, char **argv) 5930 { 5931 int c; 5932 int ret; 5933 int npools; 5934 float interval = 0; 5935 unsigned long count = 0; 5936 int winheight = 24; 5937 zpool_list_t *list; 5938 boolean_t verbose = B_FALSE; 5939 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE; 5940 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE; 5941 boolean_t omit_since_boot = B_FALSE; 5942 boolean_t guid = B_FALSE; 5943 boolean_t follow_links = B_FALSE; 5944 boolean_t full_name = B_FALSE; 5945 boolean_t headers_once = B_FALSE; 5946 iostat_cbdata_t cb = { 0 }; 5947 char *cmd = NULL; 5948 5949 /* Used for printing error message */ 5950 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q', 5951 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'}; 5952 5953 uint64_t unsupported_flags; 5954 5955 /* check options */ 5956 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) { 5957 switch (c) { 5958 case 'c': 5959 if (cmd != NULL) { 5960 fprintf(stderr, 5961 gettext("Can't set -c flag twice\n")); 5962 exit(1); 5963 } 5964 5965 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 5966 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 5967 fprintf(stderr, gettext( 5968 "Can't run -c, disabled by " 5969 "ZPOOL_SCRIPTS_ENABLED.\n")); 5970 exit(1); 5971 } 5972 5973 if ((getuid() <= 0 || geteuid() <= 0) && 5974 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 5975 fprintf(stderr, gettext( 5976 "Can't run -c with root privileges " 5977 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 5978 exit(1); 5979 } 5980 cmd = optarg; 5981 verbose = B_TRUE; 5982 break; 5983 case 'g': 5984 guid = B_TRUE; 5985 break; 5986 case 'L': 5987 follow_links = B_TRUE; 5988 break; 5989 case 'P': 5990 full_name = B_TRUE; 5991 break; 5992 case 'T': 5993 get_timestamp_arg(*optarg); 5994 break; 5995 case 'v': 5996 verbose = B_TRUE; 5997 break; 5998 case 'p': 5999 parsable = B_TRUE; 6000 break; 6001 case 'l': 6002 latency = B_TRUE; 6003 break; 6004 case 'q': 6005 queues = B_TRUE; 6006 break; 6007 case 'H': 6008 scripted = B_TRUE; 6009 break; 6010 case 'w': 6011 l_histo = B_TRUE; 6012 break; 6013 case 'r': 6014 rq_histo = B_TRUE; 6015 break; 6016 case 'y': 6017 omit_since_boot = B_TRUE; 6018 break; 6019 case 'n': 6020 headers_once = B_TRUE; 6021 break; 6022 case 'h': 6023 usage(B_FALSE); 6024 break; 6025 case '?': 6026 if (optopt == 'c') { 6027 print_zpool_script_list("iostat"); 6028 exit(0); 6029 } else { 6030 fprintf(stderr, 6031 gettext("invalid option '%c'\n"), optopt); 6032 } 6033 usage(B_FALSE); 6034 } 6035 } 6036 6037 argc -= optind; 6038 argv += optind; 6039 6040 cb.cb_literal = parsable; 6041 cb.cb_scripted = scripted; 6042 6043 if (guid) 6044 
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID; 6045 if (follow_links) 6046 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 6047 if (full_name) 6048 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH; 6049 cb.cb_iteration = 0; 6050 cb.cb_namewidth = 0; 6051 cb.cb_verbose = verbose; 6052 6053 /* Get our interval and count values (if any) */ 6054 if (guid) { 6055 get_interval_count_filter_guids(&argc, argv, &interval, 6056 &count, &cb); 6057 } else { 6058 get_interval_count(&argc, argv, &interval, &count); 6059 } 6060 6061 if (argc == 0) { 6062 /* No args, so just print the defaults. */ 6063 } else if (are_all_pools(argc, argv)) { 6064 /* All the args are pool names */ 6065 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) { 6066 /* All the args are vdevs */ 6067 cb.cb_vdevs.cb_names = argv; 6068 cb.cb_vdevs.cb_names_count = argc; 6069 argc = 0; /* No pools to process */ 6070 } else if (are_all_pools(1, argv)) { 6071 /* The first arg is a pool name */ 6072 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], 6073 &cb.cb_vdevs)) { 6074 /* ...and the rest are vdev names */ 6075 cb.cb_vdevs.cb_names = argv + 1; 6076 cb.cb_vdevs.cb_names_count = argc - 1; 6077 argc = 1; /* One pool to process */ 6078 } else { 6079 fprintf(stderr, gettext("Expected either a list of ")); 6080 fprintf(stderr, gettext("pools, or list of vdevs in")); 6081 fprintf(stderr, " \"%s\", ", argv[0]); 6082 fprintf(stderr, gettext("but got:\n")); 6083 error_list_unresolved_vdevs(argc - 1, argv + 1, 6084 argv[0], &cb.cb_vdevs); 6085 fprintf(stderr, "\n"); 6086 usage(B_FALSE); 6087 return (1); 6088 } 6089 } else { 6090 /* 6091 * The args don't make sense. The first arg isn't a pool name, 6092 * nor are all the args vdevs. 6093 */ 6094 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n")); 6095 fprintf(stderr, "\n"); 6096 return (1); 6097 } 6098 6099 if (cb.cb_vdevs.cb_names_count != 0) { 6100 /* 6101 * If user specified vdevs, it implies verbose. 6102 */ 6103 cb.cb_verbose = B_TRUE; 6104 } 6105 6106 /* 6107 * Construct the list of all interesting pools. 6108 */ 6109 ret = 0; 6110 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable, 6111 &ret)) == NULL) 6112 return (1); 6113 6114 if (pool_list_count(list) == 0 && argc != 0) { 6115 pool_list_free(list); 6116 return (1); 6117 } 6118 6119 if (pool_list_count(list) == 0 && interval == 0) { 6120 pool_list_free(list); 6121 (void) fprintf(stderr, gettext("no pools available\n")); 6122 return (1); 6123 } 6124 6125 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) { 6126 pool_list_free(list); 6127 (void) fprintf(stderr, 6128 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n")); 6129 usage(B_FALSE); 6130 return (1); 6131 } 6132 6133 if (l_histo && rq_histo) { 6134 pool_list_free(list); 6135 (void) fprintf(stderr, 6136 gettext("Only one of [-r|-w] can be passed at a time\n")); 6137 usage(B_FALSE); 6138 return (1); 6139 } 6140 6141 /* 6142 * Enter the main iostat loop. 6143 */ 6144 cb.cb_list = list; 6145 6146 if (l_histo) { 6147 /* 6148 * Histograms tables look out of place when you try to display 6149 * them with the other stats, so make a rule that you can only 6150 * print histograms by themselves. 6151 */ 6152 cb.cb_flags = IOS_L_HISTO_M; 6153 } else if (rq_histo) { 6154 cb.cb_flags = IOS_RQ_HISTO_M; 6155 } else { 6156 cb.cb_flags = IOS_DEFAULT_M; 6157 if (latency) 6158 cb.cb_flags |= IOS_LATENCY_M; 6159 if (queues) 6160 cb.cb_flags |= IOS_QUEUES_M; 6161 } 6162 6163 /* 6164 * See if the module supports all the stats we want to display. 
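 * Any bit that is set in cb_flags but clear in get_stat_flags(list) is
 * unsupported by the kernel module; lowbit64() walks those bits below and
 * flag_to_arg[] maps each one back to its command-line letter so the
 * error message can name the offending option.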
6165 */ 6166 unsupported_flags = cb.cb_flags & ~get_stat_flags(list); 6167 if (unsupported_flags) { 6168 uint64_t f; 6169 int idx; 6170 fprintf(stderr, 6171 gettext("The loaded zfs module doesn't support:")); 6172 6173 /* for each bit set in unsupported_flags */ 6174 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) { 6175 idx = lowbit64(f) - 1; 6176 fprintf(stderr, " -%c", flag_to_arg[idx]); 6177 } 6178 6179 fprintf(stderr, ". Try running a newer module.\n"); 6180 pool_list_free(list); 6181 6182 return (1); 6183 } 6184 6185 for (;;) { 6186 if ((npools = pool_list_count(list)) == 0) 6187 (void) fprintf(stderr, gettext("no pools available\n")); 6188 else { 6189 /* 6190 * If this is the first iteration and -y was supplied 6191 * we skip any printing. 6192 */ 6193 boolean_t skip = (omit_since_boot && 6194 cb.cb_iteration == 0); 6195 6196 /* 6197 * Refresh all statistics. This is done as an 6198 * explicit step before calculating the maximum name 6199 * width, so that any * configuration changes are 6200 * properly accounted for. 6201 */ 6202 (void) pool_list_iter(list, B_FALSE, refresh_iostat, 6203 &cb); 6204 6205 /* 6206 * Iterate over all pools to determine the maximum width 6207 * for the pool / device name column across all pools. 6208 */ 6209 cb.cb_namewidth = 0; 6210 (void) pool_list_iter(list, B_FALSE, 6211 get_namewidth_iostat, &cb); 6212 6213 if (timestamp_fmt != NODATE) 6214 print_timestamp(timestamp_fmt); 6215 6216 if (cmd != NULL && cb.cb_verbose && 6217 !(cb.cb_flags & IOS_ANYHISTO_M)) { 6218 cb.vcdl = all_pools_for_each_vdev_run(argc, 6219 argv, cmd, g_zfs, cb.cb_vdevs.cb_names, 6220 cb.cb_vdevs.cb_names_count, 6221 cb.cb_vdevs.cb_name_flags); 6222 } else { 6223 cb.vcdl = NULL; 6224 } 6225 6226 6227 /* 6228 * Check terminal size so we can print headers 6229 * even when terminal window has its height 6230 * changed. 6231 */ 6232 winheight = terminal_height(); 6233 /* 6234 * Are we connected to TTY? If not, headers_once 6235 * should be true, to avoid breaking scripts. 6236 */ 6237 if (winheight < 0) 6238 headers_once = B_TRUE; 6239 6240 /* 6241 * If it's the first time and we're not skipping it, 6242 * or either skip or verbose mode, print the header. 6243 * 6244 * The histogram code explicitly prints its header on 6245 * every vdev, so skip this for histograms. 6246 */ 6247 if (((++cb.cb_iteration == 1 && !skip) || 6248 (skip != verbose) || 6249 (!headers_once && 6250 (cb.cb_iteration % winheight) == 0)) && 6251 (!(cb.cb_flags & IOS_ANYHISTO_M)) && 6252 !cb.cb_scripted) 6253 print_iostat_header(&cb); 6254 6255 if (skip) { 6256 (void) fflush(stdout); 6257 (void) fsleep(interval); 6258 continue; 6259 } 6260 6261 pool_list_iter(list, B_FALSE, print_iostat, &cb); 6262 6263 /* 6264 * If there's more than one pool, and we're not in 6265 * verbose mode (which prints a separator for us), 6266 * then print a separator. 6267 * 6268 * In addition, if we're printing specific vdevs then 6269 * we also want an ending separator. 
6270 */ 6271 if (((npools > 1 && !verbose && 6272 !(cb.cb_flags & IOS_ANYHISTO_M)) || 6273 (!(cb.cb_flags & IOS_ANYHISTO_M) && 6274 cb.cb_vdevs.cb_names_count)) && 6275 !cb.cb_scripted) { 6276 print_iostat_separator(&cb); 6277 if (cb.vcdl != NULL) 6278 print_cmd_columns(cb.vcdl, 1); 6279 printf("\n"); 6280 } 6281 6282 if (cb.vcdl != NULL) 6283 free_vdev_cmd_data_list(cb.vcdl); 6284 6285 } 6286 6287 if (interval == 0) 6288 break; 6289 6290 if (count != 0 && --count == 0) 6291 break; 6292 6293 (void) fflush(stdout); 6294 (void) fsleep(interval); 6295 } 6296 6297 pool_list_free(list); 6298 6299 return (ret); 6300 } 6301 6302 typedef struct list_cbdata { 6303 boolean_t cb_verbose; 6304 int cb_name_flags; 6305 int cb_namewidth; 6306 boolean_t cb_scripted; 6307 zprop_list_t *cb_proplist; 6308 boolean_t cb_literal; 6309 } list_cbdata_t; 6310 6311 6312 /* 6313 * Given a list of columns to display, output appropriate headers for each one. 6314 */ 6315 static void 6316 print_header(list_cbdata_t *cb) 6317 { 6318 zprop_list_t *pl = cb->cb_proplist; 6319 char headerbuf[ZPOOL_MAXPROPLEN]; 6320 const char *header; 6321 boolean_t first = B_TRUE; 6322 boolean_t right_justify; 6323 size_t width = 0; 6324 6325 for (; pl != NULL; pl = pl->pl_next) { 6326 width = pl->pl_width; 6327 if (first && cb->cb_verbose) { 6328 /* 6329 * Reset the width to accommodate the verbose listing 6330 * of devices. 6331 */ 6332 width = cb->cb_namewidth; 6333 } 6334 6335 if (!first) 6336 (void) fputs(" ", stdout); 6337 else 6338 first = B_FALSE; 6339 6340 right_justify = B_FALSE; 6341 if (pl->pl_prop != ZPROP_USERPROP) { 6342 header = zpool_prop_column_name(pl->pl_prop); 6343 right_justify = zpool_prop_align_right(pl->pl_prop); 6344 } else { 6345 int i; 6346 6347 for (i = 0; pl->pl_user_prop[i] != '\0'; i++) 6348 headerbuf[i] = toupper(pl->pl_user_prop[i]); 6349 headerbuf[i] = '\0'; 6350 header = headerbuf; 6351 } 6352 6353 if (pl->pl_next == NULL && !right_justify) 6354 (void) fputs(header, stdout); 6355 else if (right_justify) 6356 (void) printf("%*s", (int)width, header); 6357 else 6358 (void) printf("%-*s", (int)width, header); 6359 } 6360 6361 (void) fputc('\n', stdout); 6362 } 6363 6364 /* 6365 * Given a pool and a list of properties, print out all the properties according 6366 * to the described layout. Used by zpool_do_list(). 6367 */ 6368 static void 6369 print_pool(zpool_handle_t *zhp, list_cbdata_t *cb) 6370 { 6371 zprop_list_t *pl = cb->cb_proplist; 6372 boolean_t first = B_TRUE; 6373 char property[ZPOOL_MAXPROPLEN]; 6374 const char *propstr; 6375 boolean_t right_justify; 6376 size_t width; 6377 6378 for (; pl != NULL; pl = pl->pl_next) { 6379 6380 width = pl->pl_width; 6381 if (first && cb->cb_verbose) { 6382 /* 6383 * Reset the width to accommodate the verbose listing 6384 * of devices. 
6385 */ 6386 width = cb->cb_namewidth; 6387 } 6388 6389 if (!first) { 6390 if (cb->cb_scripted) 6391 (void) fputc('\t', stdout); 6392 else 6393 (void) fputs(" ", stdout); 6394 } else { 6395 first = B_FALSE; 6396 } 6397 6398 right_justify = B_FALSE; 6399 if (pl->pl_prop != ZPROP_USERPROP) { 6400 if (zpool_get_prop(zhp, pl->pl_prop, property, 6401 sizeof (property), NULL, cb->cb_literal) != 0) 6402 propstr = "-"; 6403 else 6404 propstr = property; 6405 6406 right_justify = zpool_prop_align_right(pl->pl_prop); 6407 } else if ((zpool_prop_feature(pl->pl_user_prop) || 6408 zpool_prop_unsupported(pl->pl_user_prop)) && 6409 zpool_prop_get_feature(zhp, pl->pl_user_prop, property, 6410 sizeof (property)) == 0) { 6411 propstr = property; 6412 } else if (zfs_prop_user(pl->pl_user_prop) && 6413 zpool_get_userprop(zhp, pl->pl_user_prop, property, 6414 sizeof (property), NULL) == 0) { 6415 propstr = property; 6416 } else { 6417 propstr = "-"; 6418 } 6419 6420 /* 6421 * If this is being called in scripted mode, or if this is the 6422 * last column and it is left-justified, don't include a width 6423 * format specifier. 6424 */ 6425 if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify)) 6426 (void) fputs(propstr, stdout); 6427 else if (right_justify) 6428 (void) printf("%*s", (int)width, propstr); 6429 else 6430 (void) printf("%-*s", (int)width, propstr); 6431 } 6432 6433 (void) fputc('\n', stdout); 6434 } 6435 6436 static void 6437 print_one_column(zpool_prop_t prop, uint64_t value, const char *str, 6438 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format) 6439 { 6440 char propval[64]; 6441 boolean_t fixed; 6442 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL); 6443 6444 switch (prop) { 6445 case ZPOOL_PROP_SIZE: 6446 case ZPOOL_PROP_EXPANDSZ: 6447 case ZPOOL_PROP_CHECKPOINT: 6448 case ZPOOL_PROP_DEDUPRATIO: 6449 if (value == 0) 6450 (void) strlcpy(propval, "-", sizeof (propval)); 6451 else 6452 zfs_nicenum_format(value, propval, sizeof (propval), 6453 format); 6454 break; 6455 case ZPOOL_PROP_FRAGMENTATION: 6456 if (value == ZFS_FRAG_INVALID) { 6457 (void) strlcpy(propval, "-", sizeof (propval)); 6458 } else if (format == ZFS_NICENUM_RAW) { 6459 (void) snprintf(propval, sizeof (propval), "%llu", 6460 (unsigned long long)value); 6461 } else { 6462 (void) snprintf(propval, sizeof (propval), "%llu%%", 6463 (unsigned long long)value); 6464 } 6465 break; 6466 case ZPOOL_PROP_CAPACITY: 6467 /* capacity value is in parts-per-10,000 (aka permyriad) */ 6468 if (format == ZFS_NICENUM_RAW) 6469 (void) snprintf(propval, sizeof (propval), "%llu", 6470 (unsigned long long)value / 100); 6471 else 6472 (void) snprintf(propval, sizeof (propval), 6473 value < 1000 ? "%1.2f%%" : value < 10000 ? 
6474 "%2.1f%%" : "%3.0f%%", value / 100.0); 6475 break; 6476 case ZPOOL_PROP_HEALTH: 6477 width = 8; 6478 (void) strlcpy(propval, str, sizeof (propval)); 6479 break; 6480 default: 6481 zfs_nicenum_format(value, propval, sizeof (propval), format); 6482 } 6483 6484 if (!valid) 6485 (void) strlcpy(propval, "-", sizeof (propval)); 6486 6487 if (scripted) 6488 (void) printf("\t%s", propval); 6489 else 6490 (void) printf(" %*s", (int)width, propval); 6491 } 6492 6493 /* 6494 * print static default line per vdev 6495 * not compatible with '-o' <proplist> option 6496 */ 6497 static void 6498 print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, 6499 list_cbdata_t *cb, int depth, boolean_t isspare) 6500 { 6501 nvlist_t **child; 6502 vdev_stat_t *vs; 6503 uint_t c, children; 6504 char *vname; 6505 boolean_t scripted = cb->cb_scripted; 6506 uint64_t islog = B_FALSE; 6507 const char *dashes = "%-*s - - - - " 6508 "- - - - -\n"; 6509 6510 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 6511 (uint64_t **)&vs, &c) == 0); 6512 6513 if (name != NULL) { 6514 boolean_t toplevel = (vs->vs_space != 0); 6515 uint64_t cap; 6516 enum zfs_nicenum_format format; 6517 const char *state; 6518 6519 if (cb->cb_literal) 6520 format = ZFS_NICENUM_RAW; 6521 else 6522 format = ZFS_NICENUM_1024; 6523 6524 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) 6525 return; 6526 6527 if (scripted) 6528 (void) printf("\t%s", name); 6529 else if (strlen(name) + depth > cb->cb_namewidth) 6530 (void) printf("%*s%s", depth, "", name); 6531 else 6532 (void) printf("%*s%s%*s", depth, "", name, 6533 (int)(cb->cb_namewidth - strlen(name) - depth), ""); 6534 6535 /* 6536 * Print the properties for the individual vdevs. Some 6537 * properties are only applicable to toplevel vdevs. The 6538 * 'toplevel' boolean value is passed to the print_one_column() 6539 * to indicate that the value is valid. 6540 */ 6541 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) 6542 print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL, 6543 scripted, B_TRUE, format); 6544 else 6545 print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL, 6546 scripted, toplevel, format); 6547 print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL, 6548 scripted, toplevel, format); 6549 print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc, 6550 NULL, scripted, toplevel, format); 6551 print_one_column(ZPOOL_PROP_CHECKPOINT, 6552 vs->vs_checkpoint_space, NULL, scripted, toplevel, format); 6553 print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL, 6554 scripted, B_TRUE, format); 6555 print_one_column(ZPOOL_PROP_FRAGMENTATION, 6556 vs->vs_fragmentation, NULL, scripted, 6557 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel), 6558 format); 6559 cap = (vs->vs_space == 0) ? 
0 : 6560 (vs->vs_alloc * 10000 / vs->vs_space); 6561 print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL, 6562 scripted, toplevel, format); 6563 print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL, 6564 scripted, toplevel, format); 6565 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 6566 if (isspare) { 6567 if (vs->vs_aux == VDEV_AUX_SPARED) 6568 state = "INUSE"; 6569 else if (vs->vs_state == VDEV_STATE_HEALTHY) 6570 state = "AVAIL"; 6571 } 6572 print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted, 6573 B_TRUE, format); 6574 (void) fputc('\n', stdout); 6575 } 6576 6577 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 6578 &child, &children) != 0) 6579 return; 6580 6581 /* list the normal vdevs first */ 6582 for (c = 0; c < children; c++) { 6583 uint64_t ishole = B_FALSE; 6584 6585 if (nvlist_lookup_uint64(child[c], 6586 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole) 6587 continue; 6588 6589 if (nvlist_lookup_uint64(child[c], 6590 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog) 6591 continue; 6592 6593 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 6594 continue; 6595 6596 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6597 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 6598 print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE); 6599 free(vname); 6600 } 6601 6602 /* list the classes: 'logs', 'dedup', and 'special' */ 6603 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { 6604 boolean_t printed = B_FALSE; 6605 6606 for (c = 0; c < children; c++) { 6607 const char *bias = NULL; 6608 const char *type = NULL; 6609 6610 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 6611 &islog) == 0 && islog) { 6612 bias = VDEV_ALLOC_CLASS_LOGS; 6613 } else { 6614 (void) nvlist_lookup_string(child[c], 6615 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 6616 (void) nvlist_lookup_string(child[c], 6617 ZPOOL_CONFIG_TYPE, &type); 6618 } 6619 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 6620 continue; 6621 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 6622 continue; 6623 6624 if (!printed) { 6625 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6626 (void) printf(dashes, cb->cb_namewidth, 6627 class_name[n]); 6628 printed = B_TRUE; 6629 } 6630 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6631 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 6632 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6633 B_FALSE); 6634 free(vname); 6635 } 6636 } 6637 6638 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 6639 &child, &children) == 0 && children > 0) { 6640 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6641 (void) printf(dashes, cb->cb_namewidth, "cache"); 6642 for (c = 0; c < children; c++) { 6643 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6644 cb->cb_name_flags); 6645 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6646 B_FALSE); 6647 free(vname); 6648 } 6649 } 6650 6651 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child, 6652 &children) == 0 && children > 0) { 6653 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6654 (void) printf(dashes, cb->cb_namewidth, "spare"); 6655 for (c = 0; c < children; c++) { 6656 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6657 cb->cb_name_flags); 6658 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6659 B_TRUE); 6660 free(vname); 6661 } 6662 } 6663 } 6664 6665 /* 6666 * Generic callback function to list a pool. 
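 * Invoked once per pool by pool_list_iter().  Prints the property columns
 * for the pool itself and, when -v was given, recurses into the vdev tree
 * found under ZPOOL_CONFIG_VDEV_TREE via print_list_stats().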
6667 */ 6668 static int 6669 list_callback(zpool_handle_t *zhp, void *data) 6670 { 6671 list_cbdata_t *cbp = data; 6672 6673 print_pool(zhp, cbp); 6674 6675 if (cbp->cb_verbose) { 6676 nvlist_t *config, *nvroot; 6677 6678 config = zpool_get_config(zhp, NULL); 6679 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 6680 &nvroot) == 0); 6681 print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE); 6682 } 6683 6684 return (0); 6685 } 6686 6687 /* 6688 * Set the minimum pool/vdev name column width. The width must be at least 9, 6689 * but may be as large as needed. 6690 */ 6691 static int 6692 get_namewidth_list(zpool_handle_t *zhp, void *data) 6693 { 6694 list_cbdata_t *cb = data; 6695 int width; 6696 6697 width = get_namewidth(zhp, cb->cb_namewidth, 6698 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose); 6699 6700 if (width < 9) 6701 width = 9; 6702 6703 cb->cb_namewidth = width; 6704 6705 return (0); 6706 } 6707 6708 /* 6709 * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]] 6710 * 6711 * -g Display guid for individual vdev name. 6712 * -H Scripted mode. Don't display headers, and separate properties 6713 * by a single tab. 6714 * -L Follow links when resolving vdev path name. 6715 * -o List of properties to display. Defaults to 6716 * "name,size,allocated,free,expandsize,fragmentation,capacity," 6717 * "dedupratio,health,altroot" 6718 * -p Display values in parsable (exact) format. 6719 * -P Display full path for vdev name. 6720 * -T Display a timestamp in date(1) or Unix format 6721 * 6722 * List all pools in the system, whether or not they're healthy. Output space 6723 * statistics for each one, as well as health status summary. 6724 */ 6725 int 6726 zpool_do_list(int argc, char **argv) 6727 { 6728 int c; 6729 int ret = 0; 6730 list_cbdata_t cb = { 0 }; 6731 static char default_props[] = 6732 "name,size,allocated,free,checkpoint,expandsize,fragmentation," 6733 "capacity,dedupratio,health,altroot"; 6734 char *props = default_props; 6735 float interval = 0; 6736 unsigned long count = 0; 6737 zpool_list_t *list; 6738 boolean_t first = B_TRUE; 6739 current_prop_type = ZFS_TYPE_POOL; 6740 6741 /* check options */ 6742 while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) { 6743 switch (c) { 6744 case 'g': 6745 cb.cb_name_flags |= VDEV_NAME_GUID; 6746 break; 6747 case 'H': 6748 cb.cb_scripted = B_TRUE; 6749 break; 6750 case 'L': 6751 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 6752 break; 6753 case 'o': 6754 props = optarg; 6755 break; 6756 case 'P': 6757 cb.cb_name_flags |= VDEV_NAME_PATH; 6758 break; 6759 case 'p': 6760 cb.cb_literal = B_TRUE; 6761 break; 6762 case 'T': 6763 get_timestamp_arg(*optarg); 6764 break; 6765 case 'v': 6766 cb.cb_verbose = B_TRUE; 6767 cb.cb_namewidth = 8; /* 8 until precalc is avail */ 6768 break; 6769 case ':': 6770 (void) fprintf(stderr, gettext("missing argument for " 6771 "'%c' option\n"), optopt); 6772 usage(B_FALSE); 6773 break; 6774 case '?': 6775 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6776 optopt); 6777 usage(B_FALSE); 6778 } 6779 } 6780 6781 argc -= optind; 6782 argv += optind; 6783 6784 get_interval_count(&argc, argv, &interval, &count); 6785 6786 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0) 6787 usage(B_FALSE); 6788 6789 for (;;) { 6790 if ((list = pool_list_get(argc, argv, &cb.cb_proplist, 6791 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL) 6792 return (1); 6793 6794 if (pool_list_count(list) == 0) 6795 break; 6796 6797 cb.cb_namewidth = 0; 6798 (void) 
pool_list_iter(list, B_FALSE, get_namewidth_list, &cb); 6799 6800 if (timestamp_fmt != NODATE) 6801 print_timestamp(timestamp_fmt); 6802 6803 if (!cb.cb_scripted && (first || cb.cb_verbose)) { 6804 print_header(&cb); 6805 first = B_FALSE; 6806 } 6807 ret = pool_list_iter(list, B_TRUE, list_callback, &cb); 6808 6809 if (interval == 0) 6810 break; 6811 6812 if (count != 0 && --count == 0) 6813 break; 6814 6815 pool_list_free(list); 6816 6817 (void) fflush(stdout); 6818 (void) fsleep(interval); 6819 } 6820 6821 if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) { 6822 (void) printf(gettext("no pools available\n")); 6823 ret = 0; 6824 } 6825 6826 pool_list_free(list); 6827 zprop_free_list(cb.cb_proplist); 6828 return (ret); 6829 } 6830 6831 static int 6832 zpool_do_attach_or_replace(int argc, char **argv, int replacing) 6833 { 6834 boolean_t force = B_FALSE; 6835 boolean_t rebuild = B_FALSE; 6836 boolean_t wait = B_FALSE; 6837 int c; 6838 nvlist_t *nvroot; 6839 char *poolname, *old_disk, *new_disk; 6840 zpool_handle_t *zhp; 6841 nvlist_t *props = NULL; 6842 char *propval; 6843 int ret; 6844 6845 /* check options */ 6846 while ((c = getopt(argc, argv, "fo:sw")) != -1) { 6847 switch (c) { 6848 case 'f': 6849 force = B_TRUE; 6850 break; 6851 case 'o': 6852 if ((propval = strchr(optarg, '=')) == NULL) { 6853 (void) fprintf(stderr, gettext("missing " 6854 "'=' for -o option\n")); 6855 usage(B_FALSE); 6856 } 6857 *propval = '\0'; 6858 propval++; 6859 6860 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) || 6861 (add_prop_list(optarg, propval, &props, B_TRUE))) 6862 usage(B_FALSE); 6863 break; 6864 case 's': 6865 rebuild = B_TRUE; 6866 break; 6867 case 'w': 6868 wait = B_TRUE; 6869 break; 6870 case '?': 6871 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6872 optopt); 6873 usage(B_FALSE); 6874 } 6875 } 6876 6877 argc -= optind; 6878 argv += optind; 6879 6880 /* get pool name and check number of arguments */ 6881 if (argc < 1) { 6882 (void) fprintf(stderr, gettext("missing pool name argument\n")); 6883 usage(B_FALSE); 6884 } 6885 6886 poolname = argv[0]; 6887 6888 if (argc < 2) { 6889 (void) fprintf(stderr, 6890 gettext("missing <device> specification\n")); 6891 usage(B_FALSE); 6892 } 6893 6894 old_disk = argv[1]; 6895 6896 if (argc < 3) { 6897 if (!replacing) { 6898 (void) fprintf(stderr, 6899 gettext("missing <new_device> specification\n")); 6900 usage(B_FALSE); 6901 } 6902 new_disk = old_disk; 6903 argc -= 1; 6904 argv += 1; 6905 } else { 6906 new_disk = argv[2]; 6907 argc -= 2; 6908 argv += 2; 6909 } 6910 6911 if (argc > 1) { 6912 (void) fprintf(stderr, gettext("too many arguments\n")); 6913 usage(B_FALSE); 6914 } 6915 6916 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) { 6917 nvlist_free(props); 6918 return (1); 6919 } 6920 6921 if (zpool_get_config(zhp, NULL) == NULL) { 6922 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"), 6923 poolname); 6924 zpool_close(zhp); 6925 nvlist_free(props); 6926 return (1); 6927 } 6928 6929 /* unless manually specified use "ashift" pool property (if set) */ 6930 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) { 6931 int intval; 6932 zprop_source_t src; 6933 char strval[ZPOOL_MAXPROPLEN]; 6934 6935 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src); 6936 if (src != ZPROP_SRC_DEFAULT) { 6937 (void) sprintf(strval, "%" PRId32, intval); 6938 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval, 6939 &props, B_TRUE) == 0); 6940 } 6941 } 6942 6943 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE, 6944 argc, 
argv); 6945 if (nvroot == NULL) { 6946 zpool_close(zhp); 6947 nvlist_free(props); 6948 return (1); 6949 } 6950 6951 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing, 6952 rebuild); 6953 6954 if (ret == 0 && wait) { 6955 zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER; 6956 char raidz_prefix[] = "raidz"; 6957 if (replacing) { 6958 activity = ZPOOL_WAIT_REPLACE; 6959 } else if (strncmp(old_disk, 6960 raidz_prefix, strlen(raidz_prefix)) == 0) { 6961 activity = ZPOOL_WAIT_RAIDZ_EXPAND; 6962 } 6963 ret = zpool_wait(zhp, activity); 6964 } 6965 6966 nvlist_free(props); 6967 nvlist_free(nvroot); 6968 zpool_close(zhp); 6969 6970 return (ret); 6971 } 6972 6973 /* 6974 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device> 6975 * 6976 * -f Force attach, even if <new_device> appears to be in use. 6977 * -s Use sequential instead of healing reconstruction for resilver. 6978 * -o Set property=value. 6979 * -w Wait for replacing to complete before returning 6980 * 6981 * Replace <device> with <new_device>. 6982 */ 6983 int 6984 zpool_do_replace(int argc, char **argv) 6985 { 6986 return (zpool_do_attach_or_replace(argc, argv, B_TRUE)); 6987 } 6988 6989 /* 6990 * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device> 6991 * 6992 * -f Force attach, even if <new_device> appears to be in use. 6993 * -s Use sequential instead of healing reconstruction for resilver. 6994 * -o Set property=value. 6995 * -w Wait for resilvering (mirror) or expansion (raidz) to complete 6996 * before returning. 6997 * 6998 * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type 6999 * mirror or raidz. If <device> is not part of a mirror, then <device> will 7000 * be transformed into a mirror of <device> and <new_device>. When a mirror 7001 * is involved, <new_device> will begin life with a DTL of [0, now], and will 7002 * immediately begin to resilver itself. For the raidz case, a expansion will 7003 * commence and reflow the raidz data across all the disks including the 7004 * <new_device>. 7005 */ 7006 int 7007 zpool_do_attach(int argc, char **argv) 7008 { 7009 return (zpool_do_attach_or_replace(argc, argv, B_FALSE)); 7010 } 7011 7012 /* 7013 * zpool detach [-f] <pool> <device> 7014 * 7015 * -f Force detach of <device>, even if DTLs argue against it 7016 * (not supported yet) 7017 * 7018 * Detach a device from a mirror. The operation will be refused if <device> 7019 * is the last device in the mirror, or if the DTLs indicate that this device 7020 * has the only valid copy of some data. 
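 *
 * For example (pool and device names are purely illustrative):
 *
 *	# zpool detach tank sdb
 *
 * detaches 'sdb' from its mirror in pool 'tank', provided the remaining
 * device(s) still hold a complete copy of the data.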
7021 */ 7022 int 7023 zpool_do_detach(int argc, char **argv) 7024 { 7025 int c; 7026 char *poolname, *path; 7027 zpool_handle_t *zhp; 7028 int ret; 7029 7030 /* check options */ 7031 while ((c = getopt(argc, argv, "")) != -1) { 7032 switch (c) { 7033 case '?': 7034 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7035 optopt); 7036 usage(B_FALSE); 7037 } 7038 } 7039 7040 argc -= optind; 7041 argv += optind; 7042 7043 /* get pool name and check number of arguments */ 7044 if (argc < 1) { 7045 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7046 usage(B_FALSE); 7047 } 7048 7049 if (argc < 2) { 7050 (void) fprintf(stderr, 7051 gettext("missing <device> specification\n")); 7052 usage(B_FALSE); 7053 } 7054 7055 poolname = argv[0]; 7056 path = argv[1]; 7057 7058 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 7059 return (1); 7060 7061 ret = zpool_vdev_detach(zhp, path); 7062 7063 zpool_close(zhp); 7064 7065 return (ret); 7066 } 7067 7068 /* 7069 * zpool split [-gLnP] [-o prop=val] ... 7070 * [-o mntopt] ... 7071 * [-R altroot] <pool> <newpool> [<device> ...] 7072 * 7073 * -g Display guid for individual vdev name. 7074 * -L Follow links when resolving vdev path name. 7075 * -n Do not split the pool, but display the resulting layout if 7076 * it were to be split. 7077 * -o Set property=value, or set mount options. 7078 * -P Display full path for vdev name. 7079 * -R Mount the split-off pool under an alternate root. 7080 * -l Load encryption keys while importing. 7081 * 7082 * Splits the named pool and gives it the new pool name. Devices to be split 7083 * off may be listed, provided that no more than one device is specified 7084 * per top-level vdev mirror. The newly split pool is left in an exported 7085 * state unless -R is specified. 7086 * 7087 * Restrictions: the top-level of the pool pool must only be made up of 7088 * mirrors; all devices in the pool must be healthy; no device may be 7089 * undergoing a resilvering operation. 
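 *
 * For example (pool and device names are purely illustrative):
 *
 *	# zpool split tank tank2
 *
 * detaches one device from each top-level mirror of 'tank' to form the
 * exported pool 'tank2'; adding '-R /mnt' would also import the new pool
 * under the alternate root /mnt.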
7090 */ 7091 int 7092 zpool_do_split(int argc, char **argv) 7093 { 7094 char *srcpool, *newpool, *propval; 7095 char *mntopts = NULL; 7096 splitflags_t flags; 7097 int c, ret = 0; 7098 int ms_status = 0; 7099 boolean_t loadkeys = B_FALSE; 7100 zpool_handle_t *zhp; 7101 nvlist_t *config, *props = NULL; 7102 7103 flags.dryrun = B_FALSE; 7104 flags.import = B_FALSE; 7105 flags.name_flags = 0; 7106 7107 /* check options */ 7108 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) { 7109 switch (c) { 7110 case 'g': 7111 flags.name_flags |= VDEV_NAME_GUID; 7112 break; 7113 case 'L': 7114 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS; 7115 break; 7116 case 'R': 7117 flags.import = B_TRUE; 7118 if (add_prop_list( 7119 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg, 7120 &props, B_TRUE) != 0) { 7121 nvlist_free(props); 7122 usage(B_FALSE); 7123 } 7124 break; 7125 case 'l': 7126 loadkeys = B_TRUE; 7127 break; 7128 case 'n': 7129 flags.dryrun = B_TRUE; 7130 break; 7131 case 'o': 7132 if ((propval = strchr(optarg, '=')) != NULL) { 7133 *propval = '\0'; 7134 propval++; 7135 if (add_prop_list(optarg, propval, 7136 &props, B_TRUE) != 0) { 7137 nvlist_free(props); 7138 usage(B_FALSE); 7139 } 7140 } else { 7141 mntopts = optarg; 7142 } 7143 break; 7144 case 'P': 7145 flags.name_flags |= VDEV_NAME_PATH; 7146 break; 7147 case ':': 7148 (void) fprintf(stderr, gettext("missing argument for " 7149 "'%c' option\n"), optopt); 7150 usage(B_FALSE); 7151 break; 7152 case '?': 7153 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7154 optopt); 7155 usage(B_FALSE); 7156 break; 7157 } 7158 } 7159 7160 if (!flags.import && mntopts != NULL) { 7161 (void) fprintf(stderr, gettext("setting mntopts is only " 7162 "valid when importing the pool\n")); 7163 usage(B_FALSE); 7164 } 7165 7166 if (!flags.import && loadkeys) { 7167 (void) fprintf(stderr, gettext("loading keys is only " 7168 "valid when importing the pool\n")); 7169 usage(B_FALSE); 7170 } 7171 7172 argc -= optind; 7173 argv += optind; 7174 7175 if (argc < 1) { 7176 (void) fprintf(stderr, gettext("Missing pool name\n")); 7177 usage(B_FALSE); 7178 } 7179 if (argc < 2) { 7180 (void) fprintf(stderr, gettext("Missing new pool name\n")); 7181 usage(B_FALSE); 7182 } 7183 7184 srcpool = argv[0]; 7185 newpool = argv[1]; 7186 7187 argc -= 2; 7188 argv += 2; 7189 7190 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) { 7191 nvlist_free(props); 7192 return (1); 7193 } 7194 7195 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv); 7196 if (config == NULL) { 7197 ret = 1; 7198 } else { 7199 if (flags.dryrun) { 7200 (void) printf(gettext("would create '%s' with the " 7201 "following layout:\n\n"), newpool); 7202 print_vdev_tree(NULL, newpool, config, 0, "", 7203 flags.name_flags); 7204 print_vdev_tree(NULL, "dedup", config, 0, 7205 VDEV_ALLOC_BIAS_DEDUP, 0); 7206 print_vdev_tree(NULL, "special", config, 0, 7207 VDEV_ALLOC_BIAS_SPECIAL, 0); 7208 } 7209 } 7210 7211 zpool_close(zhp); 7212 7213 if (ret != 0 || flags.dryrun || !flags.import) { 7214 nvlist_free(config); 7215 nvlist_free(props); 7216 return (ret); 7217 } 7218 7219 /* 7220 * The split was successful. Now we need to open the new 7221 * pool and import it. 
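 * When -R was given, the new pool is opened below, encryption keys are
 * loaded if -l was also specified, and its datasets are mounted and
 * shared; mount or share failures are reported but do not undo the split.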
7222 */ 7223 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) { 7224 nvlist_free(config); 7225 nvlist_free(props); 7226 return (1); 7227 } 7228 7229 if (loadkeys) { 7230 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool); 7231 if (ret != 0) 7232 ret = 1; 7233 } 7234 7235 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) { 7236 ms_status = zpool_enable_datasets(zhp, mntopts, 0, 7237 mount_tp_nthr); 7238 if (ms_status == EZFS_SHAREFAILED) { 7239 (void) fprintf(stderr, gettext("Split was successful, " 7240 "datasets are mounted but sharing of some datasets " 7241 "has failed\n")); 7242 } else if (ms_status == EZFS_MOUNTFAILED) { 7243 (void) fprintf(stderr, gettext("Split was successful" 7244 ", but some datasets could not be mounted\n")); 7245 (void) fprintf(stderr, gettext("Try doing '%s' with a " 7246 "different altroot\n"), "zpool import"); 7247 } 7248 } 7249 zpool_close(zhp); 7250 nvlist_free(config); 7251 nvlist_free(props); 7252 7253 return (ret); 7254 } 7255 7256 7257 /* 7258 * zpool online [--power] <pool> <device> ... 7259 * 7260 * --power: Power on the enclosure slot to the drive (if possible) 7261 */ 7262 int 7263 zpool_do_online(int argc, char **argv) 7264 { 7265 int c, i; 7266 char *poolname; 7267 zpool_handle_t *zhp; 7268 int ret = 0; 7269 vdev_state_t newstate; 7270 int flags = 0; 7271 boolean_t is_power_on = B_FALSE; 7272 struct option long_options[] = { 7273 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 7274 {0, 0, 0, 0} 7275 }; 7276 7277 /* check options */ 7278 while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) { 7279 switch (c) { 7280 case 'e': 7281 flags |= ZFS_ONLINE_EXPAND; 7282 break; 7283 case ZPOOL_OPTION_POWER: 7284 is_power_on = B_TRUE; 7285 break; 7286 case '?': 7287 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7288 optopt); 7289 usage(B_FALSE); 7290 } 7291 } 7292 7293 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT")) 7294 is_power_on = B_TRUE; 7295 7296 argc -= optind; 7297 argv += optind; 7298 7299 /* get pool name and check number of arguments */ 7300 if (argc < 1) { 7301 (void) fprintf(stderr, gettext("missing pool name\n")); 7302 usage(B_FALSE); 7303 } 7304 if (argc < 2) { 7305 (void) fprintf(stderr, gettext("missing device name\n")); 7306 usage(B_FALSE); 7307 } 7308 7309 poolname = argv[0]; 7310 7311 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 7312 return (1); 7313 7314 for (i = 1; i < argc; i++) { 7315 vdev_state_t oldstate; 7316 boolean_t avail_spare, l2cache; 7317 int rc; 7318 7319 if (is_power_on) { 7320 rc = zpool_power_on_and_disk_wait(zhp, argv[i]); 7321 if (rc == ENOTSUP) { 7322 (void) fprintf(stderr, 7323 gettext("Power control not supported\n")); 7324 } 7325 if (rc != 0) 7326 return (rc); 7327 } 7328 7329 nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare, 7330 &l2cache, NULL); 7331 if (tgt == NULL) { 7332 ret = 1; 7333 continue; 7334 } 7335 uint_t vsc; 7336 oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt, 7337 ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state; 7338 if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) { 7339 if (newstate != VDEV_STATE_HEALTHY) { 7340 (void) printf(gettext("warning: device '%s' " 7341 "onlined, but remains in faulted state\n"), 7342 argv[i]); 7343 if (newstate == VDEV_STATE_FAULTED) 7344 (void) printf(gettext("use 'zpool " 7345 "clear' to restore a faulted " 7346 "device\n")); 7347 else 7348 (void) printf(gettext("use 'zpool " 7349 "replace' to replace devices " 7350 "that are no longer present\n")); 7351 if ((flags & ZFS_ONLINE_EXPAND)) { 7352 (void) 
printf(gettext("%s: failed " 7353 "to expand usable space on " 7354 "unhealthy device '%s'\n"), 7355 (oldstate >= VDEV_STATE_DEGRADED ? 7356 "error" : "warning"), argv[i]); 7357 if (oldstate >= VDEV_STATE_DEGRADED) { 7358 ret = 1; 7359 break; 7360 } 7361 } 7362 } 7363 } else { 7364 ret = 1; 7365 } 7366 } 7367 7368 zpool_close(zhp); 7369 7370 return (ret); 7371 } 7372 7373 /* 7374 * zpool offline [-ft]|[--power] <pool> <device> ... 7375 * 7376 * 7377 * -f Force the device into a faulted state. 7378 * 7379 * -t Only take the device off-line temporarily. The offline/faulted 7380 * state will not be persistent across reboots. 7381 * 7382 * --power Power off the enclosure slot to the drive (if possible) 7383 */ 7384 int 7385 zpool_do_offline(int argc, char **argv) 7386 { 7387 int c, i; 7388 char *poolname; 7389 zpool_handle_t *zhp; 7390 int ret = 0; 7391 boolean_t istmp = B_FALSE; 7392 boolean_t fault = B_FALSE; 7393 boolean_t is_power_off = B_FALSE; 7394 7395 struct option long_options[] = { 7396 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 7397 {0, 0, 0, 0} 7398 }; 7399 7400 /* check options */ 7401 while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) { 7402 switch (c) { 7403 case 'f': 7404 fault = B_TRUE; 7405 break; 7406 case 't': 7407 istmp = B_TRUE; 7408 break; 7409 case ZPOOL_OPTION_POWER: 7410 is_power_off = B_TRUE; 7411 break; 7412 case '?': 7413 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7414 optopt); 7415 usage(B_FALSE); 7416 } 7417 } 7418 7419 if (is_power_off && fault) { 7420 (void) fprintf(stderr, 7421 gettext("-0 and -f cannot be used together\n")); 7422 usage(B_FALSE); 7423 return (1); 7424 } 7425 7426 if (is_power_off && istmp) { 7427 (void) fprintf(stderr, 7428 gettext("-0 and -t cannot be used together\n")); 7429 usage(B_FALSE); 7430 return (1); 7431 } 7432 7433 argc -= optind; 7434 argv += optind; 7435 7436 /* get pool name and check number of arguments */ 7437 if (argc < 1) { 7438 (void) fprintf(stderr, gettext("missing pool name\n")); 7439 usage(B_FALSE); 7440 } 7441 if (argc < 2) { 7442 (void) fprintf(stderr, gettext("missing device name\n")); 7443 usage(B_FALSE); 7444 } 7445 7446 poolname = argv[0]; 7447 7448 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 7449 return (1); 7450 7451 for (i = 1; i < argc; i++) { 7452 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]); 7453 if (is_power_off) { 7454 /* 7455 * Note: we have to power off first, then set REMOVED, 7456 * or else zpool_vdev_set_removed_state() returns 7457 * EAGAIN. 7458 */ 7459 ret = zpool_power_off(zhp, argv[i]); 7460 if (ret != 0) { 7461 (void) fprintf(stderr, "%s %s %d\n", 7462 gettext("unable to power off slot for"), 7463 argv[i], ret); 7464 } 7465 zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE); 7466 7467 } else if (fault) { 7468 vdev_aux_t aux; 7469 if (istmp == B_FALSE) { 7470 /* Force the fault to persist across imports */ 7471 aux = VDEV_AUX_EXTERNAL_PERSIST; 7472 } else { 7473 aux = VDEV_AUX_EXTERNAL; 7474 } 7475 7476 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0) 7477 ret = 1; 7478 } else { 7479 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0) 7480 ret = 1; 7481 } 7482 } 7483 7484 zpool_close(zhp); 7485 7486 return (ret); 7487 } 7488 7489 /* 7490 * zpool clear [-nF]|[--power] <pool> [device] 7491 * 7492 * Clear all errors associated with a pool or a particular device. 
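 *
 * For example (pool and device names are purely illustrative):
 *
 *	# zpool clear tank
 *
 * clears the error counts of every device in pool 'tank', while
 *
 *	# zpool clear tank sdb
 *
 * clears only the errors recorded against device 'sdb'.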
7493 */ 7494 int 7495 zpool_do_clear(int argc, char **argv) 7496 { 7497 int c; 7498 int ret = 0; 7499 boolean_t dryrun = B_FALSE; 7500 boolean_t do_rewind = B_FALSE; 7501 boolean_t xtreme_rewind = B_FALSE; 7502 boolean_t is_power_on = B_FALSE; 7503 uint32_t rewind_policy = ZPOOL_NO_REWIND; 7504 nvlist_t *policy = NULL; 7505 zpool_handle_t *zhp; 7506 char *pool, *device; 7507 7508 struct option long_options[] = { 7509 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 7510 {0, 0, 0, 0} 7511 }; 7512 7513 /* check options */ 7514 while ((c = getopt_long(argc, argv, "FnX", long_options, 7515 NULL)) != -1) { 7516 switch (c) { 7517 case 'F': 7518 do_rewind = B_TRUE; 7519 break; 7520 case 'n': 7521 dryrun = B_TRUE; 7522 break; 7523 case 'X': 7524 xtreme_rewind = B_TRUE; 7525 break; 7526 case ZPOOL_OPTION_POWER: 7527 is_power_on = B_TRUE; 7528 break; 7529 case '?': 7530 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7531 optopt); 7532 usage(B_FALSE); 7533 } 7534 } 7535 7536 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT")) 7537 is_power_on = B_TRUE; 7538 7539 argc -= optind; 7540 argv += optind; 7541 7542 if (argc < 1) { 7543 (void) fprintf(stderr, gettext("missing pool name\n")); 7544 usage(B_FALSE); 7545 } 7546 7547 if (argc > 2) { 7548 (void) fprintf(stderr, gettext("too many arguments\n")); 7549 usage(B_FALSE); 7550 } 7551 7552 if ((dryrun || xtreme_rewind) && !do_rewind) { 7553 (void) fprintf(stderr, 7554 gettext("-n or -X only meaningful with -F\n")); 7555 usage(B_FALSE); 7556 } 7557 if (dryrun) 7558 rewind_policy = ZPOOL_TRY_REWIND; 7559 else if (do_rewind) 7560 rewind_policy = ZPOOL_DO_REWIND; 7561 if (xtreme_rewind) 7562 rewind_policy |= ZPOOL_EXTREME_REWIND; 7563 7564 /* In future, further rewind policy choices can be passed along here */ 7565 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 7566 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, 7567 rewind_policy) != 0) { 7568 return (1); 7569 } 7570 7571 pool = argv[0]; 7572 device = argc == 2 ? argv[1] : NULL; 7573 7574 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) { 7575 nvlist_free(policy); 7576 return (1); 7577 } 7578 7579 if (is_power_on) { 7580 if (device == NULL) { 7581 zpool_power_on_pool_and_wait_for_devices(zhp); 7582 } else { 7583 zpool_power_on_and_disk_wait(zhp, device); 7584 } 7585 } 7586 7587 if (zpool_clear(zhp, device, policy) != 0) 7588 ret = 1; 7589 7590 zpool_close(zhp); 7591 7592 nvlist_free(policy); 7593 7594 return (ret); 7595 } 7596 7597 /* 7598 * zpool reguid <pool> 7599 */ 7600 int 7601 zpool_do_reguid(int argc, char **argv) 7602 { 7603 int c; 7604 char *poolname; 7605 zpool_handle_t *zhp; 7606 int ret = 0; 7607 7608 /* check options */ 7609 while ((c = getopt(argc, argv, "")) != -1) { 7610 switch (c) { 7611 case '?': 7612 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7613 optopt); 7614 usage(B_FALSE); 7615 } 7616 } 7617 7618 argc -= optind; 7619 argv += optind; 7620 7621 /* get pool name and check number of arguments */ 7622 if (argc < 1) { 7623 (void) fprintf(stderr, gettext("missing pool name\n")); 7624 usage(B_FALSE); 7625 } 7626 7627 if (argc > 1) { 7628 (void) fprintf(stderr, gettext("too many arguments\n")); 7629 usage(B_FALSE); 7630 } 7631 7632 poolname = argv[0]; 7633 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 7634 return (1); 7635 7636 ret = zpool_reguid(zhp); 7637 7638 zpool_close(zhp); 7639 return (ret); 7640 } 7641 7642 7643 /* 7644 * zpool reopen <pool> 7645 * 7646 * Reopen the pool so that the kernel can update the sizes of all vdevs. 
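 *
 * -n	Do not restart an in-progress scrub when the devices are reopened
 *	(scrub_restart is passed as B_FALSE to zpool_reopen_one()).
 *
 * With no pool argument, every imported pool is reopened.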
7647 */ 7648 int 7649 zpool_do_reopen(int argc, char **argv) 7650 { 7651 int c; 7652 int ret = 0; 7653 boolean_t scrub_restart = B_TRUE; 7654 7655 /* check options */ 7656 while ((c = getopt(argc, argv, "n")) != -1) { 7657 switch (c) { 7658 case 'n': 7659 scrub_restart = B_FALSE; 7660 break; 7661 case '?': 7662 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7663 optopt); 7664 usage(B_FALSE); 7665 } 7666 } 7667 7668 argc -= optind; 7669 argv += optind; 7670 7671 /* if argc == 0 we will execute zpool_reopen_one on all pools */ 7672 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7673 B_FALSE, zpool_reopen_one, &scrub_restart); 7674 7675 return (ret); 7676 } 7677 7678 typedef struct scrub_cbdata { 7679 int cb_type; 7680 pool_scrub_cmd_t cb_scrub_cmd; 7681 } scrub_cbdata_t; 7682 7683 static boolean_t 7684 zpool_has_checkpoint(zpool_handle_t *zhp) 7685 { 7686 nvlist_t *config, *nvroot; 7687 7688 config = zpool_get_config(zhp, NULL); 7689 7690 if (config != NULL) { 7691 pool_checkpoint_stat_t *pcs = NULL; 7692 uint_t c; 7693 7694 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 7695 (void) nvlist_lookup_uint64_array(nvroot, 7696 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 7697 7698 if (pcs == NULL || pcs->pcs_state == CS_NONE) 7699 return (B_FALSE); 7700 7701 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS || 7702 pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 7703 return (B_TRUE); 7704 } 7705 7706 return (B_FALSE); 7707 } 7708 7709 static int 7710 scrub_callback(zpool_handle_t *zhp, void *data) 7711 { 7712 scrub_cbdata_t *cb = data; 7713 int err; 7714 7715 /* 7716 * Ignore faulted pools. 7717 */ 7718 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 7719 (void) fprintf(stderr, gettext("cannot scan '%s': pool is " 7720 "currently unavailable\n"), zpool_get_name(zhp)); 7721 return (1); 7722 } 7723 7724 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd); 7725 7726 if (err == 0 && zpool_has_checkpoint(zhp) && 7727 cb->cb_type == POOL_SCAN_SCRUB) { 7728 (void) printf(gettext("warning: will not scrub state that " 7729 "belongs to the checkpoint of pool '%s'\n"), 7730 zpool_get_name(zhp)); 7731 } 7732 7733 return (err != 0); 7734 } 7735 7736 static int 7737 wait_callback(zpool_handle_t *zhp, void *data) 7738 { 7739 zpool_wait_activity_t *act = data; 7740 return (zpool_wait(zhp, *act)); 7741 } 7742 7743 /* 7744 * zpool scrub [-s | -p] [-w] [-e] <pool> ... 7745 * 7746 * -e Only scrub blocks in the error log. 7747 * -s Stop. Stops any in-progress scrub. 7748 * -p Pause. Pause in-progress scrub. 7749 * -w Wait. Blocks until scrub has completed. 
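 *
 * For example (the pool name is purely illustrative):
 *
 *	# zpool scrub -w tank		scrub 'tank' and wait for completion
 *	# zpool scrub -p tank		pause the in-progress scrub
 *	# zpool scrub tank		resume the paused scrub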
7750 */ 7751 int 7752 zpool_do_scrub(int argc, char **argv) 7753 { 7754 int c; 7755 scrub_cbdata_t cb; 7756 boolean_t wait = B_FALSE; 7757 int error; 7758 7759 cb.cb_type = POOL_SCAN_SCRUB; 7760 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 7761 7762 boolean_t is_error_scrub = B_FALSE; 7763 boolean_t is_pause = B_FALSE; 7764 boolean_t is_stop = B_FALSE; 7765 7766 /* check options */ 7767 while ((c = getopt(argc, argv, "spwe")) != -1) { 7768 switch (c) { 7769 case 'e': 7770 is_error_scrub = B_TRUE; 7771 break; 7772 case 's': 7773 is_stop = B_TRUE; 7774 break; 7775 case 'p': 7776 is_pause = B_TRUE; 7777 break; 7778 case 'w': 7779 wait = B_TRUE; 7780 break; 7781 case '?': 7782 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7783 optopt); 7784 usage(B_FALSE); 7785 } 7786 } 7787 7788 if (is_pause && is_stop) { 7789 (void) fprintf(stderr, gettext("invalid option " 7790 "combination :-s and -p are mutually exclusive\n")); 7791 usage(B_FALSE); 7792 } else { 7793 if (is_error_scrub) 7794 cb.cb_type = POOL_SCAN_ERRORSCRUB; 7795 7796 if (is_pause) { 7797 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE; 7798 } else if (is_stop) { 7799 cb.cb_type = POOL_SCAN_NONE; 7800 } else { 7801 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 7802 } 7803 } 7804 7805 if (wait && (cb.cb_type == POOL_SCAN_NONE || 7806 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) { 7807 (void) fprintf(stderr, gettext("invalid option combination: " 7808 "-w cannot be used with -p or -s\n")); 7809 usage(B_FALSE); 7810 } 7811 7812 argc -= optind; 7813 argv += optind; 7814 7815 if (argc < 1) { 7816 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7817 usage(B_FALSE); 7818 } 7819 7820 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7821 B_FALSE, scrub_callback, &cb); 7822 7823 if (wait && !error) { 7824 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB; 7825 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7826 B_FALSE, wait_callback, &act); 7827 } 7828 7829 return (error); 7830 } 7831 7832 /* 7833 * zpool resilver <pool> ... 7834 * 7835 * Restarts any in-progress resilver 7836 */ 7837 int 7838 zpool_do_resilver(int argc, char **argv) 7839 { 7840 int c; 7841 scrub_cbdata_t cb; 7842 7843 cb.cb_type = POOL_SCAN_RESILVER; 7844 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 7845 7846 /* check options */ 7847 while ((c = getopt(argc, argv, "")) != -1) { 7848 switch (c) { 7849 case '?': 7850 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7851 optopt); 7852 usage(B_FALSE); 7853 } 7854 } 7855 7856 argc -= optind; 7857 argv += optind; 7858 7859 if (argc < 1) { 7860 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7861 usage(B_FALSE); 7862 } 7863 7864 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7865 B_FALSE, scrub_callback, &cb)); 7866 } 7867 7868 /* 7869 * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...] 7870 * 7871 * -c Cancel. Ends any in-progress trim. 7872 * -d Secure trim. Requires kernel and device support. 7873 * -r <rate> Sets the TRIM rate in bytes (per second). Supports 7874 * adding a multiplier suffix such as 'k' or 'm'. 7875 * -s Suspend. TRIM can then be restarted with no flags. 7876 * -w Wait. Blocks until trimming has completed. 
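 *
 * For example (the pool name is purely illustrative):
 *
 *	# zpool trim -d -r 100M tank	secure trim of all leaf vdevs at
 *					roughly 100M bytes per second
 *	# zpool trim -s tank		suspend the trim
 *	# zpool trim tank		restart the suspended trim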
7877 */ 7878 int 7879 zpool_do_trim(int argc, char **argv) 7880 { 7881 struct option long_options[] = { 7882 {"cancel", no_argument, NULL, 'c'}, 7883 {"secure", no_argument, NULL, 'd'}, 7884 {"rate", required_argument, NULL, 'r'}, 7885 {"suspend", no_argument, NULL, 's'}, 7886 {"wait", no_argument, NULL, 'w'}, 7887 {0, 0, 0, 0} 7888 }; 7889 7890 pool_trim_func_t cmd_type = POOL_TRIM_START; 7891 uint64_t rate = 0; 7892 boolean_t secure = B_FALSE; 7893 boolean_t wait = B_FALSE; 7894 7895 int c; 7896 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL)) 7897 != -1) { 7898 switch (c) { 7899 case 'c': 7900 if (cmd_type != POOL_TRIM_START && 7901 cmd_type != POOL_TRIM_CANCEL) { 7902 (void) fprintf(stderr, gettext("-c cannot be " 7903 "combined with other options\n")); 7904 usage(B_FALSE); 7905 } 7906 cmd_type = POOL_TRIM_CANCEL; 7907 break; 7908 case 'd': 7909 if (cmd_type != POOL_TRIM_START) { 7910 (void) fprintf(stderr, gettext("-d cannot be " 7911 "combined with the -c or -s options\n")); 7912 usage(B_FALSE); 7913 } 7914 secure = B_TRUE; 7915 break; 7916 case 'r': 7917 if (cmd_type != POOL_TRIM_START) { 7918 (void) fprintf(stderr, gettext("-r cannot be " 7919 "combined with the -c or -s options\n")); 7920 usage(B_FALSE); 7921 } 7922 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) { 7923 (void) fprintf(stderr, "%s: %s\n", 7924 gettext("invalid value for rate"), 7925 libzfs_error_description(g_zfs)); 7926 usage(B_FALSE); 7927 } 7928 break; 7929 case 's': 7930 if (cmd_type != POOL_TRIM_START && 7931 cmd_type != POOL_TRIM_SUSPEND) { 7932 (void) fprintf(stderr, gettext("-s cannot be " 7933 "combined with other options\n")); 7934 usage(B_FALSE); 7935 } 7936 cmd_type = POOL_TRIM_SUSPEND; 7937 break; 7938 case 'w': 7939 wait = B_TRUE; 7940 break; 7941 case '?': 7942 if (optopt != 0) { 7943 (void) fprintf(stderr, 7944 gettext("invalid option '%c'\n"), optopt); 7945 } else { 7946 (void) fprintf(stderr, 7947 gettext("invalid option '%s'\n"), 7948 argv[optind - 1]); 7949 } 7950 usage(B_FALSE); 7951 } 7952 } 7953 7954 argc -= optind; 7955 argv += optind; 7956 7957 if (argc < 1) { 7958 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7959 usage(B_FALSE); 7960 return (-1); 7961 } 7962 7963 if (wait && (cmd_type != POOL_TRIM_START)) { 7964 (void) fprintf(stderr, gettext("-w cannot be used with -c or " 7965 "-s\n")); 7966 usage(B_FALSE); 7967 } 7968 7969 char *poolname = argv[0]; 7970 zpool_handle_t *zhp = zpool_open(g_zfs, poolname); 7971 if (zhp == NULL) 7972 return (-1); 7973 7974 trimflags_t trim_flags = { 7975 .secure = secure, 7976 .rate = rate, 7977 .wait = wait, 7978 }; 7979 7980 nvlist_t *vdevs = fnvlist_alloc(); 7981 if (argc == 1) { 7982 /* no individual leaf vdevs specified, so add them all */ 7983 nvlist_t *config = zpool_get_config(zhp, NULL); 7984 nvlist_t *nvroot = fnvlist_lookup_nvlist(config, 7985 ZPOOL_CONFIG_VDEV_TREE); 7986 zpool_collect_leaves(zhp, nvroot, vdevs); 7987 trim_flags.fullpool = B_TRUE; 7988 } else { 7989 trim_flags.fullpool = B_FALSE; 7990 for (int i = 1; i < argc; i++) { 7991 fnvlist_add_boolean(vdevs, argv[i]); 7992 } 7993 } 7994 7995 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags); 7996 7997 fnvlist_free(vdevs); 7998 zpool_close(zhp); 7999 8000 return (error); 8001 } 8002 8003 /* 8004 * Converts a total number of seconds to a human readable string broken 8005 * down in to days/hours/minutes/seconds. 
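 *
 * For example, a total of 93784 seconds is rendered as "1 days 02:03:04"
 * and 7384 seconds as "02:03:04".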
8006 */ 8007 static void 8008 secs_to_dhms(uint64_t total, char *buf) 8009 { 8010 uint64_t days = total / 60 / 60 / 24; 8011 uint64_t hours = (total / 60 / 60) % 24; 8012 uint64_t mins = (total / 60) % 60; 8013 uint64_t secs = (total % 60); 8014 8015 if (days > 0) { 8016 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu", 8017 (u_longlong_t)days, (u_longlong_t)hours, 8018 (u_longlong_t)mins, (u_longlong_t)secs); 8019 } else { 8020 (void) sprintf(buf, "%02llu:%02llu:%02llu", 8021 (u_longlong_t)hours, (u_longlong_t)mins, 8022 (u_longlong_t)secs); 8023 } 8024 } 8025 8026 /* 8027 * Print out detailed error scrub status. 8028 */ 8029 static void 8030 print_err_scrub_status(pool_scan_stat_t *ps) 8031 { 8032 time_t start, end, pause; 8033 uint64_t total_secs_left; 8034 uint64_t secs_left, mins_left, hours_left, days_left; 8035 uint64_t examined, to_be_examined; 8036 8037 if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) { 8038 return; 8039 } 8040 8041 (void) printf(gettext(" scrub: ")); 8042 8043 start = ps->pss_error_scrub_start; 8044 end = ps->pss_error_scrub_end; 8045 pause = ps->pss_pass_error_scrub_pause; 8046 examined = ps->pss_error_scrub_examined; 8047 to_be_examined = ps->pss_error_scrub_to_be_examined; 8048 8049 assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB); 8050 8051 if (ps->pss_error_scrub_state == DSS_FINISHED) { 8052 total_secs_left = end - start; 8053 days_left = total_secs_left / 60 / 60 / 24; 8054 hours_left = (total_secs_left / 60 / 60) % 24; 8055 mins_left = (total_secs_left / 60) % 60; 8056 secs_left = (total_secs_left % 60); 8057 8058 (void) printf(gettext("scrubbed %llu error blocks in %llu days " 8059 "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined, 8060 (u_longlong_t)days_left, (u_longlong_t)hours_left, 8061 (u_longlong_t)mins_left, (u_longlong_t)secs_left, 8062 ctime(&end)); 8063 8064 return; 8065 } else if (ps->pss_error_scrub_state == DSS_CANCELED) { 8066 (void) printf(gettext("error scrub canceled on %s"), 8067 ctime(&end)); 8068 return; 8069 } 8070 assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING); 8071 8072 /* Error scrub is in progress. */ 8073 if (pause == 0) { 8074 (void) printf(gettext("error scrub in progress since %s"), 8075 ctime(&start)); 8076 } else { 8077 (void) printf(gettext("error scrub paused since %s"), 8078 ctime(&pause)); 8079 (void) printf(gettext("\terror scrub started on %s"), 8080 ctime(&start)); 8081 } 8082 8083 double fraction_done = (double)examined / (to_be_examined + examined); 8084 (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error" 8085 " blocks"), 100 * fraction_done, (u_longlong_t)examined); 8086 8087 (void) printf("\n"); 8088 } 8089 8090 /* 8091 * Print out detailed scrub status. 8092 */ 8093 static void 8094 print_scan_scrub_resilver_status(pool_scan_stat_t *ps) 8095 { 8096 time_t start, end, pause; 8097 uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i; 8098 uint64_t elapsed, scan_rate, issue_rate; 8099 double fraction_done; 8100 char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7]; 8101 char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32]; 8102 8103 printf(" "); 8104 printf_color(ANSI_BOLD, gettext("scan:")); 8105 printf(" "); 8106 8107 /* If there's never been a scan, there's not much to say. 
*/ 8108 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE || 8109 ps->pss_func >= POOL_SCAN_FUNCS) { 8110 (void) printf(gettext("none requested\n")); 8111 return; 8112 } 8113 8114 start = ps->pss_start_time; 8115 end = ps->pss_end_time; 8116 pause = ps->pss_pass_scrub_pause; 8117 8118 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf)); 8119 8120 int is_resilver = ps->pss_func == POOL_SCAN_RESILVER; 8121 int is_scrub = ps->pss_func == POOL_SCAN_SCRUB; 8122 assert(is_resilver || is_scrub); 8123 8124 /* Scan is finished or canceled. */ 8125 if (ps->pss_state == DSS_FINISHED) { 8126 secs_to_dhms(end - start, time_buf); 8127 8128 if (is_scrub) { 8129 (void) printf(gettext("scrub repaired %s " 8130 "in %s with %llu errors on %s"), processed_buf, 8131 time_buf, (u_longlong_t)ps->pss_errors, 8132 ctime(&end)); 8133 } else if (is_resilver) { 8134 (void) printf(gettext("resilvered %s " 8135 "in %s with %llu errors on %s"), processed_buf, 8136 time_buf, (u_longlong_t)ps->pss_errors, 8137 ctime(&end)); 8138 } 8139 return; 8140 } else if (ps->pss_state == DSS_CANCELED) { 8141 if (is_scrub) { 8142 (void) printf(gettext("scrub canceled on %s"), 8143 ctime(&end)); 8144 } else if (is_resilver) { 8145 (void) printf(gettext("resilver canceled on %s"), 8146 ctime(&end)); 8147 } 8148 return; 8149 } 8150 8151 assert(ps->pss_state == DSS_SCANNING); 8152 8153 /* Scan is in progress. Resilvers can't be paused. */ 8154 if (is_scrub) { 8155 if (pause == 0) { 8156 (void) printf(gettext("scrub in progress since %s"), 8157 ctime(&start)); 8158 } else { 8159 (void) printf(gettext("scrub paused since %s"), 8160 ctime(&pause)); 8161 (void) printf(gettext("\tscrub started on %s"), 8162 ctime(&start)); 8163 } 8164 } else if (is_resilver) { 8165 (void) printf(gettext("resilver in progress since %s"), 8166 ctime(&start)); 8167 } 8168 8169 scanned = ps->pss_examined; 8170 pass_scanned = ps->pss_pass_exam; 8171 issued = ps->pss_issued; 8172 pass_issued = ps->pss_pass_issued; 8173 total_s = ps->pss_to_examine; 8174 total_i = ps->pss_to_examine - ps->pss_skipped; 8175 8176 /* we are only done with a block once we have issued the IO for it */ 8177 fraction_done = (double)issued / total_i; 8178 8179 /* elapsed time for this pass, rounding up to 1 if it's 0 */ 8180 elapsed = time(NULL) - ps->pss_pass_start; 8181 elapsed -= ps->pss_pass_scrub_spent_paused; 8182 elapsed = (elapsed != 0) ? 
elapsed : 1; 8183 8184 scan_rate = pass_scanned / elapsed; 8185 issue_rate = pass_issued / elapsed; 8186 8187 /* format all of the numbers we will be reporting */ 8188 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf)); 8189 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf)); 8190 zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf)); 8191 zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf)); 8192 8193 /* do not print estimated time if we have a paused scrub */ 8194 (void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf); 8195 if (pause == 0 && scan_rate > 0) { 8196 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf)); 8197 (void) printf(gettext(" at %s/s"), srate_buf); 8198 } 8199 (void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf); 8200 if (pause == 0 && issue_rate > 0) { 8201 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf)); 8202 (void) printf(gettext(" at %s/s"), irate_buf); 8203 } 8204 (void) printf(gettext("\n")); 8205 8206 if (is_resilver) { 8207 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 8208 processed_buf, 100 * fraction_done); 8209 } else if (is_scrub) { 8210 (void) printf(gettext("\t%s repaired, %.2f%% done"), 8211 processed_buf, 100 * fraction_done); 8212 } 8213 8214 if (pause == 0) { 8215 /* 8216 * Only provide an estimate iff: 8217 * 1) we haven't yet issued all we expected, and 8218 * 2) the issue rate exceeds 10 MB/s, and 8219 * 3) it's either: 8220 * a) a resilver which has started repairs, or 8221 * b) a scrub which has entered the issue phase. 8222 */ 8223 if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 && 8224 ((is_resilver && ps->pss_processed > 0) || 8225 (is_scrub && issued > 0))) { 8226 secs_to_dhms((total_i - issued) / issue_rate, time_buf); 8227 (void) printf(gettext(", %s to go\n"), time_buf); 8228 } else { 8229 (void) printf(gettext(", no estimated " 8230 "completion time\n")); 8231 } 8232 } else { 8233 (void) printf(gettext("\n")); 8234 } 8235 } 8236 8237 static void 8238 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name) 8239 { 8240 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE) 8241 return; 8242 8243 printf(" "); 8244 printf_color(ANSI_BOLD, gettext("scan:")); 8245 printf(" "); 8246 8247 uint64_t bytes_scanned = vrs->vrs_bytes_scanned; 8248 uint64_t bytes_issued = vrs->vrs_bytes_issued; 8249 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt; 8250 uint64_t bytes_est_s = vrs->vrs_bytes_est; 8251 uint64_t bytes_est_i = vrs->vrs_bytes_est; 8252 if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8) 8253 bytes_est_i -= vrs->vrs_pass_bytes_skipped; 8254 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned / 8255 (vrs->vrs_pass_time_ms + 1)) * 1000; 8256 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued / 8257 (vrs->vrs_pass_time_ms + 1)) * 1000; 8258 double scan_pct = MIN((double)bytes_scanned * 100 / 8259 (bytes_est_s + 1), 100); 8260 8261 /* Format all of the numbers we will be reporting */ 8262 char bytes_scanned_buf[7], bytes_issued_buf[7]; 8263 char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7]; 8264 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32]; 8265 zfs_nicebytes(bytes_scanned, bytes_scanned_buf, 8266 sizeof (bytes_scanned_buf)); 8267 zfs_nicebytes(bytes_issued, bytes_issued_buf, 8268 sizeof (bytes_issued_buf)); 8269 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf, 8270 sizeof (bytes_rebuilt_buf)); 8271 zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf)); 8272 zfs_nicebytes(bytes_est_i, 
bytes_est_i_buf, sizeof (bytes_est_i_buf)); 8273 8274 time_t start = vrs->vrs_start_time; 8275 time_t end = vrs->vrs_end_time; 8276 8277 /* Rebuild is finished or canceled. */ 8278 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) { 8279 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf); 8280 (void) printf(gettext("resilvered (%s) %s in %s " 8281 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf, 8282 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end)); 8283 return; 8284 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) { 8285 (void) printf(gettext("resilver (%s) canceled on %s"), 8286 vdev_name, ctime(&end)); 8287 return; 8288 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 8289 (void) printf(gettext("resilver (%s) in progress since %s"), 8290 vdev_name, ctime(&start)); 8291 } 8292 8293 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE); 8294 8295 (void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf, 8296 bytes_est_s_buf); 8297 if (scan_rate > 0) { 8298 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf)); 8299 (void) printf(gettext(" at %s/s"), scan_rate_buf); 8300 } 8301 (void) printf(gettext(", %s / %s issued"), bytes_issued_buf, 8302 bytes_est_i_buf); 8303 if (issue_rate > 0) { 8304 zfs_nicebytes(issue_rate, issue_rate_buf, 8305 sizeof (issue_rate_buf)); 8306 (void) printf(gettext(" at %s/s"), issue_rate_buf); 8307 } 8308 (void) printf(gettext("\n")); 8309 8310 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 8311 bytes_rebuilt_buf, scan_pct); 8312 8313 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 8314 if (bytes_est_s >= bytes_scanned && 8315 scan_rate >= 10 * 1024 * 1024) { 8316 secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate, 8317 time_buf); 8318 (void) printf(gettext(", %s to go\n"), time_buf); 8319 } else { 8320 (void) printf(gettext(", no estimated " 8321 "completion time\n")); 8322 } 8323 } else { 8324 (void) printf(gettext("\n")); 8325 } 8326 } 8327 8328 /* 8329 * Print rebuild status for top-level vdevs. 8330 */ 8331 static void 8332 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot) 8333 { 8334 nvlist_t **child; 8335 uint_t children; 8336 8337 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 8338 &child, &children) != 0) 8339 children = 0; 8340 8341 for (uint_t c = 0; c < children; c++) { 8342 vdev_rebuild_stat_t *vrs; 8343 uint_t i; 8344 8345 if (nvlist_lookup_uint64_array(child[c], 8346 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 8347 char *name = zpool_vdev_name(g_zfs, zhp, 8348 child[c], VDEV_NAME_TYPE_ID); 8349 print_rebuild_status_impl(vrs, i, name); 8350 free(name); 8351 } 8352 } 8353 } 8354 8355 /* 8356 * As we don't scrub checkpointed blocks, we want to warn the user that we 8357 * skipped scanning some blocks if a checkpoint exists or existed at any 8358 * time during the scan. If a sequential instead of healing reconstruction 8359 * was performed then the blocks were reconstructed. However, their checksums 8360 * have not been verified so we still print the warning. 
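The warning is only suppressed when the scan finished or was canceled before the checkpoint was created, since no checkpointed blocks could have been skipped in that case.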
8361 */ 8362 static void 8363 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs) 8364 { 8365 if (ps == NULL || pcs == NULL) 8366 return; 8367 8368 if (pcs->pcs_state == CS_NONE || 8369 pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 8370 return; 8371 8372 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS); 8373 8374 if (ps->pss_state == DSS_NONE) 8375 return; 8376 8377 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) && 8378 ps->pss_end_time < pcs->pcs_start_time) 8379 return; 8380 8381 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) { 8382 (void) printf(gettext(" scan warning: skipped blocks " 8383 "that are only referenced by the checkpoint.\n")); 8384 } else { 8385 assert(ps->pss_state == DSS_SCANNING); 8386 (void) printf(gettext(" scan warning: skipping blocks " 8387 "that are only referenced by the checkpoint.\n")); 8388 } 8389 } 8390 8391 /* 8392 * Returns B_TRUE if there is an active rebuild in progress. Otherwise, 8393 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for 8394 * the last completed (or cancelled) rebuild. 8395 */ 8396 static boolean_t 8397 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time) 8398 { 8399 nvlist_t **child; 8400 uint_t children; 8401 boolean_t rebuilding = B_FALSE; 8402 uint64_t end_time = 0; 8403 8404 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 8405 &child, &children) != 0) 8406 children = 0; 8407 8408 for (uint_t c = 0; c < children; c++) { 8409 vdev_rebuild_stat_t *vrs; 8410 uint_t i; 8411 8412 if (nvlist_lookup_uint64_array(child[c], 8413 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 8414 8415 if (vrs->vrs_end_time > end_time) 8416 end_time = vrs->vrs_end_time; 8417 8418 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 8419 rebuilding = B_TRUE; 8420 end_time = 0; 8421 break; 8422 } 8423 } 8424 } 8425 8426 if (rebuild_end_time != NULL) 8427 *rebuild_end_time = end_time; 8428 8429 return (rebuilding); 8430 } 8431 8432 /* 8433 * Print the scan status. 8434 */ 8435 static void 8436 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot) 8437 { 8438 uint64_t rebuild_end_time = 0, resilver_end_time = 0; 8439 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE; 8440 boolean_t have_errorscrub = B_FALSE; 8441 boolean_t active_resilver = B_FALSE; 8442 pool_checkpoint_stat_t *pcs = NULL; 8443 pool_scan_stat_t *ps = NULL; 8444 uint_t c; 8445 time_t scrub_start = 0, errorscrub_start = 0; 8446 8447 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS, 8448 (uint64_t **)&ps, &c) == 0) { 8449 if (ps->pss_func == POOL_SCAN_RESILVER) { 8450 resilver_end_time = ps->pss_end_time; 8451 active_resilver = (ps->pss_state == DSS_SCANNING); 8452 } 8453 8454 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER); 8455 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB); 8456 scrub_start = ps->pss_start_time; 8457 if (c > offsetof(pool_scan_stat_t, 8458 pss_pass_error_scrub_pause) / 8) { 8459 have_errorscrub = (ps->pss_error_scrub_func == 8460 POOL_SCAN_ERRORSCRUB); 8461 errorscrub_start = ps->pss_error_scrub_start; 8462 } 8463 } 8464 8465 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time); 8466 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0)); 8467 8468 /* Always print the scrub status when available. 
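If both a regular scrub and an error scrub have been run, show whichever one was started more recently.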
*/ 8469 if (have_scrub && scrub_start > errorscrub_start) 8470 print_scan_scrub_resilver_status(ps); 8471 else if (have_errorscrub && errorscrub_start >= scrub_start) 8472 print_err_scrub_status(ps); 8473 8474 /* 8475 * When there is an active resilver or rebuild print its status. 8476 * Otherwise print the status of the last resilver or rebuild. 8477 */ 8478 if (active_resilver || (!active_rebuild && have_resilver && 8479 resilver_end_time && resilver_end_time > rebuild_end_time)) { 8480 print_scan_scrub_resilver_status(ps); 8481 } else if (active_rebuild || (!active_resilver && have_rebuild && 8482 rebuild_end_time && rebuild_end_time > resilver_end_time)) { 8483 print_rebuild_status(zhp, nvroot); 8484 } 8485 8486 (void) nvlist_lookup_uint64_array(nvroot, 8487 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 8488 print_checkpoint_scan_warning(ps, pcs); 8489 } 8490 8491 /* 8492 * Print out detailed removal status. 8493 */ 8494 static void 8495 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs) 8496 { 8497 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7]; 8498 time_t start, end; 8499 nvlist_t *config, *nvroot; 8500 nvlist_t **child; 8501 uint_t children; 8502 char *vdev_name; 8503 8504 if (prs == NULL || prs->prs_state == DSS_NONE) 8505 return; 8506 8507 /* 8508 * Determine name of vdev. 8509 */ 8510 config = zpool_get_config(zhp, NULL); 8511 nvroot = fnvlist_lookup_nvlist(config, 8512 ZPOOL_CONFIG_VDEV_TREE); 8513 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 8514 &child, &children) == 0); 8515 assert(prs->prs_removing_vdev < children); 8516 vdev_name = zpool_vdev_name(g_zfs, zhp, 8517 child[prs->prs_removing_vdev], B_TRUE); 8518 8519 printf_color(ANSI_BOLD, gettext("remove: ")); 8520 8521 start = prs->prs_start_time; 8522 end = prs->prs_end_time; 8523 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf)); 8524 8525 /* 8526 * Removal is finished or canceled. 8527 */ 8528 if (prs->prs_state == DSS_FINISHED) { 8529 uint64_t minutes_taken = (end - start) / 60; 8530 8531 (void) printf(gettext("Removal of vdev %llu copied %s " 8532 "in %lluh%um, completed on %s"), 8533 (longlong_t)prs->prs_removing_vdev, 8534 copied_buf, 8535 (u_longlong_t)(minutes_taken / 60), 8536 (uint_t)(minutes_taken % 60), 8537 ctime((time_t *)&end)); 8538 } else if (prs->prs_state == DSS_CANCELED) { 8539 (void) printf(gettext("Removal of %s canceled on %s"), 8540 vdev_name, ctime(&end)); 8541 } else { 8542 uint64_t copied, total, elapsed, mins_left, hours_left; 8543 double fraction_done; 8544 uint_t rate; 8545 8546 assert(prs->prs_state == DSS_SCANNING); 8547 8548 /* 8549 * Removal is in progress. 8550 */ 8551 (void) printf(gettext( 8552 "Evacuation of %s in progress since %s"), 8553 vdev_name, ctime(&start)); 8554 8555 copied = prs->prs_copied > 0 ? prs->prs_copied : 1; 8556 total = prs->prs_to_copy; 8557 fraction_done = (double)copied / total; 8558 8559 /* elapsed time for this pass */ 8560 elapsed = time(NULL) - prs->prs_start_time; 8561 elapsed = elapsed > 0 ? elapsed : 1; 8562 rate = copied / elapsed; 8563 rate = rate > 0 ? 
rate : 1; 8564 mins_left = ((total - copied) / rate) / 60; 8565 hours_left = mins_left / 60; 8566 8567 zfs_nicenum(copied, examined_buf, sizeof (examined_buf)); 8568 zfs_nicenum(total, total_buf, sizeof (total_buf)); 8569 zfs_nicenum(rate, rate_buf, sizeof (rate_buf)); 8570 8571 /* 8572 * do not print estimated time if hours_left is more than 8573 * 30 days 8574 */ 8575 (void) printf(gettext( 8576 "\t%s copied out of %s at %s/s, %.2f%% done"), 8577 examined_buf, total_buf, rate_buf, 100 * fraction_done); 8578 if (hours_left < (30 * 24)) { 8579 (void) printf(gettext(", %lluh%um to go\n"), 8580 (u_longlong_t)hours_left, (uint_t)(mins_left % 60)); 8581 } else { 8582 (void) printf(gettext( 8583 ", (copy is slow, no estimated time)\n")); 8584 } 8585 } 8586 free(vdev_name); 8587 8588 if (prs->prs_mapping_memory > 0) { 8589 char mem_buf[7]; 8590 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf)); 8591 (void) printf(gettext( 8592 "\t%s memory used for removed device mappings\n"), 8593 mem_buf); 8594 } 8595 } 8596 8597 /* 8598 * Print out detailed raidz expansion status. 8599 */ 8600 static void 8601 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres) 8602 { 8603 char copied_buf[7]; 8604 8605 if (pres == NULL || pres->pres_state == DSS_NONE) 8606 return; 8607 8608 /* 8609 * Determine name of vdev. 8610 */ 8611 nvlist_t *config = zpool_get_config(zhp, NULL); 8612 nvlist_t *nvroot = fnvlist_lookup_nvlist(config, 8613 ZPOOL_CONFIG_VDEV_TREE); 8614 nvlist_t **child; 8615 uint_t children; 8616 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 8617 &child, &children) == 0); 8618 assert(pres->pres_expanding_vdev < children); 8619 8620 printf_color(ANSI_BOLD, gettext("expand: ")); 8621 8622 time_t start = pres->pres_start_time; 8623 time_t end = pres->pres_end_time; 8624 char *vname = 8625 zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0); 8626 zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf)); 8627 8628 /* 8629 * Expansion is finished or canceled. 8630 */ 8631 if (pres->pres_state == DSS_FINISHED) { 8632 char time_buf[32]; 8633 secs_to_dhms(end - start, time_buf); 8634 8635 (void) printf(gettext("expanded %s-%u copied %s in %s, " 8636 "on %s"), vname, (int)pres->pres_expanding_vdev, 8637 copied_buf, time_buf, ctime((time_t *)&end)); 8638 } else { 8639 char examined_buf[7], total_buf[7], rate_buf[7]; 8640 uint64_t copied, total, elapsed, secs_left; 8641 double fraction_done; 8642 uint_t rate; 8643 8644 assert(pres->pres_state == DSS_SCANNING); 8645 8646 /* 8647 * Expansion is in progress. 8648 */ 8649 (void) printf(gettext( 8650 "expansion of %s-%u in progress since %s"), 8651 vname, (int)pres->pres_expanding_vdev, ctime(&start)); 8652 8653 copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1; 8654 total = pres->pres_to_reflow; 8655 fraction_done = (double)copied / total; 8656 8657 /* elapsed time for this pass */ 8658 elapsed = time(NULL) - pres->pres_start_time; 8659 elapsed = elapsed > 0 ? elapsed : 1; 8660 rate = copied / elapsed; 8661 rate = rate > 0 ? 
rate : 1; 8662 secs_left = (total - copied) / rate; 8663 8664 zfs_nicenum(copied, examined_buf, sizeof (examined_buf)); 8665 zfs_nicenum(total, total_buf, sizeof (total_buf)); 8666 zfs_nicenum(rate, rate_buf, sizeof (rate_buf)); 8667 8668 /* 8669 * do not print estimated time if hours_left is more than 8670 * 30 days 8671 */ 8672 (void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"), 8673 examined_buf, total_buf, rate_buf, 100 * fraction_done); 8674 if (pres->pres_waiting_for_resilver) { 8675 (void) printf(gettext(", paused for resilver or " 8676 "clear\n")); 8677 } else if (secs_left < (30 * 24 * 3600)) { 8678 char time_buf[32]; 8679 secs_to_dhms(secs_left, time_buf); 8680 (void) printf(gettext(", %s to go\n"), time_buf); 8681 } else { 8682 (void) printf(gettext( 8683 ", (copy is slow, no estimated time)\n")); 8684 } 8685 } 8686 free(vname); 8687 } 8688 static void 8689 print_checkpoint_status(pool_checkpoint_stat_t *pcs) 8690 { 8691 time_t start; 8692 char space_buf[7]; 8693 8694 if (pcs == NULL || pcs->pcs_state == CS_NONE) 8695 return; 8696 8697 (void) printf(gettext("checkpoint: ")); 8698 8699 start = pcs->pcs_start_time; 8700 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf)); 8701 8702 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) { 8703 char *date = ctime(&start); 8704 8705 /* 8706 * ctime() adds a newline at the end of the generated 8707 * string, thus the weird format specifier and the 8708 * strlen() call used to chop it off from the output. 8709 */ 8710 (void) printf(gettext("created %.*s, consumes %s\n"), 8711 (int)(strlen(date) - 1), date, space_buf); 8712 return; 8713 } 8714 8715 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 8716 8717 (void) printf(gettext("discarding, %s remaining.\n"), 8718 space_buf); 8719 } 8720 8721 static void 8722 print_error_log(zpool_handle_t *zhp) 8723 { 8724 nvlist_t *nverrlist = NULL; 8725 nvpair_t *elem; 8726 char *pathname; 8727 size_t len = MAXPATHLEN * 2; 8728 8729 if (zpool_get_errlog(zhp, &nverrlist) != 0) 8730 return; 8731 8732 (void) printf("errors: Permanent errors have been " 8733 "detected in the following files:\n\n"); 8734 8735 pathname = safe_malloc(len); 8736 elem = NULL; 8737 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) { 8738 nvlist_t *nv; 8739 uint64_t dsobj, obj; 8740 8741 verify(nvpair_value_nvlist(elem, &nv) == 0); 8742 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET, 8743 &dsobj) == 0); 8744 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, 8745 &obj) == 0); 8746 zpool_obj_to_path(zhp, dsobj, obj, pathname, len); 8747 (void) printf("%7s %s\n", "", pathname); 8748 } 8749 free(pathname); 8750 nvlist_free(nverrlist); 8751 } 8752 8753 static void 8754 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares, 8755 uint_t nspares) 8756 { 8757 uint_t i; 8758 char *name; 8759 8760 if (nspares == 0) 8761 return; 8762 8763 (void) printf(gettext("\tspares\n")); 8764 8765 for (i = 0; i < nspares; i++) { 8766 name = zpool_vdev_name(g_zfs, zhp, spares[i], 8767 cb->cb_name_flags); 8768 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL); 8769 free(name); 8770 } 8771 } 8772 8773 static void 8774 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache, 8775 uint_t nl2cache) 8776 { 8777 uint_t i; 8778 char *name; 8779 8780 if (nl2cache == 0) 8781 return; 8782 8783 (void) printf(gettext("\tcache\n")); 8784 8785 for (i = 0; i < nl2cache; i++) { 8786 name = zpool_vdev_name(g_zfs, zhp, l2cache[i], 8787 cb->cb_name_flags); 8788 print_status_config(zhp, cb, name, 
l2cache[i], 2, 8789 B_FALSE, NULL); 8790 free(name); 8791 } 8792 } 8793 8794 static void 8795 print_dedup_stats(nvlist_t *config) 8796 { 8797 ddt_histogram_t *ddh; 8798 ddt_stat_t *dds; 8799 ddt_object_t *ddo; 8800 uint_t c; 8801 char dspace[6], mspace[6]; 8802 8803 /* 8804 * If the pool was faulted then we may not have been able to 8805 * obtain the config. Otherwise, if we have anything in the dedup 8806 * table continue processing the stats. 8807 */ 8808 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS, 8809 (uint64_t **)&ddo, &c) != 0) 8810 return; 8811 8812 (void) printf("\n"); 8813 (void) printf(gettext(" dedup: ")); 8814 if (ddo->ddo_count == 0) { 8815 (void) printf(gettext("no DDT entries\n")); 8816 return; 8817 } 8818 8819 zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace)); 8820 zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace)); 8821 (void) printf("DDT entries %llu, size %s on disk, %s in core\n", 8822 (u_longlong_t)ddo->ddo_count, 8823 dspace, 8824 mspace); 8825 8826 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, 8827 (uint64_t **)&dds, &c) == 0); 8828 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM, 8829 (uint64_t **)&ddh, &c) == 0); 8830 zpool_dump_ddt(dds, ddh); 8831 } 8832 8833 /* 8834 * Display a summary of pool status. Displays a summary such as: 8835 * 8836 * pool: tank 8837 * status: DEGRADED 8838 * reason: One or more devices ... 8839 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01 8840 * config: 8841 * mirror DEGRADED 8842 * c1t0d0 OK 8843 * c2t0d0 UNAVAIL 8844 * 8845 * When given the '-v' option, we print out the complete config. If the '-e' 8846 * option is specified, then we print out error rate information as well. 8847 */ 8848 static int 8849 status_callback(zpool_handle_t *zhp, void *data) 8850 { 8851 status_cbdata_t *cbp = data; 8852 nvlist_t *config, *nvroot; 8853 const char *msgid; 8854 zpool_status_t reason; 8855 zpool_errata_t errata; 8856 const char *health; 8857 uint_t c; 8858 vdev_stat_t *vs; 8859 8860 config = zpool_get_config(zhp, NULL); 8861 reason = zpool_get_status(zhp, &msgid, &errata); 8862 8863 cbp->cb_count++; 8864 8865 /* 8866 * If we were given 'zpool status -x', only report those pools with 8867 * problems. 8868 */ 8869 if (cbp->cb_explain && 8870 (reason == ZPOOL_STATUS_OK || 8871 reason == ZPOOL_STATUS_VERSION_OLDER || 8872 reason == ZPOOL_STATUS_FEAT_DISABLED || 8873 reason == ZPOOL_STATUS_COMPATIBILITY_ERR || 8874 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) { 8875 if (!cbp->cb_allpools) { 8876 (void) printf(gettext("pool '%s' is healthy\n"), 8877 zpool_get_name(zhp)); 8878 if (cbp->cb_first) 8879 cbp->cb_first = B_FALSE; 8880 } 8881 return (0); 8882 } 8883 8884 if (cbp->cb_first) 8885 cbp->cb_first = B_FALSE; 8886 else 8887 (void) printf("\n"); 8888 8889 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 8890 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 8891 (uint64_t **)&vs, &c) == 0); 8892 8893 health = zpool_get_state_str(zhp); 8894 8895 printf(" "); 8896 printf_color(ANSI_BOLD, gettext("pool:")); 8897 printf(" %s\n", zpool_get_name(zhp)); 8898 fputc(' ', stdout); 8899 printf_color(ANSI_BOLD, gettext("state: ")); 8900 8901 printf_color(health_str_to_color(health), "%s", health); 8902 8903 fputc('\n', stdout); 8904 8905 switch (reason) { 8906 case ZPOOL_STATUS_MISSING_DEV_R: 8907 printf_color(ANSI_BOLD, gettext("status: ")); 8908 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8909 "not be opened. 
Sufficient replicas exist for\n\tthe pool " 8910 "to continue functioning in a degraded state.\n")); 8911 printf_color(ANSI_BOLD, gettext("action: ")); 8912 printf_color(ANSI_YELLOW, gettext("Attach the missing device " 8913 "and online it using 'zpool online'.\n")); 8914 break; 8915 8916 case ZPOOL_STATUS_MISSING_DEV_NR: 8917 printf_color(ANSI_BOLD, gettext("status: ")); 8918 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8919 "not be opened. There are insufficient\n\treplicas for the" 8920 " pool to continue functioning.\n")); 8921 printf_color(ANSI_BOLD, gettext("action: ")); 8922 printf_color(ANSI_YELLOW, gettext("Attach the missing device " 8923 "and online it using 'zpool online'.\n")); 8924 break; 8925 8926 case ZPOOL_STATUS_CORRUPT_LABEL_R: 8927 printf_color(ANSI_BOLD, gettext("status: ")); 8928 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8929 "not be used because the label is missing or\n\tinvalid. " 8930 "Sufficient replicas exist for the pool to continue\n\t" 8931 "functioning in a degraded state.\n")); 8932 printf_color(ANSI_BOLD, gettext("action: ")); 8933 printf_color(ANSI_YELLOW, gettext("Replace the device using " 8934 "'zpool replace'.\n")); 8935 break; 8936 8937 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 8938 printf_color(ANSI_BOLD, gettext("status: ")); 8939 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8940 "not be used because the label is missing \n\tor invalid. " 8941 "There are insufficient replicas for the pool to " 8942 "continue\n\tfunctioning.\n")); 8943 zpool_explain_recover(zpool_get_handle(zhp), 8944 zpool_get_name(zhp), reason, config); 8945 break; 8946 8947 case ZPOOL_STATUS_FAILING_DEV: 8948 printf_color(ANSI_BOLD, gettext("status: ")); 8949 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8950 "experienced an unrecoverable error. An\n\tattempt was " 8951 "made to correct the error. Applications are " 8952 "unaffected.\n")); 8953 printf_color(ANSI_BOLD, gettext("action: ")); 8954 printf_color(ANSI_YELLOW, gettext("Determine if the " 8955 "device needs to be replaced, and clear the errors\n\tusing" 8956 " 'zpool clear' or replace the device with 'zpool " 8957 "replace'.\n")); 8958 break; 8959 8960 case ZPOOL_STATUS_OFFLINE_DEV: 8961 printf_color(ANSI_BOLD, gettext("status: ")); 8962 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8963 "been taken offline by the administrator.\n\tSufficient " 8964 "replicas exist for the pool to continue functioning in " 8965 "a\n\tdegraded state.\n")); 8966 printf_color(ANSI_BOLD, gettext("action: ")); 8967 printf_color(ANSI_YELLOW, gettext("Online the device " 8968 "using 'zpool online' or replace the device with\n\t'zpool " 8969 "replace'.\n")); 8970 break; 8971 8972 case ZPOOL_STATUS_REMOVED_DEV: 8973 printf_color(ANSI_BOLD, gettext("status: ")); 8974 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8975 "been removed by the administrator.\n\tSufficient " 8976 "replicas exist for the pool to continue functioning in " 8977 "a\n\tdegraded state.\n")); 8978 printf_color(ANSI_BOLD, gettext("action: ")); 8979 printf_color(ANSI_YELLOW, gettext("Online the device " 8980 "using 'zpool online' or replace the device with\n\t'zpool " 8981 "replace'.\n")); 8982 break; 8983 8984 case ZPOOL_STATUS_RESILVERING: 8985 case ZPOOL_STATUS_REBUILDING: 8986 printf_color(ANSI_BOLD, gettext("status: ")); 8987 printf_color(ANSI_YELLOW, gettext("One or more devices is " 8988 "currently being resilvered. 
The pool will\n\tcontinue " 8989 "to function, possibly in a degraded state.\n")); 8990 printf_color(ANSI_BOLD, gettext("action: ")); 8991 printf_color(ANSI_YELLOW, gettext("Wait for the resilver to " 8992 "complete.\n")); 8993 break; 8994 8995 case ZPOOL_STATUS_REBUILD_SCRUB: 8996 printf_color(ANSI_BOLD, gettext("status: ")); 8997 printf_color(ANSI_YELLOW, gettext("One or more devices have " 8998 "been sequentially resilvered, scrubbing\n\tthe pool " 8999 "is recommended.\n")); 9000 printf_color(ANSI_BOLD, gettext("action: ")); 9001 printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to " 9002 "verify all data checksums.\n")); 9003 break; 9004 9005 case ZPOOL_STATUS_CORRUPT_DATA: 9006 printf_color(ANSI_BOLD, gettext("status: ")); 9007 printf_color(ANSI_YELLOW, gettext("One or more devices has " 9008 "experienced an error resulting in data\n\tcorruption. " 9009 "Applications may be affected.\n")); 9010 printf_color(ANSI_BOLD, gettext("action: ")); 9011 printf_color(ANSI_YELLOW, gettext("Restore the file in question" 9012 " if possible. Otherwise restore the\n\tentire pool from " 9013 "backup.\n")); 9014 break; 9015 9016 case ZPOOL_STATUS_CORRUPT_POOL: 9017 printf_color(ANSI_BOLD, gettext("status: ")); 9018 printf_color(ANSI_YELLOW, gettext("The pool metadata is " 9019 "corrupted and the pool cannot be opened.\n")); 9020 zpool_explain_recover(zpool_get_handle(zhp), 9021 zpool_get_name(zhp), reason, config); 9022 break; 9023 9024 case ZPOOL_STATUS_VERSION_OLDER: 9025 printf_color(ANSI_BOLD, gettext("status: ")); 9026 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 9027 "a legacy on-disk format. The pool can\n\tstill be used, " 9028 "but some features are unavailable.\n")); 9029 printf_color(ANSI_BOLD, gettext("action: ")); 9030 printf_color(ANSI_YELLOW, gettext("Upgrade the pool using " 9031 "'zpool upgrade'. Once this is done, the\n\tpool will no " 9032 "longer be accessible on software that does not support\n\t" 9033 "feature flags.\n")); 9034 break; 9035 9036 case ZPOOL_STATUS_VERSION_NEWER: 9037 printf_color(ANSI_BOLD, gettext("status: ")); 9038 printf_color(ANSI_YELLOW, gettext("The pool has been upgraded " 9039 "to a newer, incompatible on-disk version.\n\tThe pool " 9040 "cannot be accessed on this system.\n")); 9041 printf_color(ANSI_BOLD, gettext("action: ")); 9042 printf_color(ANSI_YELLOW, gettext("Access the pool from a " 9043 "system running more recent software, or\n\trestore the " 9044 "pool from backup.\n")); 9045 break; 9046 9047 case ZPOOL_STATUS_FEAT_DISABLED: 9048 printf_color(ANSI_BOLD, gettext("status: ")); 9049 printf_color(ANSI_YELLOW, gettext("Some supported and " 9050 "requested features are not enabled on the pool.\n\t" 9051 "The pool can still be used, but some features are " 9052 "unavailable.\n")); 9053 printf_color(ANSI_BOLD, gettext("action: ")); 9054 printf_color(ANSI_YELLOW, gettext("Enable all features using " 9055 "'zpool upgrade'. Once this is done,\n\tthe pool may no " 9056 "longer be accessible by software that does not support\n\t" 9057 "the features. See zpool-features(7) for details.\n")); 9058 break; 9059 9060 case ZPOOL_STATUS_COMPATIBILITY_ERR: 9061 printf_color(ANSI_BOLD, gettext("status: ")); 9062 printf_color(ANSI_YELLOW, gettext("This pool has a " 9063 "compatibility list specified, but it could not be\n\t" 9064 "read/parsed at this time. 
The pool can still be used, " 9065 "but this\n\tshould be investigated.\n")); 9066 printf_color(ANSI_BOLD, gettext("action: ")); 9067 printf_color(ANSI_YELLOW, gettext("Check the value of the " 9068 "'compatibility' property against the\n\t" 9069 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or " 9070 ZPOOL_DATA_COMPAT_D ".\n")); 9071 break; 9072 9073 case ZPOOL_STATUS_INCOMPATIBLE_FEAT: 9074 printf_color(ANSI_BOLD, gettext("status: ")); 9075 printf_color(ANSI_YELLOW, gettext("One or more features " 9076 "are enabled on the pool despite not being\n\t" 9077 "requested by the 'compatibility' property.\n")); 9078 printf_color(ANSI_BOLD, gettext("action: ")); 9079 printf_color(ANSI_YELLOW, gettext("Consider setting " 9080 "'compatibility' to an appropriate value, or\n\t" 9081 "adding needed features to the relevant file in\n\t" 9082 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n")); 9083 break; 9084 9085 case ZPOOL_STATUS_UNSUP_FEAT_READ: 9086 printf_color(ANSI_BOLD, gettext("status: ")); 9087 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed " 9088 "on this system because it uses the\n\tfollowing feature(s)" 9089 " not supported on this system:\n")); 9090 zpool_print_unsup_feat(config); 9091 (void) printf("\n"); 9092 printf_color(ANSI_BOLD, gettext("action: ")); 9093 printf_color(ANSI_YELLOW, gettext("Access the pool from a " 9094 "system that supports the required feature(s),\n\tor " 9095 "restore the pool from backup.\n")); 9096 break; 9097 9098 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 9099 printf_color(ANSI_BOLD, gettext("status: ")); 9100 printf_color(ANSI_YELLOW, gettext("The pool can only be " 9101 "accessed in read-only mode on this system. It\n\tcannot be" 9102 " accessed in read-write mode because it uses the " 9103 "following\n\tfeature(s) not supported on this system:\n")); 9104 zpool_print_unsup_feat(config); 9105 (void) printf("\n"); 9106 printf_color(ANSI_BOLD, gettext("action: ")); 9107 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed " 9108 "in read-write mode. Import the pool with\n" 9109 "\t\"-o readonly=on\", access the pool from a system that " 9110 "supports the\n\trequired feature(s), or restore the " 9111 "pool from backup.\n")); 9112 break; 9113 9114 case ZPOOL_STATUS_FAULTED_DEV_R: 9115 printf_color(ANSI_BOLD, gettext("status: ")); 9116 printf_color(ANSI_YELLOW, gettext("One or more devices are " 9117 "faulted in response to persistent errors.\n\tSufficient " 9118 "replicas exist for the pool to continue functioning " 9119 "in a\n\tdegraded state.\n")); 9120 printf_color(ANSI_BOLD, gettext("action: ")); 9121 printf_color(ANSI_YELLOW, gettext("Replace the faulted device, " 9122 "or use 'zpool clear' to mark the device\n\trepaired.\n")); 9123 break; 9124 9125 case ZPOOL_STATUS_FAULTED_DEV_NR: 9126 printf_color(ANSI_BOLD, gettext("status: ")); 9127 printf_color(ANSI_YELLOW, gettext("One or more devices are " 9128 "faulted in response to persistent errors. There are " 9129 "insufficient replicas for the pool to\n\tcontinue " 9130 "functioning.\n")); 9131 printf_color(ANSI_BOLD, gettext("action: ")); 9132 printf_color(ANSI_YELLOW, gettext("Destroy and re-create the " 9133 "pool from a backup source. 
Manually marking the device\n" 9134 "\trepaired using 'zpool clear' may allow some data " 9135 "to be recovered.\n")); 9136 break; 9137 9138 case ZPOOL_STATUS_IO_FAILURE_MMP: 9139 printf_color(ANSI_BOLD, gettext("status: ")); 9140 printf_color(ANSI_YELLOW, gettext("The pool is suspended " 9141 "because multihost writes failed or were delayed;\n\t" 9142 "another system could import the pool undetected.\n")); 9143 printf_color(ANSI_BOLD, gettext("action: ")); 9144 printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices" 9145 " are connected, then reboot your system and\n\timport the " 9146 "pool or run 'zpool clear' to resume the pool.\n")); 9147 break; 9148 9149 case ZPOOL_STATUS_IO_FAILURE_WAIT: 9150 case ZPOOL_STATUS_IO_FAILURE_CONTINUE: 9151 printf_color(ANSI_BOLD, gettext("status: ")); 9152 printf_color(ANSI_YELLOW, gettext("One or more devices are " 9153 "faulted in response to IO failures.\n")); 9154 printf_color(ANSI_BOLD, gettext("action: ")); 9155 printf_color(ANSI_YELLOW, gettext("Make sure the affected " 9156 "devices are connected, then run 'zpool clear'.\n")); 9157 break; 9158 9159 case ZPOOL_STATUS_BAD_LOG: 9160 printf_color(ANSI_BOLD, gettext("status: ")); 9161 printf_color(ANSI_YELLOW, gettext("An intent log record " 9162 "could not be read.\n" 9163 "\tWaiting for administrator intervention to fix the " 9164 "faulted pool.\n")); 9165 printf_color(ANSI_BOLD, gettext("action: ")); 9166 printf_color(ANSI_YELLOW, gettext("Either restore the affected " 9167 "device(s) and run 'zpool online',\n" 9168 "\tor ignore the intent log records by running " 9169 "'zpool clear'.\n")); 9170 break; 9171 9172 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 9173 (void) printf(gettext("status: One or more devices are " 9174 "configured to use a non-native block size.\n" 9175 "\tExpect reduced performance.\n")); 9176 (void) printf(gettext("action: Replace affected devices with " 9177 "devices that support the\n\tconfigured block size, or " 9178 "migrate data to a properly configured\n\tpool.\n")); 9179 break; 9180 9181 case ZPOOL_STATUS_HOSTID_MISMATCH: 9182 printf_color(ANSI_BOLD, gettext("status: ")); 9183 printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid" 9184 " and system hostid on imported pool.\n\tThis pool was " 9185 "previously imported into a system with a different " 9186 "hostid,\n\tand then was verbatim imported into this " 9187 "system.\n")); 9188 printf_color(ANSI_BOLD, gettext("action: ")); 9189 printf_color(ANSI_YELLOW, gettext("Export this pool on all " 9190 "systems on which it is imported.\n" 9191 "\tThen import it to correct the mismatch.\n")); 9192 break; 9193 9194 case ZPOOL_STATUS_ERRATA: 9195 printf_color(ANSI_BOLD, gettext("status: ")); 9196 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"), 9197 errata); 9198 9199 switch (errata) { 9200 case ZPOOL_ERRATA_NONE: 9201 break; 9202 9203 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 9204 printf_color(ANSI_BOLD, gettext("action: ")); 9205 printf_color(ANSI_YELLOW, gettext("To correct the issue" 9206 " run 'zpool scrub'.\n")); 9207 break; 9208 9209 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 9210 (void) printf(gettext("\tExisting encrypted datasets " 9211 "contain an on-disk incompatibility\n\twhich " 9212 "needs to be corrected.\n")); 9213 printf_color(ANSI_BOLD, gettext("action: ")); 9214 printf_color(ANSI_YELLOW, gettext("To correct the issue" 9215 " backup existing encrypted datasets to new\n\t" 9216 "encrypted datasets and destroy the old ones. 
" 9217 "'zfs mount -o ro' can\n\tbe used to temporarily " 9218 "mount existing encrypted datasets readonly.\n")); 9219 break; 9220 9221 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 9222 (void) printf(gettext("\tExisting encrypted snapshots " 9223 "and bookmarks contain an on-disk\n\tincompat" 9224 "ibility. This may cause on-disk corruption if " 9225 "they are used\n\twith 'zfs recv'.\n")); 9226 printf_color(ANSI_BOLD, gettext("action: ")); 9227 printf_color(ANSI_YELLOW, gettext("To correct the" 9228 "issue, enable the bookmark_v2 feature. No " 9229 "additional\n\taction is needed if there are no " 9230 "encrypted snapshots or bookmarks.\n\tIf preserving" 9231 "the encrypted snapshots and bookmarks is required," 9232 " use\n\ta non-raw send to backup and restore them." 9233 " Alternately, they may be\n\tremoved to resolve " 9234 "the incompatibility.\n")); 9235 break; 9236 9237 default: 9238 /* 9239 * All errata which allow the pool to be imported 9240 * must contain an action message. 9241 */ 9242 assert(0); 9243 } 9244 break; 9245 9246 default: 9247 /* 9248 * The remaining errors can't actually be generated, yet. 9249 */ 9250 assert(reason == ZPOOL_STATUS_OK); 9251 } 9252 9253 if (msgid != NULL) { 9254 printf(" "); 9255 printf_color(ANSI_BOLD, gettext("see:")); 9256 printf(gettext( 9257 " https://openzfs.github.io/openzfs-docs/msg/%s\n"), 9258 msgid); 9259 } 9260 9261 if (config != NULL) { 9262 uint64_t nerr; 9263 nvlist_t **spares, **l2cache; 9264 uint_t nspares, nl2cache; 9265 9266 print_scan_status(zhp, nvroot); 9267 9268 pool_removal_stat_t *prs = NULL; 9269 (void) nvlist_lookup_uint64_array(nvroot, 9270 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 9271 print_removal_status(zhp, prs); 9272 9273 pool_checkpoint_stat_t *pcs = NULL; 9274 (void) nvlist_lookup_uint64_array(nvroot, 9275 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 9276 print_checkpoint_status(pcs); 9277 9278 pool_raidz_expand_stat_t *pres = NULL; 9279 (void) nvlist_lookup_uint64_array(nvroot, 9280 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c); 9281 print_raidz_expand_status(zhp, pres); 9282 9283 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0, 9284 cbp->cb_name_flags | VDEV_NAME_TYPE_ID); 9285 if (cbp->cb_namewidth < 10) 9286 cbp->cb_namewidth = 10; 9287 9288 color_start(ANSI_BOLD); 9289 (void) printf(gettext("config:\n\n")); 9290 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"), 9291 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE", 9292 "CKSUM"); 9293 color_end(); 9294 9295 if (cbp->cb_print_slow_ios) { 9296 printf_color(ANSI_BOLD, " %5s", gettext("SLOW")); 9297 } 9298 9299 if (cbp->cb_print_power) { 9300 printf_color(ANSI_BOLD, " %5s", gettext("POWER")); 9301 } 9302 9303 if (cbp->vcdl != NULL) 9304 print_cmd_columns(cbp->vcdl, 0); 9305 9306 printf("\n"); 9307 9308 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0, 9309 B_FALSE, NULL); 9310 9311 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP); 9312 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 9313 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS); 9314 9315 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 9316 &l2cache, &nl2cache) == 0) 9317 print_l2cache(zhp, cbp, l2cache, nl2cache); 9318 9319 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 9320 &spares, &nspares) == 0) 9321 print_spares(zhp, cbp, spares, nspares); 9322 9323 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, 9324 &nerr) == 0) { 9325 (void) printf("\n"); 9326 if (nerr == 0) { 9327 (void) printf(gettext( 
9328 "errors: No known data errors\n")); 9329 } else if (!cbp->cb_verbose) { 9330 color_start(ANSI_RED); 9331 (void) printf(gettext("errors: %llu data " 9332 "errors, use '-v' for a list\n"), 9333 (u_longlong_t)nerr); 9334 color_end(); 9335 } else { 9336 print_error_log(zhp); 9337 } 9338 } 9339 9340 if (cbp->cb_dedup_stats) 9341 print_dedup_stats(config); 9342 } else { 9343 (void) printf(gettext("config: The configuration cannot be " 9344 "determined.\n")); 9345 } 9346 9347 return (0); 9348 } 9349 9350 /* 9351 * zpool status [-c [script1,script2,...]] [-DegiLpPstvx] [--power] [-T d|u] ... 9352 * [pool] [interval [count]] 9353 * 9354 * -c CMD For each vdev, run command CMD 9355 * -D Display dedup status (undocumented) 9356 * -e Display only unhealthy vdevs 9357 * -g Display guid for individual vdev name. 9358 * -i Display vdev initialization status. 9359 * -L Follow links when resolving vdev path name. 9360 * -p Display values in parsable (exact) format. 9361 * -P Display full path for vdev name. 9362 * -s Display slow IOs column. 9363 * -t Display vdev TRIM status. 9364 * -T Display a timestamp in date(1) or Unix format 9365 * -v Display complete error logs 9366 * -x Display only pools with potential problems 9367 * --power Display vdev enclosure slot power status 9368 * 9369 * Describes the health status of all pools or some subset. 9370 */ 9371 int 9372 zpool_do_status(int argc, char **argv) 9373 { 9374 int c; 9375 int ret; 9376 float interval = 0; 9377 unsigned long count = 0; 9378 status_cbdata_t cb = { 0 }; 9379 char *cmd = NULL; 9380 9381 struct option long_options[] = { 9382 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 9383 {0, 0, 0, 0} 9384 }; 9385 9386 /* check options */ 9387 while ((c = getopt_long(argc, argv, "c:DegiLpPstT:vx", long_options, 9388 NULL)) != -1) { 9389 switch (c) { 9390 case 'c': 9391 if (cmd != NULL) { 9392 fprintf(stderr, 9393 gettext("Can't set -c flag twice\n")); 9394 exit(1); 9395 } 9396 9397 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 9398 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 9399 fprintf(stderr, gettext( 9400 "Can't run -c, disabled by " 9401 "ZPOOL_SCRIPTS_ENABLED.\n")); 9402 exit(1); 9403 } 9404 9405 if ((getuid() <= 0 || geteuid() <= 0) && 9406 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 9407 fprintf(stderr, gettext( 9408 "Can't run -c with root privileges " 9409 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 9410 exit(1); 9411 } 9412 cmd = optarg; 9413 break; 9414 case 'D': 9415 cb.cb_dedup_stats = B_TRUE; 9416 break; 9417 case 'e': 9418 cb.cb_print_unhealthy = B_TRUE; 9419 break; 9420 case 'g': 9421 cb.cb_name_flags |= VDEV_NAME_GUID; 9422 break; 9423 case 'i': 9424 cb.cb_print_vdev_init = B_TRUE; 9425 break; 9426 case 'L': 9427 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 9428 break; 9429 case 'p': 9430 cb.cb_literal = B_TRUE; 9431 break; 9432 case 'P': 9433 cb.cb_name_flags |= VDEV_NAME_PATH; 9434 break; 9435 case 's': 9436 cb.cb_print_slow_ios = B_TRUE; 9437 break; 9438 case 't': 9439 cb.cb_print_vdev_trim = B_TRUE; 9440 break; 9441 case 'T': 9442 get_timestamp_arg(*optarg); 9443 break; 9444 case 'v': 9445 cb.cb_verbose = B_TRUE; 9446 break; 9447 case 'x': 9448 cb.cb_explain = B_TRUE; 9449 break; 9450 case ZPOOL_OPTION_POWER: 9451 cb.cb_print_power = B_TRUE; 9452 break; 9453 case '?': 9454 if (optopt == 'c') { 9455 print_zpool_script_list("status"); 9456 exit(0); 9457 } else { 9458 fprintf(stderr, 9459 gettext("invalid option '%c'\n"), optopt); 9460 } 9461 usage(B_FALSE); 9462 } 9463 } 9464 9465 argc -= optind; 9466 argv += 
optind; 9467 9468 get_interval_count(&argc, argv, &interval, &count); 9469 9470 if (argc == 0) 9471 cb.cb_allpools = B_TRUE; 9472 9473 cb.cb_first = B_TRUE; 9474 cb.cb_print_status = B_TRUE; 9475 9476 for (;;) { 9477 if (timestamp_fmt != NODATE) 9478 print_timestamp(timestamp_fmt); 9479 9480 if (cmd != NULL) 9481 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd, 9482 NULL, NULL, 0, 0); 9483 9484 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 9485 cb.cb_literal, status_callback, &cb); 9486 9487 if (cb.vcdl != NULL) 9488 free_vdev_cmd_data_list(cb.vcdl); 9489 if (argc == 0 && cb.cb_count == 0) 9490 (void) fprintf(stderr, gettext("no pools available\n")); 9491 else if (cb.cb_explain && cb.cb_first && cb.cb_allpools) 9492 (void) printf(gettext("all pools are healthy\n")); 9493 9494 if (ret != 0) 9495 return (ret); 9496 9497 if (interval == 0) 9498 break; 9499 9500 if (count != 0 && --count == 0) 9501 break; 9502 9503 (void) fflush(stdout); 9504 (void) fsleep(interval); 9505 } 9506 9507 return (0); 9508 } 9509 9510 typedef struct upgrade_cbdata { 9511 int cb_first; 9512 int cb_argc; 9513 uint64_t cb_version; 9514 char **cb_argv; 9515 } upgrade_cbdata_t; 9516 9517 static int 9518 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs) 9519 { 9520 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION); 9521 int *count = (int *)unsupp_fs; 9522 9523 if (zfs_version > ZPL_VERSION) { 9524 (void) printf(gettext("%s (v%d) is not supported by this " 9525 "implementation of ZFS.\n"), 9526 zfs_get_name(zhp), zfs_version); 9527 (*count)++; 9528 } 9529 9530 zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs); 9531 9532 zfs_close(zhp); 9533 9534 return (0); 9535 } 9536 9537 static int 9538 upgrade_version(zpool_handle_t *zhp, uint64_t version) 9539 { 9540 int ret; 9541 nvlist_t *config; 9542 uint64_t oldversion; 9543 int unsupp_fs = 0; 9544 9545 config = zpool_get_config(zhp, NULL); 9546 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 9547 &oldversion) == 0); 9548 9549 char compat[ZFS_MAXPROPLEN]; 9550 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat, 9551 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 9552 compat[0] = '\0'; 9553 9554 assert(SPA_VERSION_IS_SUPPORTED(oldversion)); 9555 assert(oldversion < version); 9556 9557 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs); 9558 if (ret != 0) 9559 return (ret); 9560 9561 if (unsupp_fs) { 9562 (void) fprintf(stderr, gettext("Upgrade not performed due " 9563 "to %d unsupported filesystems (max v%d).\n"), 9564 unsupp_fs, (int)ZPL_VERSION); 9565 return (1); 9566 } 9567 9568 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) { 9569 (void) fprintf(stderr, gettext("Upgrade not performed because " 9570 "'compatibility' property set to '" 9571 ZPOOL_COMPAT_LEGACY "'.\n")); 9572 return (1); 9573 } 9574 9575 ret = zpool_upgrade(zhp, version); 9576 if (ret != 0) 9577 return (ret); 9578 9579 if (version >= SPA_VERSION_FEATURES) { 9580 (void) printf(gettext("Successfully upgraded " 9581 "'%s' from version %llu to feature flags.\n"), 9582 zpool_get_name(zhp), (u_longlong_t)oldversion); 9583 } else { 9584 (void) printf(gettext("Successfully upgraded " 9585 "'%s' from version %llu to version %llu.\n"), 9586 zpool_get_name(zhp), (u_longlong_t)oldversion, 9587 (u_longlong_t)version); 9588 } 9589 9590 return (0); 9591 } 9592 9593 static int 9594 upgrade_enable_all(zpool_handle_t *zhp, int *countp) 9595 { 9596 int i, ret, count; 9597 boolean_t firstff = B_TRUE; 9598 nvlist_t *enabled = zpool_get_features(zhp); 9599 9600 
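/*
 * The pool's 'compatibility' property is read and expanded into the set of
 * features that may be enabled (requested_features[]).  A feature is then
 * enabled by setting its "feature@<name>" pool property to "enabled", but
 * only if the kernel module supports it and it is not already enabled.
 */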
char compat[ZFS_MAXPROPLEN]; 9601 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat, 9602 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 9603 compat[0] = '\0'; 9604 9605 boolean_t requested_features[SPA_FEATURES]; 9606 if (zpool_do_load_compat(compat, requested_features) != 9607 ZPOOL_COMPATIBILITY_OK) 9608 return (-1); 9609 9610 count = 0; 9611 for (i = 0; i < SPA_FEATURES; i++) { 9612 const char *fname = spa_feature_table[i].fi_uname; 9613 const char *fguid = spa_feature_table[i].fi_guid; 9614 9615 if (!spa_feature_table[i].fi_zfs_mod_supported) 9616 continue; 9617 9618 if (!nvlist_exists(enabled, fguid) && requested_features[i]) { 9619 char *propname; 9620 verify(-1 != asprintf(&propname, "feature@%s", fname)); 9621 ret = zpool_set_prop(zhp, propname, 9622 ZFS_FEATURE_ENABLED); 9623 if (ret != 0) { 9624 free(propname); 9625 return (ret); 9626 } 9627 count++; 9628 9629 if (firstff) { 9630 (void) printf(gettext("Enabled the " 9631 "following features on '%s':\n"), 9632 zpool_get_name(zhp)); 9633 firstff = B_FALSE; 9634 } 9635 (void) printf(gettext(" %s\n"), fname); 9636 free(propname); 9637 } 9638 } 9639 9640 if (countp != NULL) 9641 *countp = count; 9642 return (0); 9643 } 9644 9645 static int 9646 upgrade_cb(zpool_handle_t *zhp, void *arg) 9647 { 9648 upgrade_cbdata_t *cbp = arg; 9649 nvlist_t *config; 9650 uint64_t version; 9651 boolean_t modified_pool = B_FALSE; 9652 int ret; 9653 9654 config = zpool_get_config(zhp, NULL); 9655 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 9656 &version) == 0); 9657 9658 assert(SPA_VERSION_IS_SUPPORTED(version)); 9659 9660 if (version < cbp->cb_version) { 9661 cbp->cb_first = B_FALSE; 9662 ret = upgrade_version(zhp, cbp->cb_version); 9663 if (ret != 0) 9664 return (ret); 9665 modified_pool = B_TRUE; 9666 9667 /* 9668 * If they did "zpool upgrade -a", then we could 9669 * be doing ioctls to different pools. We need 9670 * to log this history once to each pool, and bypass 9671 * the normal history logging that happens in main(). 9672 */ 9673 (void) zpool_log_history(g_zfs, history_str); 9674 log_history = B_FALSE; 9675 } 9676 9677 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 9678 int count; 9679 ret = upgrade_enable_all(zhp, &count); 9680 if (ret != 0) 9681 return (ret); 9682 9683 if (count > 0) { 9684 cbp->cb_first = B_FALSE; 9685 modified_pool = B_TRUE; 9686 } 9687 } 9688 9689 if (modified_pool) { 9690 (void) printf("\n"); 9691 (void) after_zpool_upgrade(zhp); 9692 } 9693 9694 return (0); 9695 } 9696 9697 static int 9698 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg) 9699 { 9700 upgrade_cbdata_t *cbp = arg; 9701 nvlist_t *config; 9702 uint64_t version; 9703 9704 config = zpool_get_config(zhp, NULL); 9705 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 9706 &version) == 0); 9707 9708 assert(SPA_VERSION_IS_SUPPORTED(version)); 9709 9710 if (version < SPA_VERSION_FEATURES) { 9711 if (cbp->cb_first) { 9712 (void) printf(gettext("The following pools are " 9713 "formatted with legacy version numbers and can\n" 9714 "be upgraded to use feature flags. 
After " 9715 "being upgraded, these pools\nwill no " 9716 "longer be accessible by software that does not " 9717 "support feature\nflags.\n\n" 9718 "Note that setting a pool's 'compatibility' " 9719 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n" 9720 "inhibit upgrades.\n\n")); 9721 (void) printf(gettext("VER POOL\n")); 9722 (void) printf(gettext("--- ------------\n")); 9723 cbp->cb_first = B_FALSE; 9724 } 9725 9726 (void) printf("%2llu %s\n", (u_longlong_t)version, 9727 zpool_get_name(zhp)); 9728 } 9729 9730 return (0); 9731 } 9732 9733 static int 9734 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg) 9735 { 9736 upgrade_cbdata_t *cbp = arg; 9737 nvlist_t *config; 9738 uint64_t version; 9739 9740 config = zpool_get_config(zhp, NULL); 9741 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 9742 &version) == 0); 9743 9744 if (version >= SPA_VERSION_FEATURES) { 9745 int i; 9746 boolean_t poolfirst = B_TRUE; 9747 nvlist_t *enabled = zpool_get_features(zhp); 9748 9749 for (i = 0; i < SPA_FEATURES; i++) { 9750 const char *fguid = spa_feature_table[i].fi_guid; 9751 const char *fname = spa_feature_table[i].fi_uname; 9752 9753 if (!spa_feature_table[i].fi_zfs_mod_supported) 9754 continue; 9755 9756 if (!nvlist_exists(enabled, fguid)) { 9757 if (cbp->cb_first) { 9758 (void) printf(gettext("\nSome " 9759 "supported features are not " 9760 "enabled on the following pools. " 9761 "Once a\nfeature is enabled the " 9762 "pool may become incompatible with " 9763 "software\nthat does not support " 9764 "the feature. See " 9765 "zpool-features(7) for " 9766 "details.\n\n" 9767 "Note that the pool " 9768 "'compatibility' feature can be " 9769 "used to inhibit\nfeature " 9770 "upgrades.\n\n")); 9771 (void) printf(gettext("POOL " 9772 "FEATURE\n")); 9773 (void) printf(gettext("------" 9774 "---------\n")); 9775 cbp->cb_first = B_FALSE; 9776 } 9777 9778 if (poolfirst) { 9779 (void) printf(gettext("%s\n"), 9780 zpool_get_name(zhp)); 9781 poolfirst = B_FALSE; 9782 } 9783 9784 (void) printf(gettext(" %s\n"), fname); 9785 } 9786 /* 9787 * If they did "zpool upgrade -a", then we could 9788 * be doing ioctls to different pools. We need 9789 * to log this history once to each pool, and bypass 9790 * the normal history logging that happens in main(). 
9791 */ 9792 (void) zpool_log_history(g_zfs, history_str); 9793 log_history = B_FALSE; 9794 } 9795 } 9796 9797 return (0); 9798 } 9799 9800 static int 9801 upgrade_one(zpool_handle_t *zhp, void *data) 9802 { 9803 boolean_t modified_pool = B_FALSE; 9804 upgrade_cbdata_t *cbp = data; 9805 uint64_t cur_version; 9806 int ret; 9807 9808 if (strcmp("log", zpool_get_name(zhp)) == 0) { 9809 (void) fprintf(stderr, gettext("'log' is now a reserved word\n" 9810 "Pool 'log' must be renamed using export and import" 9811 " to upgrade.\n")); 9812 return (1); 9813 } 9814 9815 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 9816 if (cur_version > cbp->cb_version) { 9817 (void) printf(gettext("Pool '%s' is already formatted " 9818 "using more current version '%llu'.\n\n"), 9819 zpool_get_name(zhp), (u_longlong_t)cur_version); 9820 return (0); 9821 } 9822 9823 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) { 9824 (void) printf(gettext("Pool '%s' is already formatted " 9825 "using version %llu.\n\n"), zpool_get_name(zhp), 9826 (u_longlong_t)cbp->cb_version); 9827 return (0); 9828 } 9829 9830 if (cur_version != cbp->cb_version) { 9831 modified_pool = B_TRUE; 9832 ret = upgrade_version(zhp, cbp->cb_version); 9833 if (ret != 0) 9834 return (ret); 9835 } 9836 9837 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 9838 int count = 0; 9839 ret = upgrade_enable_all(zhp, &count); 9840 if (ret != 0) 9841 return (ret); 9842 9843 if (count != 0) { 9844 modified_pool = B_TRUE; 9845 } else if (cur_version == SPA_VERSION) { 9846 (void) printf(gettext("Pool '%s' already has all " 9847 "supported and requested features enabled.\n"), 9848 zpool_get_name(zhp)); 9849 } 9850 } 9851 9852 if (modified_pool) { 9853 (void) printf("\n"); 9854 (void) after_zpool_upgrade(zhp); 9855 } 9856 9857 return (0); 9858 } 9859 9860 /* 9861 * zpool upgrade 9862 * zpool upgrade -v 9863 * zpool upgrade [-V version] <-a | pool ...> 9864 * 9865 * With no arguments, display downrev'd ZFS pool available for upgrade. 9866 * Individual pools can be upgraded by specifying the pool, and '-a' will 9867 * upgrade all pools. 
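For example, 'zpool upgrade tank' (where 'tank' is just an illustrative pool name) upgrades that one pool to the latest version and enables all supported and requested features, while 'zpool upgrade -a' does the same for every imported pool.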
9868 */ 9869 int 9870 zpool_do_upgrade(int argc, char **argv) 9871 { 9872 int c; 9873 upgrade_cbdata_t cb = { 0 }; 9874 int ret = 0; 9875 boolean_t showversions = B_FALSE; 9876 boolean_t upgradeall = B_FALSE; 9877 char *end; 9878 9879 9880 /* check options */ 9881 while ((c = getopt(argc, argv, ":avV:")) != -1) { 9882 switch (c) { 9883 case 'a': 9884 upgradeall = B_TRUE; 9885 break; 9886 case 'v': 9887 showversions = B_TRUE; 9888 break; 9889 case 'V': 9890 cb.cb_version = strtoll(optarg, &end, 10); 9891 if (*end != '\0' || 9892 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) { 9893 (void) fprintf(stderr, 9894 gettext("invalid version '%s'\n"), optarg); 9895 usage(B_FALSE); 9896 } 9897 break; 9898 case ':': 9899 (void) fprintf(stderr, gettext("missing argument for " 9900 "'%c' option\n"), optopt); 9901 usage(B_FALSE); 9902 break; 9903 case '?': 9904 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 9905 optopt); 9906 usage(B_FALSE); 9907 } 9908 } 9909 9910 cb.cb_argc = argc; 9911 cb.cb_argv = argv; 9912 argc -= optind; 9913 argv += optind; 9914 9915 if (cb.cb_version == 0) { 9916 cb.cb_version = SPA_VERSION; 9917 } else if (!upgradeall && argc == 0) { 9918 (void) fprintf(stderr, gettext("-V option is " 9919 "incompatible with other arguments\n")); 9920 usage(B_FALSE); 9921 } 9922 9923 if (showversions) { 9924 if (upgradeall || argc != 0) { 9925 (void) fprintf(stderr, gettext("-v option is " 9926 "incompatible with other arguments\n")); 9927 usage(B_FALSE); 9928 } 9929 } else if (upgradeall) { 9930 if (argc != 0) { 9931 (void) fprintf(stderr, gettext("-a option should not " 9932 "be used along with a pool name\n")); 9933 usage(B_FALSE); 9934 } 9935 } 9936 9937 (void) printf("%s", gettext("This system supports ZFS pool feature " 9938 "flags.\n\n")); 9939 if (showversions) { 9940 int i; 9941 9942 (void) printf(gettext("The following features are " 9943 "supported:\n\n")); 9944 (void) printf(gettext("FEAT DESCRIPTION\n")); 9945 (void) printf("----------------------------------------------" 9946 "---------------\n"); 9947 for (i = 0; i < SPA_FEATURES; i++) { 9948 zfeature_info_t *fi = &spa_feature_table[i]; 9949 if (!fi->fi_zfs_mod_supported) 9950 continue; 9951 const char *ro = 9952 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ? 
9953 " (read-only compatible)" : ""; 9954 9955 (void) printf("%-37s%s\n", fi->fi_uname, ro); 9956 (void) printf(" %s\n", fi->fi_desc); 9957 } 9958 (void) printf("\n"); 9959 9960 (void) printf(gettext("The following legacy versions are also " 9961 "supported:\n\n")); 9962 (void) printf(gettext("VER DESCRIPTION\n")); 9963 (void) printf("--- -----------------------------------------" 9964 "---------------\n"); 9965 (void) printf(gettext(" 1 Initial ZFS version\n")); 9966 (void) printf(gettext(" 2 Ditto blocks " 9967 "(replicated metadata)\n")); 9968 (void) printf(gettext(" 3 Hot spares and double parity " 9969 "RAID-Z\n")); 9970 (void) printf(gettext(" 4 zpool history\n")); 9971 (void) printf(gettext(" 5 Compression using the gzip " 9972 "algorithm\n")); 9973 (void) printf(gettext(" 6 bootfs pool property\n")); 9974 (void) printf(gettext(" 7 Separate intent log devices\n")); 9975 (void) printf(gettext(" 8 Delegated administration\n")); 9976 (void) printf(gettext(" 9 refquota and refreservation " 9977 "properties\n")); 9978 (void) printf(gettext(" 10 Cache devices\n")); 9979 (void) printf(gettext(" 11 Improved scrub performance\n")); 9980 (void) printf(gettext(" 12 Snapshot properties\n")); 9981 (void) printf(gettext(" 13 snapused property\n")); 9982 (void) printf(gettext(" 14 passthrough-x aclinherit\n")); 9983 (void) printf(gettext(" 15 user/group space accounting\n")); 9984 (void) printf(gettext(" 16 stmf property support\n")); 9985 (void) printf(gettext(" 17 Triple-parity RAID-Z\n")); 9986 (void) printf(gettext(" 18 Snapshot user holds\n")); 9987 (void) printf(gettext(" 19 Log device removal\n")); 9988 (void) printf(gettext(" 20 Compression using zle " 9989 "(zero-length encoding)\n")); 9990 (void) printf(gettext(" 21 Deduplication\n")); 9991 (void) printf(gettext(" 22 Received properties\n")); 9992 (void) printf(gettext(" 23 Slim ZIL\n")); 9993 (void) printf(gettext(" 24 System attributes\n")); 9994 (void) printf(gettext(" 25 Improved scrub stats\n")); 9995 (void) printf(gettext(" 26 Improved snapshot deletion " 9996 "performance\n")); 9997 (void) printf(gettext(" 27 Improved snapshot creation " 9998 "performance\n")); 9999 (void) printf(gettext(" 28 Multiple vdev replacements\n")); 10000 (void) printf(gettext("\nFor more information on a particular " 10001 "version, including supported releases,\n")); 10002 (void) printf(gettext("see the ZFS Administration Guide.\n\n")); 10003 } else if (argc == 0 && upgradeall) { 10004 cb.cb_first = B_TRUE; 10005 ret = zpool_iter(g_zfs, upgrade_cb, &cb); 10006 if (ret == 0 && cb.cb_first) { 10007 if (cb.cb_version == SPA_VERSION) { 10008 (void) printf(gettext("All pools are already " 10009 "formatted using feature flags.\n\n")); 10010 (void) printf(gettext("Every feature flags " 10011 "pool already has all supported and " 10012 "requested features enabled.\n")); 10013 } else { 10014 (void) printf(gettext("All pools are already " 10015 "formatted with version %llu or higher.\n"), 10016 (u_longlong_t)cb.cb_version); 10017 } 10018 } 10019 } else if (argc == 0) { 10020 cb.cb_first = B_TRUE; 10021 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb); 10022 assert(ret == 0); 10023 10024 if (cb.cb_first) { 10025 (void) printf(gettext("All pools are formatted " 10026 "using feature flags.\n\n")); 10027 } else { 10028 (void) printf(gettext("\nUse 'zpool upgrade -v' " 10029 "for a list of available legacy versions.\n")); 10030 } 10031 10032 cb.cb_first = B_TRUE; 10033 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb); 10034 assert(ret == 0); 10035 10036 if 
(cb.cb_first) { 10037 (void) printf(gettext("Every feature flags pool has " 10038 "all supported and requested features enabled.\n")); 10039 } else { 10040 (void) printf(gettext("\n")); 10041 } 10042 } else { 10043 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 10044 B_FALSE, upgrade_one, &cb); 10045 } 10046 10047 return (ret); 10048 } 10049 10050 typedef struct hist_cbdata { 10051 boolean_t first; 10052 boolean_t longfmt; 10053 boolean_t internal; 10054 } hist_cbdata_t; 10055 10056 static void 10057 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb) 10058 { 10059 nvlist_t **records; 10060 uint_t numrecords; 10061 int i; 10062 10063 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD, 10064 &records, &numrecords) == 0); 10065 for (i = 0; i < numrecords; i++) { 10066 nvlist_t *rec = records[i]; 10067 char tbuf[64] = ""; 10068 10069 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) { 10070 time_t tsec; 10071 struct tm t; 10072 10073 tsec = fnvlist_lookup_uint64(records[i], 10074 ZPOOL_HIST_TIME); 10075 (void) localtime_r(&tsec, &t); 10076 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 10077 } 10078 10079 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) { 10080 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i], 10081 ZPOOL_HIST_ELAPSED_NS); 10082 (void) snprintf(tbuf + strlen(tbuf), 10083 sizeof (tbuf) - strlen(tbuf), 10084 " (%lldms)", (long long)elapsed_ns / 1000 / 1000); 10085 } 10086 10087 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) { 10088 (void) printf("%s %s", tbuf, 10089 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD)); 10090 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) { 10091 int ievent = 10092 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT); 10093 if (!cb->internal) 10094 continue; 10095 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) { 10096 (void) printf("%s unrecognized record:\n", 10097 tbuf); 10098 dump_nvlist(rec, 4); 10099 continue; 10100 } 10101 (void) printf("%s [internal %s txg:%lld] %s", tbuf, 10102 zfs_history_event_names[ievent], 10103 (longlong_t)fnvlist_lookup_uint64( 10104 rec, ZPOOL_HIST_TXG), 10105 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR)); 10106 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) { 10107 if (!cb->internal) 10108 continue; 10109 (void) printf("%s [txg:%lld] %s", tbuf, 10110 (longlong_t)fnvlist_lookup_uint64( 10111 rec, ZPOOL_HIST_TXG), 10112 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME)); 10113 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) { 10114 (void) printf(" %s (%llu)", 10115 fnvlist_lookup_string(rec, 10116 ZPOOL_HIST_DSNAME), 10117 (u_longlong_t)fnvlist_lookup_uint64(rec, 10118 ZPOOL_HIST_DSID)); 10119 } 10120 (void) printf(" %s", fnvlist_lookup_string(rec, 10121 ZPOOL_HIST_INT_STR)); 10122 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) { 10123 if (!cb->internal) 10124 continue; 10125 (void) printf("%s ioctl %s\n", tbuf, 10126 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL)); 10127 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) { 10128 (void) printf(" input:\n"); 10129 dump_nvlist(fnvlist_lookup_nvlist(rec, 10130 ZPOOL_HIST_INPUT_NVL), 8); 10131 } 10132 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) { 10133 (void) printf(" output:\n"); 10134 dump_nvlist(fnvlist_lookup_nvlist(rec, 10135 ZPOOL_HIST_OUTPUT_NVL), 8); 10136 } 10137 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) { 10138 (void) printf(" output nvlist omitted; " 10139 "original size: %lldKB\n", 10140 (longlong_t)fnvlist_lookup_int64(rec, 10141 ZPOOL_HIST_OUTPUT_SIZE) / 1024); 10142 } 10143 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) { 10144 (void) printf(" 
errno: %lld\n", 10145 (longlong_t)fnvlist_lookup_int64(rec, 10146 ZPOOL_HIST_ERRNO)); 10147 } 10148 } else { 10149 if (!cb->internal) 10150 continue; 10151 (void) printf("%s unrecognized record:\n", tbuf); 10152 dump_nvlist(rec, 4); 10153 } 10154 10155 if (!cb->longfmt) { 10156 (void) printf("\n"); 10157 continue; 10158 } 10159 (void) printf(" ["); 10160 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) { 10161 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO); 10162 struct passwd *pwd = getpwuid(who); 10163 (void) printf("user %d ", (int)who); 10164 if (pwd != NULL) 10165 (void) printf("(%s) ", pwd->pw_name); 10166 } 10167 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) { 10168 (void) printf("on %s", 10169 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST)); 10170 } 10171 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) { 10172 (void) printf(":%s", 10173 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE)); 10174 } 10175 10176 (void) printf("]"); 10177 (void) printf("\n"); 10178 } 10179 } 10180 10181 /* 10182 * Print out the command history for a specific pool. 10183 */ 10184 static int 10185 get_history_one(zpool_handle_t *zhp, void *data) 10186 { 10187 nvlist_t *nvhis; 10188 int ret; 10189 hist_cbdata_t *cb = (hist_cbdata_t *)data; 10190 uint64_t off = 0; 10191 boolean_t eof = B_FALSE; 10192 10193 cb->first = B_FALSE; 10194 10195 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp)); 10196 10197 while (!eof) { 10198 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0) 10199 return (ret); 10200 10201 print_history_records(nvhis, cb); 10202 nvlist_free(nvhis); 10203 } 10204 (void) printf("\n"); 10205 10206 return (ret); 10207 } 10208 10209 /* 10210 * zpool history <pool> 10211 * 10212 * Displays the history of commands that modified pools. 10213 */ 10214 int 10215 zpool_do_history(int argc, char **argv) 10216 { 10217 hist_cbdata_t cbdata = { 0 }; 10218 int ret; 10219 int c; 10220 10221 cbdata.first = B_TRUE; 10222 /* check options */ 10223 while ((c = getopt(argc, argv, "li")) != -1) { 10224 switch (c) { 10225 case 'l': 10226 cbdata.longfmt = B_TRUE; 10227 break; 10228 case 'i': 10229 cbdata.internal = B_TRUE; 10230 break; 10231 case '?': 10232 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10233 optopt); 10234 usage(B_FALSE); 10235 } 10236 } 10237 argc -= optind; 10238 argv += optind; 10239 10240 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 10241 B_FALSE, get_history_one, &cbdata); 10242 10243 if (argc == 0 && cbdata.first == B_TRUE) { 10244 (void) fprintf(stderr, gettext("no pools available\n")); 10245 return (0); 10246 } 10247 10248 return (ret); 10249 } 10250 10251 typedef struct ev_opts { 10252 int verbose; 10253 int scripted; 10254 int follow; 10255 int clear; 10256 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 10257 } ev_opts_t; 10258 10259 static void 10260 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts) 10261 { 10262 char ctime_str[26], str[32]; 10263 const char *ptr; 10264 int64_t *tv; 10265 uint_t n; 10266 10267 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0); 10268 memset(str, ' ', 32); 10269 (void) ctime_r((const time_t *)&tv[0], ctime_str); 10270 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */ 10271 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */ 10272 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */ 10273 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */ 10274 if (opts->scripted) 10275 (void) printf(gettext("%s\t"), str); 10276 else 10277 (void) printf(gettext("%s "), str); 10278 10279 
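	/*
	 * At this point str holds the fixed-width local timestamp assembled
	 * above (e.g. "Jun 30 1993 21:49:08.123456789"); the event class
	 * string (FM_CLASS) is printed next to complete the one-line summary.
	 */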
verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0); 10280 (void) printf(gettext("%s\n"), ptr); 10281 } 10282 10283 static void 10284 zpool_do_events_nvprint(nvlist_t *nvl, int depth) 10285 { 10286 nvpair_t *nvp; 10287 10288 for (nvp = nvlist_next_nvpair(nvl, NULL); 10289 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) { 10290 10291 data_type_t type = nvpair_type(nvp); 10292 const char *name = nvpair_name(nvp); 10293 10294 boolean_t b; 10295 uint8_t i8; 10296 uint16_t i16; 10297 uint32_t i32; 10298 uint64_t i64; 10299 const char *str; 10300 nvlist_t *cnv; 10301 10302 printf(gettext("%*s%s = "), depth, "", name); 10303 10304 switch (type) { 10305 case DATA_TYPE_BOOLEAN: 10306 printf(gettext("%s"), "1"); 10307 break; 10308 10309 case DATA_TYPE_BOOLEAN_VALUE: 10310 (void) nvpair_value_boolean_value(nvp, &b); 10311 printf(gettext("%s"), b ? "1" : "0"); 10312 break; 10313 10314 case DATA_TYPE_BYTE: 10315 (void) nvpair_value_byte(nvp, &i8); 10316 printf(gettext("0x%x"), i8); 10317 break; 10318 10319 case DATA_TYPE_INT8: 10320 (void) nvpair_value_int8(nvp, (void *)&i8); 10321 printf(gettext("0x%x"), i8); 10322 break; 10323 10324 case DATA_TYPE_UINT8: 10325 (void) nvpair_value_uint8(nvp, &i8); 10326 printf(gettext("0x%x"), i8); 10327 break; 10328 10329 case DATA_TYPE_INT16: 10330 (void) nvpair_value_int16(nvp, (void *)&i16); 10331 printf(gettext("0x%x"), i16); 10332 break; 10333 10334 case DATA_TYPE_UINT16: 10335 (void) nvpair_value_uint16(nvp, &i16); 10336 printf(gettext("0x%x"), i16); 10337 break; 10338 10339 case DATA_TYPE_INT32: 10340 (void) nvpair_value_int32(nvp, (void *)&i32); 10341 printf(gettext("0x%x"), i32); 10342 break; 10343 10344 case DATA_TYPE_UINT32: 10345 (void) nvpair_value_uint32(nvp, &i32); 10346 printf(gettext("0x%x"), i32); 10347 break; 10348 10349 case DATA_TYPE_INT64: 10350 (void) nvpair_value_int64(nvp, (void *)&i64); 10351 printf(gettext("0x%llx"), (u_longlong_t)i64); 10352 break; 10353 10354 case DATA_TYPE_UINT64: 10355 (void) nvpair_value_uint64(nvp, &i64); 10356 /* 10357 * translate vdev state values to readable 10358 * strings to aide zpool events consumers 10359 */ 10360 if (strcmp(name, 10361 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 || 10362 strcmp(name, 10363 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) { 10364 printf(gettext("\"%s\" (0x%llx)"), 10365 zpool_state_to_name(i64, VDEV_AUX_NONE), 10366 (u_longlong_t)i64); 10367 } else { 10368 printf(gettext("0x%llx"), (u_longlong_t)i64); 10369 } 10370 break; 10371 10372 case DATA_TYPE_HRTIME: 10373 (void) nvpair_value_hrtime(nvp, (void *)&i64); 10374 printf(gettext("0x%llx"), (u_longlong_t)i64); 10375 break; 10376 10377 case DATA_TYPE_STRING: 10378 (void) nvpair_value_string(nvp, &str); 10379 printf(gettext("\"%s\""), str ? 
str : "<NULL>"); 10380 break; 10381 10382 case DATA_TYPE_NVLIST: 10383 printf(gettext("(embedded nvlist)\n")); 10384 (void) nvpair_value_nvlist(nvp, &cnv); 10385 zpool_do_events_nvprint(cnv, depth + 8); 10386 printf(gettext("%*s(end %s)"), depth, "", name); 10387 break; 10388 10389 case DATA_TYPE_NVLIST_ARRAY: { 10390 nvlist_t **val; 10391 uint_t i, nelem; 10392 10393 (void) nvpair_value_nvlist_array(nvp, &val, &nelem); 10394 printf(gettext("(%d embedded nvlists)\n"), nelem); 10395 for (i = 0; i < nelem; i++) { 10396 printf(gettext("%*s%s[%d] = %s\n"), 10397 depth, "", name, i, "(embedded nvlist)"); 10398 zpool_do_events_nvprint(val[i], depth + 8); 10399 printf(gettext("%*s(end %s[%i])\n"), 10400 depth, "", name, i); 10401 } 10402 printf(gettext("%*s(end %s)\n"), depth, "", name); 10403 } 10404 break; 10405 10406 case DATA_TYPE_INT8_ARRAY: { 10407 int8_t *val; 10408 uint_t i, nelem; 10409 10410 (void) nvpair_value_int8_array(nvp, &val, &nelem); 10411 for (i = 0; i < nelem; i++) 10412 printf(gettext("0x%x "), val[i]); 10413 10414 break; 10415 } 10416 10417 case DATA_TYPE_UINT8_ARRAY: { 10418 uint8_t *val; 10419 uint_t i, nelem; 10420 10421 (void) nvpair_value_uint8_array(nvp, &val, &nelem); 10422 for (i = 0; i < nelem; i++) 10423 printf(gettext("0x%x "), val[i]); 10424 10425 break; 10426 } 10427 10428 case DATA_TYPE_INT16_ARRAY: { 10429 int16_t *val; 10430 uint_t i, nelem; 10431 10432 (void) nvpair_value_int16_array(nvp, &val, &nelem); 10433 for (i = 0; i < nelem; i++) 10434 printf(gettext("0x%x "), val[i]); 10435 10436 break; 10437 } 10438 10439 case DATA_TYPE_UINT16_ARRAY: { 10440 uint16_t *val; 10441 uint_t i, nelem; 10442 10443 (void) nvpair_value_uint16_array(nvp, &val, &nelem); 10444 for (i = 0; i < nelem; i++) 10445 printf(gettext("0x%x "), val[i]); 10446 10447 break; 10448 } 10449 10450 case DATA_TYPE_INT32_ARRAY: { 10451 int32_t *val; 10452 uint_t i, nelem; 10453 10454 (void) nvpair_value_int32_array(nvp, &val, &nelem); 10455 for (i = 0; i < nelem; i++) 10456 printf(gettext("0x%x "), val[i]); 10457 10458 break; 10459 } 10460 10461 case DATA_TYPE_UINT32_ARRAY: { 10462 uint32_t *val; 10463 uint_t i, nelem; 10464 10465 (void) nvpair_value_uint32_array(nvp, &val, &nelem); 10466 for (i = 0; i < nelem; i++) 10467 printf(gettext("0x%x "), val[i]); 10468 10469 break; 10470 } 10471 10472 case DATA_TYPE_INT64_ARRAY: { 10473 int64_t *val; 10474 uint_t i, nelem; 10475 10476 (void) nvpair_value_int64_array(nvp, &val, &nelem); 10477 for (i = 0; i < nelem; i++) 10478 printf(gettext("0x%llx "), 10479 (u_longlong_t)val[i]); 10480 10481 break; 10482 } 10483 10484 case DATA_TYPE_UINT64_ARRAY: { 10485 uint64_t *val; 10486 uint_t i, nelem; 10487 10488 (void) nvpair_value_uint64_array(nvp, &val, &nelem); 10489 for (i = 0; i < nelem; i++) 10490 printf(gettext("0x%llx "), 10491 (u_longlong_t)val[i]); 10492 10493 break; 10494 } 10495 10496 case DATA_TYPE_STRING_ARRAY: { 10497 const char **str; 10498 uint_t i, nelem; 10499 10500 (void) nvpair_value_string_array(nvp, &str, &nelem); 10501 for (i = 0; i < nelem; i++) 10502 printf(gettext("\"%s\" "), 10503 str[i] ? 
str[i] : "<NULL>"); 10504 10505 break; 10506 } 10507 10508 case DATA_TYPE_BOOLEAN_ARRAY: 10509 case DATA_TYPE_BYTE_ARRAY: 10510 case DATA_TYPE_DOUBLE: 10511 case DATA_TYPE_DONTCARE: 10512 case DATA_TYPE_UNKNOWN: 10513 printf(gettext("<unknown>")); 10514 break; 10515 } 10516 10517 printf(gettext("\n")); 10518 } 10519 } 10520 10521 static int 10522 zpool_do_events_next(ev_opts_t *opts) 10523 { 10524 nvlist_t *nvl; 10525 int zevent_fd, ret, dropped; 10526 const char *pool; 10527 10528 zevent_fd = open(ZFS_DEV, O_RDWR); 10529 VERIFY(zevent_fd >= 0); 10530 10531 if (!opts->scripted) 10532 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS"); 10533 10534 while (1) { 10535 ret = zpool_events_next(g_zfs, &nvl, &dropped, 10536 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd); 10537 if (ret || nvl == NULL) 10538 break; 10539 10540 if (dropped > 0) 10541 (void) printf(gettext("dropped %d events\n"), dropped); 10542 10543 if (strlen(opts->poolname) > 0 && 10544 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 && 10545 strcmp(opts->poolname, pool) != 0) 10546 continue; 10547 10548 zpool_do_events_short(nvl, opts); 10549 10550 if (opts->verbose) { 10551 zpool_do_events_nvprint(nvl, 8); 10552 printf(gettext("\n")); 10553 } 10554 (void) fflush(stdout); 10555 10556 nvlist_free(nvl); 10557 } 10558 10559 VERIFY(0 == close(zevent_fd)); 10560 10561 return (ret); 10562 } 10563 10564 static int 10565 zpool_do_events_clear(void) 10566 { 10567 int count, ret; 10568 10569 ret = zpool_events_clear(g_zfs, &count); 10570 if (!ret) 10571 (void) printf(gettext("cleared %d events\n"), count); 10572 10573 return (ret); 10574 } 10575 10576 /* 10577 * zpool events [-vHf [pool] | -c] 10578 * 10579 * Displays events logs by ZFS. 10580 */ 10581 int 10582 zpool_do_events(int argc, char **argv) 10583 { 10584 ev_opts_t opts = { 0 }; 10585 int ret; 10586 int c; 10587 10588 /* check options */ 10589 while ((c = getopt(argc, argv, "vHfc")) != -1) { 10590 switch (c) { 10591 case 'v': 10592 opts.verbose = 1; 10593 break; 10594 case 'H': 10595 opts.scripted = 1; 10596 break; 10597 case 'f': 10598 opts.follow = 1; 10599 break; 10600 case 'c': 10601 opts.clear = 1; 10602 break; 10603 case '?': 10604 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10605 optopt); 10606 usage(B_FALSE); 10607 } 10608 } 10609 argc -= optind; 10610 argv += optind; 10611 10612 if (argc > 1) { 10613 (void) fprintf(stderr, gettext("too many arguments\n")); 10614 usage(B_FALSE); 10615 } else if (argc == 1) { 10616 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname)); 10617 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) { 10618 (void) fprintf(stderr, 10619 gettext("invalid pool name '%s'\n"), opts.poolname); 10620 usage(B_FALSE); 10621 } 10622 } 10623 10624 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) && 10625 opts.clear) { 10626 (void) fprintf(stderr, 10627 gettext("invalid options combined with -c\n")); 10628 usage(B_FALSE); 10629 } 10630 10631 if (opts.clear) 10632 ret = zpool_do_events_clear(); 10633 else 10634 ret = zpool_do_events_next(&opts); 10635 10636 return (ret); 10637 } 10638 10639 static int 10640 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data) 10641 { 10642 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 10643 char value[ZFS_MAXPROPLEN]; 10644 zprop_source_t srctype; 10645 10646 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL; 10647 pl = pl->pl_next) { 10648 char *prop_name; 10649 /* 10650 * If the first property is pool name, it is a special 10651 * placeholder 
that we can skip. This will also skip 10652 * over the name property when 'all' is specified. 10653 */ 10654 if (pl->pl_prop == ZPOOL_PROP_NAME && 10655 pl == cbp->cb_proplist) 10656 continue; 10657 10658 if (pl->pl_prop == ZPROP_INVAL) { 10659 prop_name = pl->pl_user_prop; 10660 } else { 10661 prop_name = (char *)vdev_prop_to_name(pl->pl_prop); 10662 } 10663 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop, 10664 prop_name, value, sizeof (value), &srctype, 10665 cbp->cb_literal) == 0) { 10666 zprop_print_one_property(vdevname, cbp, prop_name, 10667 value, srctype, NULL, NULL); 10668 } 10669 } 10670 10671 return (0); 10672 } 10673 10674 static int 10675 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data) 10676 { 10677 zpool_handle_t *zhp = zhp_data; 10678 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 10679 char *vdevname; 10680 const char *type; 10681 int ret; 10682 10683 /* 10684 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the 10685 * pool name for display purposes, which is not desired. Fallback to 10686 * zpool_vdev_name() when not dealing with the root vdev. 10687 */ 10688 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE); 10689 if (zhp != NULL && strcmp(type, "root") == 0) 10690 vdevname = strdup("root-0"); 10691 else 10692 vdevname = zpool_vdev_name(g_zfs, zhp, nv, 10693 cbp->cb_vdevs.cb_name_flags); 10694 10695 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist); 10696 10697 ret = get_callback_vdev(zhp, vdevname, data); 10698 10699 free(vdevname); 10700 10701 return (ret); 10702 } 10703 10704 static int 10705 get_callback(zpool_handle_t *zhp, void *data) 10706 { 10707 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 10708 char value[ZFS_MAXPROPLEN]; 10709 zprop_source_t srctype; 10710 zprop_list_t *pl; 10711 int vid; 10712 10713 if (cbp->cb_type == ZFS_TYPE_VDEV) { 10714 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) { 10715 for_each_vdev(zhp, get_callback_vdev_cb, data); 10716 } else { 10717 /* Adjust column widths for vdev properties */ 10718 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; 10719 vid++) { 10720 vdev_expand_proplist(zhp, 10721 cbp->cb_vdevs.cb_names[vid], 10722 &cbp->cb_proplist); 10723 } 10724 /* Display the properties */ 10725 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; 10726 vid++) { 10727 get_callback_vdev(zhp, 10728 cbp->cb_vdevs.cb_names[vid], data); 10729 } 10730 } 10731 } else { 10732 assert(cbp->cb_type == ZFS_TYPE_POOL); 10733 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { 10734 /* 10735 * Skip the special fake placeholder. This will also 10736 * skip over the name property when 'all' is specified. 
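 * (The placeholder is the 'fake_name' entry that zpool_do_get() prepends
 * to the property list so the NAME column is sized correctly.)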
10737 */ 10738 if (pl->pl_prop == ZPOOL_PROP_NAME && 10739 pl == cbp->cb_proplist) 10740 continue; 10741 10742 if (pl->pl_prop == ZPROP_INVAL && 10743 zfs_prop_user(pl->pl_user_prop)) { 10744 srctype = ZPROP_SRC_LOCAL; 10745 10746 if (zpool_get_userprop(zhp, pl->pl_user_prop, 10747 value, sizeof (value), &srctype) != 0) 10748 continue; 10749 10750 zprop_print_one_property(zpool_get_name(zhp), 10751 cbp, pl->pl_user_prop, value, srctype, 10752 NULL, NULL); 10753 } else if (pl->pl_prop == ZPROP_INVAL && 10754 (zpool_prop_feature(pl->pl_user_prop) || 10755 zpool_prop_unsupported(pl->pl_user_prop))) { 10756 srctype = ZPROP_SRC_LOCAL; 10757 10758 if (zpool_prop_get_feature(zhp, 10759 pl->pl_user_prop, value, 10760 sizeof (value)) == 0) { 10761 zprop_print_one_property( 10762 zpool_get_name(zhp), cbp, 10763 pl->pl_user_prop, value, srctype, 10764 NULL, NULL); 10765 } 10766 } else { 10767 if (zpool_get_prop(zhp, pl->pl_prop, value, 10768 sizeof (value), &srctype, 10769 cbp->cb_literal) != 0) 10770 continue; 10771 10772 zprop_print_one_property(zpool_get_name(zhp), 10773 cbp, zpool_prop_to_name(pl->pl_prop), 10774 value, srctype, NULL, NULL); 10775 } 10776 } 10777 } 10778 10779 return (0); 10780 } 10781 10782 /* 10783 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ... 10784 * 10785 * -H Scripted mode. Don't display headers, and separate properties 10786 * by a single tab. 10787 * -o List of columns to display. Defaults to 10788 * "name,property,value,source". 10789 * -p Display values in parsable (exact) format. 10790 * 10791 * Get properties of pools in the system. Output space statistics 10792 * for each one as well as other attributes. 10793 */ 10794 int 10795 zpool_do_get(int argc, char **argv) 10796 { 10797 zprop_get_cbdata_t cb = { 0 }; 10798 zprop_list_t fake_name = { 0 }; 10799 int ret; 10800 int c, i; 10801 char *propstr = NULL; 10802 char *vdev = NULL; 10803 10804 cb.cb_first = B_TRUE; 10805 10806 /* 10807 * Set up default columns and sources. 
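 * These defaults are equivalent to '-o name,property,value,source'; an
 * explicit -o on the command line replaces them in the option loop below.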
10808 */ 10809 cb.cb_sources = ZPROP_SRC_ALL; 10810 cb.cb_columns[0] = GET_COL_NAME; 10811 cb.cb_columns[1] = GET_COL_PROPERTY; 10812 cb.cb_columns[2] = GET_COL_VALUE; 10813 cb.cb_columns[3] = GET_COL_SOURCE; 10814 cb.cb_type = ZFS_TYPE_POOL; 10815 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; 10816 current_prop_type = cb.cb_type; 10817 10818 /* check options */ 10819 while ((c = getopt(argc, argv, ":Hpo:")) != -1) { 10820 switch (c) { 10821 case 'p': 10822 cb.cb_literal = B_TRUE; 10823 break; 10824 case 'H': 10825 cb.cb_scripted = B_TRUE; 10826 break; 10827 case 'o': 10828 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns)); 10829 i = 0; 10830 10831 for (char *tok; (tok = strsep(&optarg, ",")); ) { 10832 static const char *const col_opts[] = 10833 { "name", "property", "value", "source", 10834 "all" }; 10835 static const zfs_get_column_t col_cols[] = 10836 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE, 10837 GET_COL_SOURCE }; 10838 10839 if (i == ZFS_GET_NCOLS - 1) { 10840 (void) fprintf(stderr, gettext("too " 10841 "many fields given to -o " 10842 "option\n")); 10843 usage(B_FALSE); 10844 } 10845 10846 for (c = 0; c < ARRAY_SIZE(col_opts); ++c) 10847 if (strcmp(tok, col_opts[c]) == 0) 10848 goto found; 10849 10850 (void) fprintf(stderr, 10851 gettext("invalid column name '%s'\n"), tok); 10852 usage(B_FALSE); 10853 10854 found: 10855 if (c >= 4) { 10856 if (i > 0) { 10857 (void) fprintf(stderr, 10858 gettext("\"all\" conflicts " 10859 "with specific fields " 10860 "given to -o option\n")); 10861 usage(B_FALSE); 10862 } 10863 10864 memcpy(cb.cb_columns, col_cols, 10865 sizeof (col_cols)); 10866 i = ZFS_GET_NCOLS - 1; 10867 } else 10868 cb.cb_columns[i++] = col_cols[c]; 10869 } 10870 break; 10871 case '?': 10872 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10873 optopt); 10874 usage(B_FALSE); 10875 } 10876 } 10877 10878 argc -= optind; 10879 argv += optind; 10880 10881 if (argc < 1) { 10882 (void) fprintf(stderr, gettext("missing property " 10883 "argument\n")); 10884 usage(B_FALSE); 10885 } 10886 10887 /* Properties list is needed later by zprop_get_list() */ 10888 propstr = argv[0]; 10889 10890 argc--; 10891 argv++; 10892 10893 if (argc == 0) { 10894 /* No args, so just print the defaults. */ 10895 } else if (are_all_pools(argc, argv)) { 10896 /* All the args are pool names */ 10897 } else if (are_all_pools(1, argv)) { 10898 /* The first arg is a pool name */ 10899 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) || 10900 (argc == 2 && strcmp(argv[1], "root") == 0) || 10901 are_vdevs_in_pool(argc - 1, argv + 1, argv[0], 10902 &cb.cb_vdevs)) { 10903 10904 if (strcmp(argv[1], "root") == 0) 10905 vdev = strdup("root-0"); 10906 else 10907 vdev = strdup(argv[1]); 10908 10909 /* ... and the rest are vdev names */ 10910 cb.cb_vdevs.cb_names = &vdev; 10911 cb.cb_vdevs.cb_names_count = argc - 1; 10912 cb.cb_type = ZFS_TYPE_VDEV; 10913 argc = 1; /* One pool to process */ 10914 } else { 10915 fprintf(stderr, gettext("Expected a list of vdevs in" 10916 " \"%s\", but got:\n"), argv[0]); 10917 error_list_unresolved_vdevs(argc - 1, argv + 1, 10918 argv[0], &cb.cb_vdevs); 10919 fprintf(stderr, "\n"); 10920 usage(B_FALSE); 10921 return (1); 10922 } 10923 } else { 10924 /* 10925 * The first arg isn't the name of a valid pool. 
10926 */ 10927 fprintf(stderr, gettext("Cannot get properties of %s: " 10928 "no such pool available.\n"), argv[0]); 10929 return (1); 10930 } 10931 10932 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist, 10933 cb.cb_type) != 0) { 10934 /* Use correct list of valid properties (pool or vdev) */ 10935 current_prop_type = cb.cb_type; 10936 usage(B_FALSE); 10937 } 10938 10939 if (cb.cb_proplist != NULL) { 10940 fake_name.pl_prop = ZPOOL_PROP_NAME; 10941 fake_name.pl_width = strlen(gettext("NAME")); 10942 fake_name.pl_next = cb.cb_proplist; 10943 cb.cb_proplist = &fake_name; 10944 } 10945 10946 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type, 10947 cb.cb_literal, get_callback, &cb); 10948 10949 if (cb.cb_proplist == &fake_name) 10950 zprop_free_list(fake_name.pl_next); 10951 else 10952 zprop_free_list(cb.cb_proplist); 10953 10954 if (vdev != NULL) 10955 free(vdev); 10956 10957 return (ret); 10958 } 10959 10960 typedef struct set_cbdata { 10961 char *cb_propname; 10962 char *cb_value; 10963 zfs_type_t cb_type; 10964 vdev_cbdata_t cb_vdevs; 10965 boolean_t cb_any_successful; 10966 } set_cbdata_t; 10967 10968 static int 10969 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb) 10970 { 10971 int error; 10972 10973 /* Check if we have out-of-bounds features */ 10974 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) { 10975 boolean_t features[SPA_FEATURES]; 10976 if (zpool_do_load_compat(cb->cb_value, features) != 10977 ZPOOL_COMPATIBILITY_OK) 10978 return (-1); 10979 10980 nvlist_t *enabled = zpool_get_features(zhp); 10981 spa_feature_t i; 10982 for (i = 0; i < SPA_FEATURES; i++) { 10983 const char *fguid = spa_feature_table[i].fi_guid; 10984 if (nvlist_exists(enabled, fguid) && !features[i]) 10985 break; 10986 } 10987 if (i < SPA_FEATURES) 10988 (void) fprintf(stderr, gettext("Warning: one or " 10989 "more features already enabled on pool '%s'\n" 10990 "are not present in this compatibility set.\n"), 10991 zpool_get_name(zhp)); 10992 } 10993 10994 /* if we're setting a feature, check it's in compatibility set */ 10995 if (zpool_prop_feature(cb->cb_propname) && 10996 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) { 10997 char *fname = strchr(cb->cb_propname, '@') + 1; 10998 spa_feature_t f; 10999 11000 if (zfeature_lookup_name(fname, &f) == 0) { 11001 char compat[ZFS_MAXPROPLEN]; 11002 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, 11003 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 11004 compat[0] = '\0'; 11005 11006 boolean_t features[SPA_FEATURES]; 11007 if (zpool_do_load_compat(compat, features) != 11008 ZPOOL_COMPATIBILITY_OK) { 11009 (void) fprintf(stderr, gettext("Error: " 11010 "cannot enable feature '%s' on pool '%s'\n" 11011 "because the pool's 'compatibility' " 11012 "property cannot be parsed.\n"), 11013 fname, zpool_get_name(zhp)); 11014 return (-1); 11015 } 11016 11017 if (!features[f]) { 11018 (void) fprintf(stderr, gettext("Error: " 11019 "cannot enable feature '%s' on pool '%s'\n" 11020 "as it is not specified in this pool's " 11021 "current compatibility set.\n" 11022 "Consider setting 'compatibility' to a " 11023 "less restrictive set, or to 'off'.\n"), 11024 fname, zpool_get_name(zhp)); 11025 return (-1); 11026 } 11027 } 11028 } 11029 11030 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value); 11031 11032 return (error); 11033 } 11034 11035 static int 11036 set_callback(zpool_handle_t *zhp, void *data) 11037 { 11038 int error; 11039 set_cbdata_t *cb = (set_cbdata_t *)data; 11040 11041 if (cb->cb_type == ZFS_TYPE_VDEV) { 11042 error 
= zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names, 11043 cb->cb_propname, cb->cb_value); 11044 } else { 11045 assert(cb->cb_type == ZFS_TYPE_POOL); 11046 error = set_pool_callback(zhp, cb); 11047 } 11048 11049 cb->cb_any_successful = !error; 11050 return (error); 11051 } 11052 11053 int 11054 zpool_do_set(int argc, char **argv) 11055 { 11056 set_cbdata_t cb = { 0 }; 11057 int error; 11058 char *vdev = NULL; 11059 11060 current_prop_type = ZFS_TYPE_POOL; 11061 if (argc > 1 && argv[1][0] == '-') { 11062 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 11063 argv[1][1]); 11064 usage(B_FALSE); 11065 } 11066 11067 if (argc < 2) { 11068 (void) fprintf(stderr, gettext("missing property=value " 11069 "argument\n")); 11070 usage(B_FALSE); 11071 } 11072 11073 if (argc < 3) { 11074 (void) fprintf(stderr, gettext("missing pool name\n")); 11075 usage(B_FALSE); 11076 } 11077 11078 if (argc > 4) { 11079 (void) fprintf(stderr, gettext("too many pool names\n")); 11080 usage(B_FALSE); 11081 } 11082 11083 cb.cb_propname = argv[1]; 11084 cb.cb_type = ZFS_TYPE_POOL; 11085 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; 11086 cb.cb_value = strchr(cb.cb_propname, '='); 11087 if (cb.cb_value == NULL) { 11088 (void) fprintf(stderr, gettext("missing value in " 11089 "property=value argument\n")); 11090 usage(B_FALSE); 11091 } 11092 11093 *(cb.cb_value) = '\0'; 11094 cb.cb_value++; 11095 argc -= 2; 11096 argv += 2; 11097 11098 /* argv[0] is pool name */ 11099 if (!is_pool(argv[0])) { 11100 (void) fprintf(stderr, 11101 gettext("cannot open '%s': is not a pool\n"), argv[0]); 11102 return (EINVAL); 11103 } 11104 11105 /* argv[1], when supplied, is vdev name */ 11106 if (argc == 2) { 11107 11108 if (strcmp(argv[1], "root") == 0) 11109 vdev = strdup("root-0"); 11110 else 11111 vdev = strdup(argv[1]); 11112 11113 if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) { 11114 (void) fprintf(stderr, gettext( 11115 "cannot find '%s' in '%s': device not in pool\n"), 11116 vdev, argv[0]); 11117 free(vdev); 11118 return (EINVAL); 11119 } 11120 cb.cb_vdevs.cb_names = &vdev; 11121 cb.cb_vdevs.cb_names_count = 1; 11122 cb.cb_type = ZFS_TYPE_VDEV; 11123 } 11124 11125 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 11126 B_FALSE, set_callback, &cb); 11127 11128 if (vdev != NULL) 11129 free(vdev); 11130 11131 return (error); 11132 } 11133 11134 /* Add up the total number of bytes left to initialize/trim across all vdevs */ 11135 static uint64_t 11136 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity) 11137 { 11138 uint64_t bytes_remaining; 11139 nvlist_t **child; 11140 uint_t c, children; 11141 vdev_stat_t *vs; 11142 11143 assert(activity == ZPOOL_WAIT_INITIALIZE || 11144 activity == ZPOOL_WAIT_TRIM); 11145 11146 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 11147 (uint64_t **)&vs, &c) == 0); 11148 11149 if (activity == ZPOOL_WAIT_INITIALIZE && 11150 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) 11151 bytes_remaining = vs->vs_initialize_bytes_est - 11152 vs->vs_initialize_bytes_done; 11153 else if (activity == ZPOOL_WAIT_TRIM && 11154 vs->vs_trim_state == VDEV_TRIM_ACTIVE) 11155 bytes_remaining = vs->vs_trim_bytes_est - 11156 vs->vs_trim_bytes_done; 11157 else 11158 bytes_remaining = 0; 11159 11160 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 11161 &child, &children) != 0) 11162 children = 0; 11163 11164 for (c = 0; c < children; c++) 11165 bytes_remaining += vdev_activity_remaining(child[c], activity); 11166 11167 return (bytes_remaining); 11168 } 11169 
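/*
 * A minimal sketch of how the recursive helper above can be driven for a
 * single pool, assuming an already-open zpool_handle_t: fetch the cached
 * config, start from the root vdev, and sum the bytes still to be trimmed.
 * Not wired into any command here; the real consumer is
 * print_wait_status_row() below.
 */
#if 0
static uint64_t
pool_trim_bytes_remaining(zpool_handle_t *zhp)
{
	/* The cached config carries the full vdev tree. */
	nvlist_t *config = zpool_get_config(zhp, NULL);
	nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE);

	/* Recursively add up vs_trim_bytes_est - vs_trim_bytes_done. */
	return (vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM));
}
#endif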
11170 /* Add up the total number of bytes left to rebuild across top-level vdevs */ 11171 static uint64_t 11172 vdev_activity_top_remaining(nvlist_t *nv) 11173 { 11174 uint64_t bytes_remaining = 0; 11175 nvlist_t **child; 11176 uint_t children; 11177 int error; 11178 11179 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 11180 &child, &children) != 0) 11181 children = 0; 11182 11183 for (uint_t c = 0; c < children; c++) { 11184 vdev_rebuild_stat_t *vrs; 11185 uint_t i; 11186 11187 error = nvlist_lookup_uint64_array(child[c], 11188 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i); 11189 if (error == 0) { 11190 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 11191 bytes_remaining += (vrs->vrs_bytes_est - 11192 vrs->vrs_bytes_rebuilt); 11193 } 11194 } 11195 } 11196 11197 return (bytes_remaining); 11198 } 11199 11200 /* Whether any vdevs are 'spare' or 'replacing' vdevs */ 11201 static boolean_t 11202 vdev_any_spare_replacing(nvlist_t *nv) 11203 { 11204 nvlist_t **child; 11205 uint_t c, children; 11206 const char *vdev_type; 11207 11208 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type); 11209 11210 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 || 11211 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 || 11212 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) { 11213 return (B_TRUE); 11214 } 11215 11216 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 11217 &child, &children) != 0) 11218 children = 0; 11219 11220 for (c = 0; c < children; c++) { 11221 if (vdev_any_spare_replacing(child[c])) 11222 return (B_TRUE); 11223 } 11224 11225 return (B_FALSE); 11226 } 11227 11228 typedef struct wait_data { 11229 char *wd_poolname; 11230 boolean_t wd_scripted; 11231 boolean_t wd_exact; 11232 boolean_t wd_headers_once; 11233 boolean_t wd_should_exit; 11234 /* Which activities to wait for */ 11235 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES]; 11236 float wd_interval; 11237 pthread_cond_t wd_cv; 11238 pthread_mutex_t wd_mutex; 11239 } wait_data_t; 11240 11241 /* 11242 * Print to stdout a single line, containing one column for each activity that 11243 * we are waiting for specifying how many bytes of work are left for that 11244 * activity. 11245 */ 11246 static void 11247 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row) 11248 { 11249 nvlist_t *config, *nvroot; 11250 uint_t c; 11251 int i; 11252 pool_checkpoint_stat_t *pcs = NULL; 11253 pool_scan_stat_t *pss = NULL; 11254 pool_removal_stat_t *prs = NULL; 11255 pool_raidz_expand_stat_t *pres = NULL; 11256 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE", 11257 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"}; 11258 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES]; 11259 11260 /* Calculate the width of each column */ 11261 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 11262 /* 11263 * Make sure we have enough space in the col for pretty-printed 11264 * numbers and for the column header, and then leave a couple 11265 * spaces between cols for readability. 
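 * For example, "TRIM" gets MAX(4, 6) + 2 = 8 columns while
 * "RAIDZ_EXPAND" gets MAX(12, 6) + 2 = 14.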
11266 */ 11267 col_widths[i] = MAX(strlen(headers[i]), 6) + 2; 11268 } 11269 11270 if (timestamp_fmt != NODATE) 11271 print_timestamp(timestamp_fmt); 11272 11273 /* Print header if appropriate */ 11274 int term_height = terminal_height(); 11275 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 && 11276 row % (term_height-1) == 0); 11277 if (!wd->wd_scripted && (row == 0 || reprint_header)) { 11278 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 11279 if (wd->wd_enabled[i]) 11280 (void) printf("%*s", col_widths[i], headers[i]); 11281 } 11282 (void) fputc('\n', stdout); 11283 } 11284 11285 /* Bytes of work remaining in each activity */ 11286 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0}; 11287 11288 bytes_rem[ZPOOL_WAIT_FREE] = 11289 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL); 11290 11291 config = zpool_get_config(zhp, NULL); 11292 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 11293 11294 (void) nvlist_lookup_uint64_array(nvroot, 11295 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 11296 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 11297 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space; 11298 11299 (void) nvlist_lookup_uint64_array(nvroot, 11300 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 11301 if (prs != NULL && prs->prs_state == DSS_SCANNING) 11302 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy - 11303 prs->prs_copied; 11304 11305 (void) nvlist_lookup_uint64_array(nvroot, 11306 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c); 11307 if (pss != NULL && pss->pss_state == DSS_SCANNING && 11308 pss->pss_pass_scrub_pause == 0) { 11309 int64_t rem = pss->pss_to_examine - pss->pss_issued; 11310 if (pss->pss_func == POOL_SCAN_SCRUB) 11311 bytes_rem[ZPOOL_WAIT_SCRUB] = rem; 11312 else 11313 bytes_rem[ZPOOL_WAIT_RESILVER] = rem; 11314 } else if (check_rebuilding(nvroot, NULL)) { 11315 bytes_rem[ZPOOL_WAIT_RESILVER] = 11316 vdev_activity_top_remaining(nvroot); 11317 } 11318 11319 (void) nvlist_lookup_uint64_array(nvroot, 11320 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c); 11321 if (pres != NULL && pres->pres_state == DSS_SCANNING) { 11322 int64_t rem = pres->pres_to_reflow - pres->pres_reflowed; 11323 bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem; 11324 } 11325 11326 bytes_rem[ZPOOL_WAIT_INITIALIZE] = 11327 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE); 11328 bytes_rem[ZPOOL_WAIT_TRIM] = 11329 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM); 11330 11331 /* 11332 * A replace finishes after resilvering finishes, so the amount of work 11333 * left for a replace is the same as for resilvering. 11334 * 11335 * It isn't quite correct to say that if we have any 'spare' or 11336 * 'replacing' vdevs and a resilver is happening, then a replace is in 11337 * progress, like we do here. When a hot spare is used, the faulted vdev 11338 * is not removed after the hot spare is resilvered, so parent 'spare' 11339 * vdev is not removed either. So we could have a 'spare' vdev, but be 11340 * resilvering for a different reason. However, we use it as a heuristic 11341 * because we don't have access to the DTLs, which could tell us whether 11342 * or not we have really finished resilvering a hot spare. 
11343 */ 11344 if (vdev_any_spare_replacing(nvroot)) 11345 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER]; 11346 11347 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 11348 char buf[64]; 11349 if (!wd->wd_enabled[i]) 11350 continue; 11351 11352 if (wd->wd_exact) { 11353 (void) snprintf(buf, sizeof (buf), "%" PRIi64, 11354 bytes_rem[i]); 11355 } else { 11356 zfs_nicenum(bytes_rem[i], buf, sizeof (buf)); 11357 } 11358 11359 if (wd->wd_scripted) 11360 (void) printf(i == 0 ? "%s" : "\t%s", buf); 11361 else 11362 (void) printf(" %*s", col_widths[i] - 1, buf); 11363 } 11364 (void) printf("\n"); 11365 (void) fflush(stdout); 11366 } 11367 11368 static void * 11369 wait_status_thread(void *arg) 11370 { 11371 wait_data_t *wd = (wait_data_t *)arg; 11372 zpool_handle_t *zhp; 11373 11374 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL) 11375 return (void *)(1); 11376 11377 for (int row = 0; ; row++) { 11378 boolean_t missing; 11379 struct timespec timeout; 11380 int ret = 0; 11381 (void) clock_gettime(CLOCK_REALTIME, &timeout); 11382 11383 if (zpool_refresh_stats(zhp, &missing) != 0 || missing || 11384 zpool_props_refresh(zhp) != 0) { 11385 zpool_close(zhp); 11386 return (void *)(uintptr_t)(missing ? 0 : 1); 11387 } 11388 11389 print_wait_status_row(wd, zhp, row); 11390 11391 timeout.tv_sec += floor(wd->wd_interval); 11392 long nanos = timeout.tv_nsec + 11393 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC; 11394 if (nanos >= NANOSEC) { 11395 timeout.tv_sec++; 11396 timeout.tv_nsec = nanos - NANOSEC; 11397 } else { 11398 timeout.tv_nsec = nanos; 11399 } 11400 pthread_mutex_lock(&wd->wd_mutex); 11401 if (!wd->wd_should_exit) 11402 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex, 11403 &timeout); 11404 pthread_mutex_unlock(&wd->wd_mutex); 11405 if (ret == 0) { 11406 break; /* signaled by main thread */ 11407 } else if (ret != ETIMEDOUT) { 11408 (void) fprintf(stderr, gettext("pthread_cond_timedwait " 11409 "failed: %s\n"), strerror(ret)); 11410 zpool_close(zhp); 11411 return (void *)(uintptr_t)(1); 11412 } 11413 } 11414 11415 zpool_close(zhp); 11416 return (void *)(0); 11417 } 11418 11419 int 11420 zpool_do_wait(int argc, char **argv) 11421 { 11422 boolean_t verbose = B_FALSE; 11423 int c, i; 11424 unsigned long count; 11425 pthread_t status_thr; 11426 int error = 0; 11427 zpool_handle_t *zhp; 11428 11429 wait_data_t wd; 11430 wd.wd_scripted = B_FALSE; 11431 wd.wd_exact = B_FALSE; 11432 wd.wd_headers_once = B_FALSE; 11433 wd.wd_should_exit = B_FALSE; 11434 11435 pthread_mutex_init(&wd.wd_mutex, NULL); 11436 pthread_cond_init(&wd.wd_cv, NULL); 11437 11438 /* By default, wait for all types of activity. 
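 * The -t option below narrows this set, e.g. 'zpool wait -t free,trim tank'
 * waits only for freeing and trimming to finish.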
*/ 11439 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) 11440 wd.wd_enabled[i] = B_TRUE; 11441 11442 while ((c = getopt(argc, argv, "HpT:t:")) != -1) { 11443 switch (c) { 11444 case 'H': 11445 wd.wd_scripted = B_TRUE; 11446 break; 11447 case 'n': 11448 wd.wd_headers_once = B_TRUE; 11449 break; 11450 case 'p': 11451 wd.wd_exact = B_TRUE; 11452 break; 11453 case 'T': 11454 get_timestamp_arg(*optarg); 11455 break; 11456 case 't': 11457 /* Reset activities array */ 11458 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled)); 11459 11460 for (char *tok; (tok = strsep(&optarg, ",")); ) { 11461 static const char *const col_opts[] = { 11462 "discard", "free", "initialize", "replace", 11463 "remove", "resilver", "scrub", "trim", 11464 "raidz_expand" }; 11465 11466 for (i = 0; i < ARRAY_SIZE(col_opts); ++i) 11467 if (strcmp(tok, col_opts[i]) == 0) { 11468 wd.wd_enabled[i] = B_TRUE; 11469 goto found; 11470 } 11471 11472 (void) fprintf(stderr, 11473 gettext("invalid activity '%s'\n"), tok); 11474 usage(B_FALSE); 11475 found:; 11476 } 11477 break; 11478 case '?': 11479 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 11480 optopt); 11481 usage(B_FALSE); 11482 } 11483 } 11484 11485 argc -= optind; 11486 argv += optind; 11487 11488 get_interval_count(&argc, argv, &wd.wd_interval, &count); 11489 if (count != 0) { 11490 /* This subcmd only accepts an interval, not a count */ 11491 (void) fprintf(stderr, gettext("too many arguments\n")); 11492 usage(B_FALSE); 11493 } 11494 11495 if (wd.wd_interval != 0) 11496 verbose = B_TRUE; 11497 11498 if (argc < 1) { 11499 (void) fprintf(stderr, gettext("missing 'pool' argument\n")); 11500 usage(B_FALSE); 11501 } 11502 if (argc > 1) { 11503 (void) fprintf(stderr, gettext("too many arguments\n")); 11504 usage(B_FALSE); 11505 } 11506 11507 wd.wd_poolname = argv[0]; 11508 11509 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL) 11510 return (1); 11511 11512 if (verbose) { 11513 /* 11514 * We use a separate thread for printing status updates because 11515 * the main thread will call lzc_wait(), which blocks as long 11516 * as an activity is in progress, which can be a long time. 11517 */ 11518 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd) 11519 != 0) { 11520 (void) fprintf(stderr, gettext("failed to create status" 11521 "thread: %s\n"), strerror(errno)); 11522 zpool_close(zhp); 11523 return (1); 11524 } 11525 } 11526 11527 /* 11528 * Loop over all activities that we are supposed to wait for until none 11529 * of them are in progress. Note that this means we can end up waiting 11530 * for more activities to complete than just those that were in progress 11531 * when we began waiting; if an activity we are interested in begins 11532 * while we are waiting for another activity, we will wait for both to 11533 * complete before exiting. 
11534 */ 11535 for (;;) { 11536 boolean_t missing = B_FALSE; 11537 boolean_t any_waited = B_FALSE; 11538 11539 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 11540 boolean_t waited; 11541 11542 if (!wd.wd_enabled[i]) 11543 continue; 11544 11545 error = zpool_wait_status(zhp, i, &missing, &waited); 11546 if (error != 0 || missing) 11547 break; 11548 11549 any_waited = (any_waited || waited); 11550 } 11551 11552 if (error != 0 || missing || !any_waited) 11553 break; 11554 } 11555 11556 zpool_close(zhp); 11557 11558 if (verbose) { 11559 uintptr_t status; 11560 pthread_mutex_lock(&wd.wd_mutex); 11561 wd.wd_should_exit = B_TRUE; 11562 pthread_cond_signal(&wd.wd_cv); 11563 pthread_mutex_unlock(&wd.wd_mutex); 11564 (void) pthread_join(status_thr, (void *)&status); 11565 if (status != 0) 11566 error = status; 11567 } 11568 11569 pthread_mutex_destroy(&wd.wd_mutex); 11570 pthread_cond_destroy(&wd.wd_cv); 11571 return (error); 11572 } 11573 11574 static int 11575 find_command_idx(const char *command, int *idx) 11576 { 11577 for (int i = 0; i < NCOMMAND; ++i) { 11578 if (command_table[i].name == NULL) 11579 continue; 11580 11581 if (strcmp(command, command_table[i].name) == 0) { 11582 *idx = i; 11583 return (0); 11584 } 11585 } 11586 return (1); 11587 } 11588 11589 /* 11590 * Display version message 11591 */ 11592 static int 11593 zpool_do_version(int argc, char **argv) 11594 { 11595 (void) argc, (void) argv; 11596 return (zfs_version_print() != 0); 11597 } 11598 11599 /* Display documentation */ 11600 static int 11601 zpool_do_help(int argc, char **argv) 11602 { 11603 char page[MAXNAMELEN]; 11604 if (argc < 3 || strcmp(argv[2], "zpool") == 0) 11605 strcpy(page, "zpool"); 11606 else if (strcmp(argv[2], "concepts") == 0 || 11607 strcmp(argv[2], "props") == 0) 11608 snprintf(page, sizeof (page), "zpool%s", argv[2]); 11609 else 11610 snprintf(page, sizeof (page), "zpool-%s", argv[2]); 11611 11612 execlp("man", "man", page, NULL); 11613 11614 fprintf(stderr, "couldn't run man program: %s", strerror(errno)); 11615 return (-1); 11616 } 11617 11618 /* 11619 * Do zpool_load_compat() and print error message on failure 11620 */ 11621 static zpool_compat_status_t 11622 zpool_do_load_compat(const char *compat, boolean_t *list) 11623 { 11624 char report[1024]; 11625 11626 zpool_compat_status_t ret; 11627 11628 ret = zpool_load_compat(compat, list, report, 1024); 11629 switch (ret) { 11630 11631 case ZPOOL_COMPATIBILITY_OK: 11632 break; 11633 11634 case ZPOOL_COMPATIBILITY_NOFILES: 11635 case ZPOOL_COMPATIBILITY_BADFILE: 11636 case ZPOOL_COMPATIBILITY_BADTOKEN: 11637 (void) fprintf(stderr, "Error: %s\n", report); 11638 break; 11639 11640 case ZPOOL_COMPATIBILITY_WARNTOKEN: 11641 (void) fprintf(stderr, "Warning: %s\n", report); 11642 ret = ZPOOL_COMPATIBILITY_OK; 11643 break; 11644 } 11645 return (ret); 11646 } 11647 11648 int 11649 main(int argc, char **argv) 11650 { 11651 int ret = 0; 11652 int i = 0; 11653 char *cmdname; 11654 char **newargv; 11655 11656 (void) setlocale(LC_ALL, ""); 11657 (void) setlocale(LC_NUMERIC, "C"); 11658 (void) textdomain(TEXT_DOMAIN); 11659 srand(time(NULL)); 11660 11661 opterr = 0; 11662 11663 /* 11664 * Make sure the user has specified some command. 11665 */ 11666 if (argc < 2) { 11667 (void) fprintf(stderr, gettext("missing command\n")); 11668 usage(B_FALSE); 11669 } 11670 11671 cmdname = argv[1]; 11672 11673 /* 11674 * Special case '-?' 
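 * and '--help', both of which are handled by a direct call to usage(B_TRUE).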
11675 */ 11676 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0) 11677 usage(B_TRUE); 11678 11679 /* 11680 * Special case '-V|--version' 11681 */ 11682 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0)) 11683 return (zpool_do_version(argc, argv)); 11684 11685 /* 11686 * Special case 'help' 11687 */ 11688 if (strcmp(cmdname, "help") == 0) 11689 return (zpool_do_help(argc, argv)); 11690 11691 if ((g_zfs = libzfs_init()) == NULL) { 11692 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno)); 11693 return (1); 11694 } 11695 11696 libzfs_print_on_error(g_zfs, B_TRUE); 11697 11698 zfs_save_arguments(argc, argv, history_str, sizeof (history_str)); 11699 11700 /* 11701 * Many commands modify input strings for string parsing reasons. 11702 * We create a copy to protect the original argv. 11703 */ 11704 newargv = safe_malloc((argc + 1) * sizeof (newargv[0])); 11705 for (i = 0; i < argc; i++) 11706 newargv[i] = strdup(argv[i]); 11707 newargv[argc] = NULL; 11708 11709 /* 11710 * Run the appropriate command. 11711 */ 11712 if (find_command_idx(cmdname, &i) == 0) { 11713 current_command = &command_table[i]; 11714 ret = command_table[i].func(argc - 1, newargv + 1); 11715 } else if (strchr(cmdname, '=')) { 11716 verify(find_command_idx("set", &i) == 0); 11717 current_command = &command_table[i]; 11718 ret = command_table[i].func(argc, newargv); 11719 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) { 11720 /* 11721 * 'freeze' is a vile debugging abomination, so we treat 11722 * it as such. 11723 */ 11724 zfs_cmd_t zc = {"\0"}; 11725 11726 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name)); 11727 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc); 11728 if (ret != 0) { 11729 (void) fprintf(stderr, 11730 gettext("failed to freeze pool: %d\n"), errno); 11731 ret = 1; 11732 } 11733 11734 log_history = 0; 11735 } else { 11736 (void) fprintf(stderr, gettext("unrecognized " 11737 "command '%s'\n"), cmdname); 11738 usage(B_FALSE); 11739 ret = 1; 11740 } 11741 11742 for (i = 0; i < argc; i++) 11743 free(newargv[i]); 11744 free(newargv); 11745 11746 if (ret == 0 && log_history) 11747 (void) zpool_log_history(g_zfs, history_str); 11748 11749 libzfs_fini(g_zfs); 11750 11751 /* 11752 * The 'ZFS_ABORT' environment variable causes us to dump core on exit 11753 * for the purposes of running ::findleaks. 11754 */ 11755 if (getenv("ZFS_ABORT") != NULL) { 11756 (void) printf("dumping core by request\n"); 11757 abort(); 11758 } 11759 11760 return (ret); 11761 } 11762
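/*
 * Note on dispatch: an argv[1] that is not a known command name but contains
 * '=' is routed to 'zpool set' with the unshifted argument vector, so e.g.
 * (illustrative pool name)
 *
 *	# zpool comment=backup tank
 *
 * behaves the same as 'zpool set comment=backup tank'.
 */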